Thursday 31 January 2019

LSTM autoencoder always returns the average of the input sequence

I'm trying to build a very simple LSTM autoencoder with PyTorch. I always train it with the same data:

x = torch.Tensor([[0.0], [0.1], [0.2], [0.3], [0.4]])

I built my model following this Keras sequence-autoencoder example:

inputs = Input(shape=(timesteps, input_dim))
encoded = LSTM(latent_dim)(inputs)

decoded = RepeatVector(timesteps)(encoded)
decoded = LSTM(input_dim, return_sequences=True)(decoded)

sequence_autoencoder = Model(inputs, decoded)
encoder = Model(inputs, encoded)
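
For context, the RepeatVector(timesteps) step above copies the encoder's final latent vector across every time step before the decoder LSTM. A minimal stand-alone sketch of that idea in PyTorch (the variable names are just for illustration; 20 and 5 match the latent_dim and sequence length I use below):

import torch

# Hypothetical illustration of the RepeatVector idea:
# tile one latent vector across 5 time steps.
latent = torch.randn(1, 1, 20)       # (1, batch, latent_dim) -- e.g. the encoder's last output
repeated = latent.repeat(5, 1, 1)    # (seq_len, batch, latent_dim)
print(repeated.shape)                # torch.Size([5, 1, 20])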

My code runs without errors, but y_pred converges to:

tensor([[[0.2]],
        [[0.2]],
        [[0.2]],
        [[0.2]],
        [[0.2]]], grad_fn=<StackBackward>)
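
That constant 0.2 is exactly the mean of my input sequence, which is what the title means by "the average". A quick sanity check on the same x:

import torch

x = torch.Tensor([[0.0], [0.1], [0.2], [0.3], [0.4]])
print(x.mean())                      # tensor(0.2000) -- the value every output collapses to
print(((x - x.mean()) ** 2).mean())  # tensor(0.0200) -- the MSE a constant mean predictor achieves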

Here is my code:

import torch
import torch.nn as nn
import torch.optim as optim


class LSTM(nn.Module):

    def __init__(self, input_dim, latent_dim, batch_size, num_layers):
        super(LSTM, self).__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.batch_size = batch_size
        self.num_layers = num_layers

        self.encoder = nn.LSTM(self.input_dim, self.latent_dim, self.num_layers)

        self.decoder = nn.LSTM(self.latent_dim, self.input_dim, self.num_layers)

    def init_hidden_encoder(self):
        return (torch.zeros(self.num_layers, self.batch_size, self.latent_dim),
                torch.zeros(self.num_layers, self.batch_size, self.latent_dim))

    def init_hidden_decoder(self):
        return (torch.zeros(self.num_layers, self.batch_size, self.input_dim),
                torch.zeros(self.num_layers, self.batch_size, self.input_dim))

    def forward(self, input):
        # Reset the hidden and cell states
        self.hidden_encoder = self.init_hidden_encoder()
        self.hidden_decoder = self.init_hidden_decoder()

        # Reshape input to (seq_len, batch, input_dim)
        input = input.view(len(input), self.batch_size, -1)

        # Encode, then repeat the last time step's output
        # across all 5 time steps (like Keras' RepeatVector)
        encoded, self.hidden = self.encoder(input, self.hidden_encoder)
        encoded = encoded[-1].repeat(5, 1, 1)

        # Decode
        y, self.hidden = self.decoder(encoded, self.hidden_decoder)
        return y


model = LSTM(input_dim=1, latent_dim=20, batch_size=1, num_layers=1)
loss_function = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)

x = torch.Tensor([[0.0], [0.1], [0.2], [0.3], [0.4]])

while True:
    y_pred = model(x)
    optimizer.zero_grad()
    loss = loss_function(y_pred, x)
    loss.backward()
    optimizer.step()
    print(y_pred)
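
For completeness, a variant of the same training loop that stops after a fixed number of steps and prints the loss instead of the full prediction (a minimal sketch, assuming the model, loss_function, optimizer and x defined above):

# Minimal sketch: bounded training loop that logs the loss
# (assumes model, loss_function, optimizer and x from above).
for step in range(10000):
    optimizer.zero_grad()
    y_pred = model(x)
    loss = loss_function(y_pred, x)
    loss.backward()
    optimizer.step()
    if step % 1000 == 0:
        print(step, loss.item())

print(y_pred)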



