I'm trying to write an autoencoder that accepts either images or a latent vector, and returns both the reconstructed image and the latent code.
import torch
import torch.nn as nn
import torch.nn.functional as F

dim_code = 128

class Autoencoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            # Custom(lambda x: x.permute(0, 3, 1, 2).float()),
            nn.ConvTranspose2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),   # 6 x 32 x 32
            nn.ConvTranspose2d(in_channels=6, out_channels=12, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),   # 12 x 16 x 16 = 3072
            nn.Flatten(),
            nn.Linear(3072, 512),
            nn.ReLU(),
            nn.Linear(512, dim_code),
        )
        self.decoder = nn.Sequential(
            nn.ReLU(),
            nn.Linear(dim_code, 512),
            nn.ReLU(),
            nn.Linear(512, 4096),
            nn.ReLU(),
            nn.Linear(4096, 64 * 64 * 3),
            # Custom(lambda x: x.view(-1, 64, 64, 3).float()),
        )

    def forward(self, x, len_batch=1, latent=False):
        if not latent:
            latent_code = self.encoder(x)
            reconstruct = self.decoder(latent_code)
            reconstruction = torch.reshape(reconstruct, (len_batch, 3, 64, 64))
        else:
            reconstruct = self.decoder(X)
            reconstruction = torch.reshape(reconstruct, (len_batch, 3, 64, 64))
        return reconstruction, latent_code

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
criterion = F.mse_loss
model = Autoencoder().to(device)
optimizer = torch.optim.Adam(model.parameters())
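For context, the training step is wired up roughly like this (train_loader is just a placeholder name for my DataLoader of 64x64 RGB images, and num_epochs is arbitrary):

num_epochs = 10  # placeholder
for epoch in range(num_epochs):
    for batch in train_loader:                                # images of shape (B, 3, 64, 64)
        batch = batch.to(device)
        reconstruction, latent_code = model(batch, len_batch=batch.size(0))
        loss = criterion(reconstruction, batch)               # pixel-wise MSE against the input
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()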
Then I tried to generate a random latent vector and feed it to the autoencoder to see the results:
import numpy as np
import matplotlib.pyplot as plt

z = np.random.randn(25, dim_code)
z_t = torch.tensor(z, dtype=float)
output = model(z_t.to(device), len_batch=25, latent=True)[0]

for i in range(5):
    plt.subplot(2, 3, i + 1)
    plt.axis("off")
    plt.imshow(output[i].permute(1, 2, 0).cpu().detach().numpy())
But unfortunately it raises an error:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat2 in method wrapper_CUDA_mm)
I tried moving the tensor to the CPU and passing it as a numpy.array instead, but it raises the same error.
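For example, one of the variants I tried looked roughly like this (reconstructed from memory, so the exact casts may differ):

# attempted workaround: build the latent tensor on the CPU as float32
z = np.random.randn(25, dim_code)
z_t = torch.tensor(z, dtype=torch.float32).cpu()
output = model(z_t, len_batch=25, latent=True)[0]   # still raises the same RuntimeError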