I'm new to PyTorch. I want to use the autoencoder concept to get an unsupervised classification: it seems like the minimum (bottleneck) dimension of the autoencoder should be usable as input to a softmax function. As a stand-in target, I used a "dummy" random projection of the data to implement the loss function. However, the loss never changes between epochs, and I don't understand why.
```python
import pandas as pd
import torch
import torch.nn as nn

x = pd.read_csv(<file.csv>)            # 22 numeric feature columns
xray = x.to_numpy()
txr = torch.from_numpy(xray).float()
txr.requires_grad = True               # probably unnecessary for an input, but this is what I ran

# "dummy" target: a one-off random linear projection of the data down to 6 dims
dmmy = torch.nn.Linear(22, 6)(txr)
```
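As I understand it, `dmmy` is a one-shot random projection of the data; the throwaway `Linear` is never given to the optimizer, so it should act as a constant target. A variant I considered (hypothetical, not what I actually ran) builds it under `torch.no_grad()` so the target carries no autograd graph at all:

```python
# Hypothetical variant: freeze the target explicitly so no graph
# hangs off the throwaway projection layer.
with torch.no_grad():
    dmmy = torch.nn.Linear(22, 6)(txr)
```

Either way, the target is fixed across epochs.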
```python
class AE(nn.Module):
    def __init__(self):
        super().__init__()
        self.L1 = nn.Linear(in_features=22, out_features=20)
        self.E1 = nn.ELU(alpha=9)
        # ... L2/E2 through L6/E6 elided (widths step down from 20 to 8) ...
        self.L7 = nn.Linear(in_features=8, out_features=6)
        self.E7 = nn.ELU(alpha=9)

    def forward(self, x: torch.Tensor):
        L1 = self.L1(x)
        E1 = self.E1(L1)
        # ... layers 2 through 6 elided ...
        L7 = self.L7(E6)
        E7 = self.E7(L7)
        return E7
```
```python
m = AE()                     # (instantiation was missing from my original paste)
Lossfx = torch.nn.MSELoss()
opt = torch.optim.SGD(params=m.parameters(), lr=0.01)

epochs = 2
for epoch in range(epochs):
    m.train()
    pred = m(txr)
    loss = Lossfx(dmmy, pred)         # MSE is symmetric, so argument order shouldn't matter
    loss.backward(retain_graph=True)
    print(f'loss: {loss}')
    print(f'pred: {pred[0]}')
    opt.zero_grad()
    opt.step()
print('Done')
```
**Softmax after training/testing, e.g. `ypred = nn.Softmax(dim=1)(pred)`**
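To spell out that last step, the intent is roughly this (a sketch reusing `m` and `txr` from above; the 6 bottleneck outputs are treated as class scores):

```python
with torch.no_grad():
    scores = m(txr)                    # (N, 6) bottleneck outputs
    ypred = nn.Softmax(dim=1)(scores)  # per-row pseudo-class probabilities
    labels = ypred.argmax(dim=1)       # hard "cluster" label per sample
```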
## Result

```
epoch 1
loss: 38.851
pred[0]: [ 4.6630, -8.9925, -8.9977,  6.2631,  2.2962,  2.2374]
dmmy[0]: [-0.1594, -0.2595, -0.2731,  0.2965, -0.0969,  0.5345]

epoch 2
loss: 38.851   (identical)
pred[0]: unchanged
dmmy[0]: unchanged
```