Mini-batch gradient descent: bad accuracy/loss


I’m trying mini-batch gradient descent on the popular iris dataset, but somehow I don’t manage to get the accuracy of the model above 75-80%. Also, I’m not certain whether I’m calculating the loss and the accuracy correctly. Any suggestions on how to improve my code, or mistakes I’m making, are appreciated.

import torch
from torch import nn
from torch.utils.data import DataLoader

batch_size = 10
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
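Note that both loaders currently iterate over the same `dataset`, so accuracy is measured on the training data. A sketch of a disjoint split instead, assuming `dataset` is a standard `Dataset` (the 80/20 ratio is just an example):

from torch.utils.data import random_split

n_train = int(0.8 * len(dataset))  # 80% of the samples for training
train_set, test_set = random_split(dataset, [n_train, len(dataset) - n_train])
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)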

Training loop:

n_iters = 1000
steps = n_iters // 10
LOSS = []
for epoch in range(n_iters):
    for i, (inputs, labels) in enumerate(train_loader):
        out = model(inputs)
        train_labels = transform_label(labels)
        l = loss(out, train_labels)
        l.backward()
        # update weights
        optim.step()
        optim.zero_grad()
    LOSS.append(l.item())  # loss of the last batch of this epoch
    if epoch % steps == 0:
        print(f"\n epoch: {epoch}/{n_iters}, mean loss: {sum(LOSS)/len(LOSS)}")

Calculate accuracy:

    def accuracy(model, test_loader):
        # map string labels to class indices 0, 1, 2
        def transform_label(label_data):
            data = []
            for i in label_data:
                if i == "Iris-setosa":
                    data.append(torch.tensor(0))
                if i == "Iris-versicolor":
                    data.append(torch.tensor(1))
                if i == "Iris-virginica":
                    data.append(torch.tensor(2))
            return torch.stack(data)

        number_correct = 0
        number_total = 0
        for X_test, test_labels in test_loader:
            test_labels = transform_label(test_labels)
            x_label_pre = model(X_test)
            _, x_label_pre_hat = torch.max(x_label_pre, 1)  # predicted class per sample
            for idx in range(len(X_test)):
                if x_label_pre_hat[idx].item() == test_labels[idx].item():
                    number_correct += 1
                number_total += 1
        return number_correct / number_total
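A usage sketch, assuming the function returns the fraction of correct predictions as above; evaluation should run in eval mode with gradients disabled:

model.eval()
with torch.no_grad():
    acc = accuracy(model, test_loader)
print(f"test accuracy: {acc:.3f}")
model.train()
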
lr = 0.01
model = NeuralNetwork()
optim = torch.optim.Adam(model.parameters(), lr=lr)
#optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
loss = torch.nn.CrossEntropyLoss()
#loss = torch.nn.MSELoss()
# Weights are torch.float32 by default, not float64 --> dtype mismatch error otherwise
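
On that dtype comment: `nn.Linear` weights default to `torch.float32`, so float64 inputs (e.g. from a numpy array of the iris features) raise a dtype mismatch. A sketch of the cast, assuming `features` is a numpy float64 array:

X = torch.tensor(features, dtype=torch.float32)  # cast float64 numpy data to float32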

class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear_stack = nn.Sequential(
            nn.Linear(4, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 3),
        )

    def forward(self, x):
        logits = self.linear_stack(x)
        return logits
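
A quick shape sanity check on the network (a sketch with random inputs only):

dummy = torch.randn(batch_size, 4)  # one batch of 4-feature samples
logits = NeuralNetwork()(dummy)
print(logits.shape)                 # torch.Size([10, 3]) for batch_size = 10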
