Is this the correct MAML meta-learning procedure for inner-loop and outer-loop training?


Below is my code:

def mamltrain(self, train_support_set, train_query_set):
    # outer loop optimizer
    out_optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.meta_lr, weight_decay=5e-3)
    support_set = torch.LongTensor(train_support_set)
    query_set = torch.LongTensor(train_query_set)
    self.model.load_state_dict(self.keep_weight)
    #### outer loop
    for idx in range(20):

        # sample mini-batches of users; each user is treated as one task
        for user in tqdm(DataLoader(user_set, batch_size=20, shuffle=True)):
            losses = []
            # inner loop: adapt a per-user copy of the model on its support set
            for u in user:
                u = u.item()
                edges = dataprocess.graphSageMAML(G, [u])
                model_copy = self.inner(user=u, support_set=support_set, edges=edges)
                # build this user's query-set batch for the outer-loop loss
                users, items, rating = [], [], []
                for qu, qi, qr in query_set[query_set[:, 0] == u]:
                    users.append(qu.item())
                    items.append(qi.item())
                    rating.append(qr.item())
                users = torch.LongTensor(users)
                items = torch.LongTensor(items)
                rating = torch.Tensor(rating)
                # query-set loss of the adapted copy
                logits = model_copy(users, items, edges, isTrain=True)
                del model_copy
                loss = self.criterion(logits, rating)
                losses.append(loss)
            # average the per-user query losses and take one outer (meta) step
            losses = torch.stack(losses).mean(0)
            out_optimizer.zero_grad()
            losses.backward()
            out_optimizer.step()
            self.store_parameters()

    return

def inner(self, user, support_set, edges):
    self.model.load_state_dict(self.keep_weight)
    model_copy = deepcopy(self.model)
    # Freeze everything except the dense layers that are adapted in the inner loop
    for name, param in model_copy.named_parameters():
        if name in ['denseLayer1.0.weight', 'denseLayer1.0.bias',
                    'denseLayer2.0.weight', 'denseLayer2.0.bias',
                    'denseLayer3.0.weight', 'denseLayer3.0.bias']:
            param.requires_grad = True
        else:
            param.requires_grad = False

    inner_optimizer = torch.optim.Adam(model_copy.parameters(), lr=self.inner_lr, weight_decay=5e-3)

    # build this user's support-set batch
    users, items, rating = [], [], []
    for su, si, sr in support_set[support_set[:, 0] == user]:
        users.append(su.item())
        items.append(si.item())
        rating.append(sr.item())
    users = torch.LongTensor(users)
    items = torch.LongTensor(items)
    rating = torch.Tensor(rating)
    # inner-loop adaptation: a few gradient steps on the support set
    for idx in range(3):
        logits = model_copy(users, items, edges, isTrain=True)
        loss = self.criterion(logits, rating)
        inner_optimizer.zero_grad()
        loss.backward()
        inner_optimizer.step()
    return model_copy

I want to know whether this code correctly implements the MAML inner-loop and outer-loop training procedure.
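For reference, this is the first-order MAML update pattern I am comparing against. It is only a minimal sketch with placeholder names (fomaml_step, the (support_x, support_y, query_x, query_y) task tuples, the SGD inner optimizer), none of which come from my code above: the inner loop adapts a deep copy of the meta-model on each task's support set, and the query-set gradients of that adapted copy are accumulated onto the meta-model's parameters before the outer optimizer takes a single step.

import torch
from copy import deepcopy

def fomaml_step(meta_model, criterion, meta_optimizer, tasks, inner_lr=1e-2, inner_steps=3):
    """One first-order MAML meta-update over a list of
    (support_x, support_y, query_x, query_y) task tuples."""
    meta_optimizer.zero_grad()
    total_query_loss = 0.0

    for support_x, support_y, query_x, query_y in tasks:
        # inner loop: adapt a copy of the current meta-parameters on the support set
        fast_model = deepcopy(meta_model)
        inner_opt = torch.optim.SGD(fast_model.parameters(), lr=inner_lr)
        for _ in range(inner_steps):
            inner_opt.zero_grad()
            support_loss = criterion(fast_model(support_x), support_y)
            support_loss.backward()
            inner_opt.step()

        # outer loop: query loss of the adapted copy, whose gradients are
        # accumulated onto the meta-parameters (first-order approximation)
        query_loss = criterion(fast_model(query_x), query_y)
        grads = torch.autograd.grad(query_loss, list(fast_model.parameters()))
        for meta_p, g in zip(meta_model.parameters(), grads):
            g = g / len(tasks)
            meta_p.grad = g if meta_p.grad is None else meta_p.grad + g
        total_query_loss += query_loss.item()

    # one outer step applies the averaged query gradients to the meta-model
    meta_optimizer.step()
    return total_query_loss / len(tasks)

In particular, I am not sure my outer update matches this: in mamltrain the query loss is computed from model_copy, so I am unsure whether losses.backward() produces any gradients for self.model, which is what out_optimizer actually updates.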
