Data augmentation using PyTorch: making additional data


I want to apply data augmentation to my dataset using PyTorch. I am working with the face landmarks dataset from the official PyTorch example, and instead of applying the transformations on the fly at every epoch, I want to create additional data: the original samples plus the transformed ones, and use both of them for training. I saw an example suggesting creating different training loops, but I got confused about how to do it. This is what I tried:

    import os

    import numpy as np
    import pandas as pd
    import torch
    from skimage import io
    from torch.utils.data import ConcatDataset, DataLoader, Dataset
    from torchvision import transforms

    # Rescale, RandomCrop and ToTensor are the transform classes defined in the
    # official PyTorch data-loading tutorial and are assumed to be defined above.


    class FaceLandmarksDataset(Dataset):
        def __init__(self, csv_file, root_dir, transform=None,
                     augmentation_label='none'):
            """
            Arguments:
                csv_file (string): Path to the csv file with annotations.
                root_dir (string): Directory with all the images.
                transform (callable, optional): Optional transform to be applied
                    on a sample.
                augmentation_label (string): Label attached to every sample so
                    that original and augmented samples can be told apart.
            """
            self.landmarks_frame = pd.read_csv(csv_file)
            self.root_dir = root_dir
            self.transform = transform
            self.augmentation_label = augmentation_label

        def __len__(self):
            return len(self.landmarks_frame)

        def __getitem__(self, idx):
            if torch.is_tensor(idx):
                idx = idx.tolist()

            img_name = os.path.join(self.root_dir,
                                    self.landmarks_frame.iloc[idx, 0])
            image = io.imread(img_name)
            landmarks = self.landmarks_frame.iloc[idx, 1:]
            landmarks = np.array([landmarks], dtype=float).reshape(-1, 2)

            sample = {'image': image, 'landmarks': landmarks}

            # Apply the transform passed to the constructor (if any) instead of
            # hard-coding one here, so the same class can serve both the
            # original and the augmented copy of the data.
            if self.transform:
                sample = self.transform(sample)

            # Tag the sample with the label of the dataset instance it came from.
            sample['augmentation_label'] = self.augmentation_label

            return sample

    # dataloader for the dataset ---------------------------------
    transformed_dataset = FaceLandmarksDataset(
        csv_file='/faces_dataset/faces/faces/face_landmarks.csv',
        root_dir='/faces_dataset/faces/faces/')

    dataloader = DataLoader(transformed_dataset, batch_size=4,
                            shuffle=True, num_workers=0)
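
What I have in mind is two instances of the same dataset class: one that stays as close to the original data as possible and one that goes through the random augmentation from the tutorial. This is only a sketch: Rescale, RandomCrop and ToTensor are the transform classes from the tutorial (with the tutorial's 256/224 sizes), and giving the "original" copy a deterministic Rescale((224, 224)) plus ToTensor() is my own addition, only so that its samples end up with the same shape as the augmented ones and can be batched together.

    # "Original" copy: no random augmentation, only a deterministic resize and
    # conversion to tensor so the samples match the shape of the augmented ones
    original_dataset = FaceLandmarksDataset(
        csv_file='/faces_dataset/faces/faces/face_landmarks.csv',
        root_dir='/faces_dataset/faces/faces/',
        transform=transforms.Compose([Rescale((224, 224)),
                                      ToTensor()]),
        augmentation_label='none')

    # Augmented copy: the random augmentation pipeline from the tutorial
    augmented_dataset = FaceLandmarksDataset(
        csv_file='/faces_dataset/faces/faces/face_landmarks.csv',
        root_dir='/faces_dataset/faces/faces/',
        transform=transforms.Compose([Rescale(256),
                                      RandomCrop(224),
                                      ToTensor()]),
        augmentation_label='augmented')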

Then I got confused about how to proceed and how I should load the two datasets with the dataloaders. Any suggestions?
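
The closest thing I have found for combining them is torch.utils.data.ConcatDataset, which concatenates the two copies into one dataset so a single DataLoader (and a single training loop) can shuffle over both. This is just a rough sketch of what I think it would look like:

    # Merge the original and the augmented copy into one dataset of double length
    combined_dataset = ConcatDataset([original_dataset, augmented_dataset])

    combined_loader = DataLoader(combined_dataset, batch_size=4,
                                 shuffle=True, num_workers=0)

    # A single training loop then sees both copies, shuffled together
    for batch in combined_loader:
        images = batch['image']                # batch of 224x224 image tensors
        landmarks = batch['landmarks']         # batch of 68x2 landmark tensors
        labels = batch['augmentation_label']   # list of 'none' / 'augmented'
        # ... training step here

Is that the right way to do it, or should I keep two separate dataloaders and training loops as in the example I saw?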
