PyTorch custom RandomCrop for semantic segmentation

I am trying to implement a custom dataset loader. First, I resize the images and labels by the same random ratio (between 0.98 and 1.1), then I randomly crop both with the same parameters so that I can feed them into the network. However, I am getting an error from PyTorch's functional module. Here is my code:

class RandomCrop(object):

    def __init__(self, size, padding=None, pad_if_needed=True, fill=0, padding_mode='constant'):
        
        self.size = size
        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

    @staticmethod
    def get_params(img, output_size):
        # Pick a random top-left corner (i, j) for a crop of output_size within img.
        w, h = img.size
        th, tw = output_size
        if w == tw and h == th:
            return 0, 0, h, w

        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw

    def __call__(self, data):
     
        img, mask = data["image"], data["mask"]

        # pad the width if needed
        if self.pad_if_needed and img.size[0] < self.size[1]:
            img = F.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)
            mask = F.pad(mask, (self.size[1] - mask.size[0], 0), self.fill, self.padding_mode)
        # pad the height if needed
        if self.pad_if_needed and img.size[1] < self.size[0]:
            img = F.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)
            mask = F.pad(mask, (0, self.size[0] - mask.size[1]), self.fill, self.padding_mode)
       
        i, j, h, w = self.get_params(img, self.size)
        crop_image = transforms.functional.crop(img, i, j, h, w)
        crop_mask = transforms.functional.crop(mask, i, j, h, w)

        return {"image": crop_image, "mask": crop_mask}

Here is the error:

AttributeError: 'Image' object has no attribute 'dim'

1 Answer

Accepted answer by Atahan Özer:

Mistakenly, I had imported torch.nn.functional instead of torchvision.transforms.functional, so F.pad expected a tensor rather than a PIL Image (which is why the Image object was asked for .dim()). After switching to transforms.functional.pad, everything went smoothly.
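
For reference, here is a minimal sketch of the corrected imports and usage, assuming the RandomCrop class above is defined as posted; the file names and the (256, 256) crop size are placeholders, not from the original post:

import random
from PIL import Image
from torchvision import transforms
import torchvision.transforms.functional as F  # PIL-aware pad/crop, NOT torch.nn.functional

# With F bound to torchvision.transforms.functional, F.pad accepts PIL Images,
# so the RandomCrop class above works unchanged.
crop = RandomCrop(size=(256, 256))

img = Image.open("image.png").convert("RGB")   # placeholder paths
mask = Image.open("mask.png")

sample = crop({"image": img, "mask": mask})
print(sample["image"].size, sample["mask"].size)  # both (256, 256)

The mix-up is easy to hit because torch.nn.functional is also conventionally imported as F; keeping the torchvision functional module under a distinct alias (e.g. TF) avoids the clash.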