Error while trying Grad-CAM on EfficientNet-CBAM

import torch
from torchvision import transforms
from torchvision.datasets import ImageFolder

CAMdir = '/content/drive/MyDrive/pneu/'
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
CAM_images = ImageFolder(CAMdir, transforms.Compose([transforms.ToTensor(), normalize]))
CAM_loader = torch.utils.data.DataLoader(
    CAM_images, shuffle=False,      # batch_size is not set, so it defaults to 1
    num_workers=2, pin_memory=True)
for inputs, labels in CAM_loader:
    inputs = inputs.cuda()          # move the input tensors to the GPU
    print(inputs.shape)
    outputs = model(inputs)
input_tensor = list(enumerate(CAM_loader))

The outputs = model(inputs) line is where the error happens; this is the traceback:

torch.Size([1, 3, 512, 862])
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-22-1ad10e1d4501> in <cell line: 8>()
      9     inputs = inputs.cuda()
     10     print(inputs.shape) # Move the input tensors to the GPU
---> 11     outputs = model(inputs)
     12 input_tensor = list(enumerate(CAM_loader))
     13 

7 frames
/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

<ipython-input-18-2d563f39692b> in forward(self, x)
    101         x = self.avg_pool(x)
    102         x = x.view(x.size(0), -1)  # flatten to (bs, 2048)
--> 103         x = self.clf(x)
    104 
    105         return x

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/container.py in forward(self, input)
    215     def forward(self, input):
    216         for module in self:
--> 217             input = module(input)
    218         return input
    219 

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py in _call_impl(self, *args, **kwargs)
   1499                 or _global_backward_pre_hooks or _global_backward_hooks
   1500                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501             return forward_call(*args, **kwargs)
   1502         # Do not call functions when jit is used
   1503         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.10/dist-packages/torch/nn/modules/batchnorm.py in forward(self, input)
    169         used for normalization (i.e. in eval mode when buffers are not None).
    170         """
--> 171         return F.batch_norm(
    172             input,
    173             # If buffers are not to be tracked, ensure that they won't be updated

/usr/local/lib/python3.10/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
   2446         )
   2447     if training:
-> 2448         _verify_batch_size(input.size())
   2449 
   2450     return torch.batch_norm(

/usr/local/lib/python3.10/dist-packages/torch/nn/functional.py in _verify_batch_size(size)
   2414         size_prods *= size[i + 2]
   2415     if size_prods == 1:
-> 2416         raise ValueError("Expected more than 1 value per channel when training, got input size {}".format(size))
   2417 
   2418 

I think the third and the last error lines are the most important, and I set the Grad-CAM target layer to model.clf.
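
For context, this is roughly how I select that target layer (a minimal sketch assuming the pytorch-grad-cam package; the exact argument names may differ in other versions):

from pytorch_grad_cam import GradCAM

# Hypothetical sketch of my target-layer choice; model.clf is the
# classifier head shown in the summary below.
cam = GradCAM(model=model, target_layers=[model.clf])

The end of the model summary looks like this: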

       Sigmoid-288              [-1, 1, 5, 5]               0
SpatialAttentionModule-289              [-1, 1, 5, 5]               0
            CBAM-290           [-1, 1280, 5, 5]               0
AdaptiveAvgPool2d-291           [-1, 1280, 1, 1]               0
     BatchNorm1d-292                 [-1, 1280]           2,560
          Linear-293                    [-1, 3]           3,832

It looks like the output of BatchNorm1d-292 is what causes the problem, but I tried changing the target_layer and the error stays the same.
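
To double-check my reading of the error, here is a small standalone sketch (hypothetical tensor, not my actual model) of what I think is going on: BatchNorm1d in training mode rejects a batch of one sample, while the same call works in eval mode.

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(1280)   # same feature size as BatchNorm1d-292
x = torch.randn(1, 1280)    # flattened pooled features, batch size 1

bn.train()
try:
    bn(x)                   # raises the same ValueError as in the traceback
except ValueError as e:
    print(e)                # "Expected more than 1 value per channel when training, ..."

bn.eval()
print(bn(x).shape)          # torch.Size([1, 1280]) -- works in eval mode

If that is really the cause, then with my DataLoader's default batch size of 1 the forward pass would only work after calling model.eval() (or with a larger batch), but I am not sure whether that is the right fix for Grad-CAM here.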
