I tried to implement a neural network with residual connections and recreate LeNet-5, but I can't set up the architecture. Here is the residual connection block:
class ResidualBlock(torch.nn.Module): 
    def __init__(  
        self,
        input_c,  # number of input channels
        output_c,  # number of output channels
        kernel_size,  # kernel size
        activation=torch.nn.ReLU,  # activation function class
    ):
        super().__init__()
        # Activation function for the non-linearity
        self.activation = activation()
        # Choose the padding so that the image height and width do not change
        #  (we assume the kernel size is always odd)
        padding_size = (kernel_size - 1) // 2
        # Convolution operation
        self.conv = torch.nn.Conv2d(
            in_channels=input_c,
            out_channels=output_c,
            kernel_size=kernel_size,
            padding=padding_size,
            padding_mode="zeros",
        )
        # If the numbers of input and output channels differ, match them with a
        #  convolution with a kernel of size 1
        if input_c != output_c:
            self.correct_channels = torch.nn.Conv2d(
                in_channels=input_c,
                out_channels=output_c,
                kernel_size=1,
            )
        else:
            self.correct_channels = torch.nn.Identity()
Here's the architecture:
class MyModel(torch.nn.Module):
    def __init__(self):
      super().__init__()
      
      self.conv1 = ResidualBlock(input_c=1, output_c=6, kernel_size=5,)
      self.maxpool1 = torch.nn.MaxPool2d(kernel_size=2)
      self.conv2 = ResidualBlock(input_c=6, output_c=16, kernel_size=5)
      self.maxpool2 = torch.nn.MaxPool2d(kernel_size=2)
      self.conv3 = ResidualBlock(input_c=16, output_c=120, kernel_size=5)
      self.lin1 = torch.nn.Linear(in_features=120, out_features=84)
      self.relu1 = torch.nn.ReLU()
      self.lin2 = torch.nn.Linear(in_features=84, out_features=10)
      self.softmax = torch.nn.Softmax()
    def forward(self, x):
      x = self.activation(self.conv(x) + self.correct_channels(x))
      x = self.maxpool1(x)
      x = self.activation(self.conv(x) + self.correct_channels(x))
      x = self.maxpool2(x)
      x = self.activation(self.conv(x) + self.correct_channels(x))
      x = self.lin1(x)
      x = self.relu1(x)
      x = self.lin2(x)
      x = self.softmax(x)
      return x
model = MyModel()
I tried changing the sizes of the input and output channels, but nothing worked.
I've made some modifications to your code.

First, move the self.activation(self.conv(x) + self.correct_channels(x)) line into the ResidualBlock class for clarity.
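As a minimal sketch (keeping your __init__ unchanged), the block would get its own forward method:

    def forward(self, x):
        # Residual connection: add the (channel-matched) input to the
        #  convolution output, then apply the activation
        return self.activation(self.conv(x) + self.correct_channels(x))

With this in place, MyModel.forward can simply call self.conv1(x), self.conv2(x), and self.conv3(x) instead of the undefined self.conv and self.correct_channels.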
Next, add x = x.view(x.shape[0], -1) to flatten the extracted convolutional features.

Finally, the size of self.lin1 depends on the size of your input tensor. In the provided code, I assumed the input size to be [8, 1, 256, 256]. Since you use torch.nn.MaxPool2d twice in your model, the tensor size would be [8, 120, 64, 64] after passing through self.conv3. Consequently, the tensor size would change to [8, 491520] (120 * 64 * 64 = 491520) after passing through x.view(x.shape[0], -1), so self.lin1 requires an adjustment to torch.nn.Linear(in_features=120*64*64, out_features=84). Similarly, if your input tensor shape differs, such as [8, 1, 1000, 1000], you should set self.lin1 to torch.nn.Linear(in_features=120*250*250, out_features=84).

For more information about LeNet-5, you can refer to this repository: https://github.com/ChawDoe/LeNet5-MNIST-PyTorch/blob/master/model.py
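Putting it together, here is a sketch of the adjusted model under the assumed [8, 1, 256, 256] input shape (the in_features value of lin1 is the part you would change for other input sizes):

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = ResidualBlock(input_c=1, output_c=6, kernel_size=5)
        self.maxpool1 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv2 = ResidualBlock(input_c=6, output_c=16, kernel_size=5)
        self.maxpool2 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv3 = ResidualBlock(input_c=16, output_c=120, kernel_size=5)
        # Two 2x2 poolings turn 256x256 inputs into 64x64 feature maps
        self.lin1 = torch.nn.Linear(in_features=120 * 64 * 64, out_features=84)
        self.relu1 = torch.nn.ReLU()
        self.lin2 = torch.nn.Linear(in_features=84, out_features=10)
        self.softmax = torch.nn.Softmax(dim=1)  # dim=1: softmax over the class scores

    def forward(self, x):
        # The residual blocks now apply the skip connection and activation themselves
        x = self.maxpool1(self.conv1(x))
        x = self.maxpool2(self.conv2(x))
        x = self.conv3(x)
        x = x.view(x.shape[0], -1)  # flatten [N, 120, 64, 64] -> [N, 491520]
        x = self.relu1(self.lin1(x))
        x = self.lin2(x)
        return self.softmax(x)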