Changing a TensorFlow CNN model to PyTorch Lightning

I have made this CNN model in Keras and I want to convert it to PyTorch Lightning, but I'm having a hard time because I am not sure how to write the forward part...


import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations

needed_shapes = []
def expansion_block(x,t,filters):
    # 1x1 pointwise conv that widens the channel count by a factor of t
    total_filters = t*filters
    # print("input_expansion_black",x.shape)
    # needed_shapes.append(x.shape)
    x = layers.Conv2D(filters = total_filters, kernel_size = (1,1),padding = 'same', use_bias = False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU(6)(x)
    return x
def depthwise_block(x,stride):
    # 5x5 depthwise conv, one filter per channel
    x = layers.DepthwiseConv2D(5,strides=(stride,stride),padding ='same', use_bias = False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU(6)(x)
    return x

def projection_block(x, out_channels):
    # 1x1 conv back down to out_channels, with no activation afterwards
    x = layers.Conv2D(filters = out_channels,kernel_size = (1,1),padding = 'same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    # needed_shapes.append(x.shape)
    return x

def Bottleneck(x,t,filters,out_channels,stride):
    # inverted residual: expand -> depthwise -> project, with a skip connection when channel counts match
    y = expansion_block(x,t,filters)
    y = depthwise_block(y,stride)
    y = projection_block(y,out_channels)
    if y.shape[-1]==x.shape[-1]:
        y = layers.add([x,y])
    return y

class ChannelSum(keras.layers.Layer):
    def __init__(self, sum_dim=2):
        super(ChannelSum, self).__init__()
        self.sum_dim = sum_dim
    def call(self, x):
        # sum every sum_dim consecutive channels, shrinking the channel axis by a factor of sum_dim
        return tf.reduce_sum(tf.reshape(x, (-1, x.shape[1], x.shape[2], x.shape[3]//self.sum_dim, self.sum_dim)), -1)


inputs = keras.Input(shape = input_image_shape, name='bottleneck')
concat_list = []
for i in range(4): # defined_out_channels = 8
    globals()['a{}'.format(i)] = layers.Conv2D(defined_out_channels,kernel_size=3,strides=(2,2),padding = 'same', use_bias=False)(inputs)
    globals()['a{}'.format(i)]= layers.BatchNormalization()(globals()['a{}'.format(i)])
    globals()['a{}'.format(i)] = layers.ReLU(6)(globals()['a{}'.format(i)])
    #1
    globals()['a{}'.format(i)] = Bottleneck(x = globals()['a{}'.format(i)], t = 9, filters = globals()['a{}'.format(i)].shape[-1], out_channels = defined_out_channels, stride = 1)
    #2
    globals()['a{}'.format(i)] = Bottleneck(x = globals()['a{}'.format(i)], t = 9, filters = inputs.shape[-1], out_channels = defined_out_channels, stride = 1)
    #3
    globals()['a{}'.format(i)] = Bottleneck(x = globals()['a{}'.format(i)], t = 9, filters = inputs.shape[-1], out_channels = defined_out_channels, stride = 1)
    globals()['a{}'.format(i)] = activations.relu(globals()['a{}'.format(i)])

    
    globals()['a{}'.format(i)] = layers.AveragePooling2D(2,2)(globals()['a{}'.format(i)])
    for j in range(2):
        #5
        globals()['b{}'.format(i,j)] = Bottleneck(x = globals()['a{}'.format(i)], t = 9, filters = globals()['a{}'.format(i)].shape[-1],out_channels = defined_out_channels, stride = 1)
        #1
        globals()['b{}'.format(i,j)] = Bottleneck(x = globals()['b{}'.format(i,j)], t = 9, filters = globals()['b{}'.format(i,j)].shape[-1],out_channels = 16, stride = 1)
        #2
        globals()['b{}'.format(i,j)] = Bottleneck(x = globals()['b{}'.format(i,j)], t = 9, filters = globals()['b{}'.format(i,j)].shape[-1],out_channels = 16, stride = 1)
        #3
        globals()['b{}'.format(i,j)] = Bottleneck(x = globals()['b{}'.format(i,j)], t = 9, filters = globals()['b{}'.format(i,j)].shape[-1],out_channels = 16, stride = 1)
        globals()['b{}'.format(i,j)] = layers.AveragePooling2D(2,2)(globals()['b{}'.format(i,j)])
        #3
        globals()['b{}'.format(i,j)] = Bottleneck(x = globals()['b{}'.format(i,j)], t = 9, filters = globals()['b{}'.format(i,j)].shape[-1],out_channels = 62, stride = 1)
        globals()['b{}'.format(i,j)] = activations.relu(globals()['b{}'.format(i,j)])
        globals()['b{}'.format(i,j)] = layers.AveragePooling2D(2,2)(globals()['b{}'.format(i,j)])

        
        for k in range(1):
            #6
            globals()['c{}'.format(i,j,k)] = Bottleneck(x = globals()['b{}'.format(i,j)], t = 9, filters = globals()['b{}'.format(i,j)].shape[-1],out_channels = 248, stride = 1)
            # globals()['c{}'.format(i,j,k)] = Bottleneck(x = globals()['b{}'.format(i,j)], t = 9, filters = globals()['b{}'.format(i,j)].shape[-1],out_channels = 32, stride = 1)
            globals()['c{}'.format(i,j,k)] = activations.relu(globals()['c{}'.format(i,j,k)])
            globals()['c{}'.format(i,j,k)] = layers.AveragePooling2D(2,2)(globals()['c{}'.format(i,j,k)])
            concat_list.append(globals()['c{}'.format(i,j,k)])
                                                                      
        
z = layers.Concatenate(axis = -1)(concat_list)
# z = Add()(concat_list)

z = layers.AveragePooling2D(2,2)(z)
flatten = layers.Flatten()(z)
softmax_result = layers.Dense(num_classes, activation = 'softmax')(flatten)
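
For reference, the graph above is turned into a model in the usual Keras functional-API way (a minimal sketch; input_image_shape, defined_out_channels and num_classes are defined earlier in my notebook):

model = keras.Model(inputs=inputs, outputs=softmax_result)
model.summary()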

This is the PyTorch Lightning code I was working on, but I started to think it might not do what I intended:

import torch
import torch.nn as nn
import pytorch_lightning as pl


class Bottleneck(nn.Module):
    def __init__(self, in_channels, out_channels, t, stride):
        super(Bottleneck, self).__init__()
        # expansion: 1x1 pointwise conv that widens the channels from in_channels to t * in_channels
        self.expansion = nn.Conv2d(in_channels, t * in_channels, kernel_size=1, bias=False)
        self.expansion_bn = nn.BatchNorm2d(t * in_channels)
        self.expansion_relu = nn.ReLU6()
        
        # depthwise: 5x5 conv applied per channel (groups = number of channels)
        self.depthwise = nn.Conv2d(t * in_channels, t * in_channels, kernel_size=5, stride=stride, padding=2, groups=t * in_channels, bias=False)
        self.depthwise_bn = nn.BatchNorm2d(t * in_channels)
        self.depthwise_relu = nn.ReLU6()

        # projection: 1x1 conv back down to out_channels, with no activation afterwards
        self.projection = nn.Conv2d(t * in_channels, out_channels, kernel_size=1, bias=False)
        self.projection_bn = nn.BatchNorm2d(out_channels)
        
    def forward(self, x):
        y = self.expansion(x)
        y = self.expansion_bn(y)
        y = self.expansion_relu(y)

        y = self.depthwise(y)
        y = self.depthwise_bn(y)
        y = self.depthwise_relu(y)

        y = self.projection(y)
        y = self.projection_bn(y)

        # residual connection: in PyTorch (NCHW) the channel axis is dim 1, not -1
        if y.shape[1] == x.shape[1]:
            y = y + x
        return y
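
For reference, a quick sanity check of the Bottleneck block on a dummy input would look like this (the shapes below are just example values I picked, not the real data):

block = Bottleneck(in_channels=8, out_channels=8, t=9, stride=1)
out = block(torch.randn(1, 8, 32, 32))
print(out.shape)  # torch.Size([1, 8, 32, 32]); the residual is added because the channel counts match
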
class MyModel(pl.LightningModule):
    def __init__(self,defined_out_channels=8, num_classes=1000, t_value=9):
        super(MyModel, self).__init__()
        self.a = nn.ModuleList()
        self.b = nn.ModuleList()
        
        self.defined_out_channels = defined_out_channels
        self.num_classes = num_classes
        self.t_value = t_value
        # bottleneck -> int_channel, out_channel, t , stride
        for i in range(4):
            a_=nn.Sequential(
                nn.Conv2d(in_channels=3, out_channels=defined_out_channels, kernel_size=3, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(defined_out_channels),
                nn.ReLU6(),
            )
            # the bottleneck blocks have to be applied after the initial convolution above
            a_.add_module('bottleneck1', Bottleneck(in_channels=defined_out_channels, out_channels=defined_out_channels, t=t_value, stride=1))
            a_.add_module('bottleneck2', Bottleneck(in_channels=defined_out_channels, out_channels=defined_out_channels, t=t_value, stride=1))
            a_.add_module('bottleneck3', Bottleneck(in_channels=defined_out_channels, out_channels=defined_out_channels, t=t_value, stride=1))
            a_.add_module('relu', nn.ReLU6())
            a_.add_module('avg_pool', nn.AvgPool2d(kernel_size=2, stride=2))
            
            # self.a is an nn.ModuleList, so a_ is appended to it like a regular Python list
            self.a.append(a_)
            
            for j in range(2):
                b_ = nn.Sequential()
                b_.add_module('b_bottleneck1', Bottleneck(in_channels=defined_out_channels, out_channels=16, t=t_value, stride=1))
                b_.add_module('b_bottleneck2', Bottleneck(in_channels=16, out_channels=16, t=t_value, stride=1))
                b_.add_module('b_bottleneck3', Bottleneck(in_channels=16, out_channels=16, t=t_value, stride=1))
                b_.add_module('b_bottleneck4', Bottleneck(in_channels=16, out_channels=16, t=t_value, stride=1))
                b_.add_module('b_avg_pool', nn.AvgPool2d(kernel_size=2, stride=2))
                b_.add_module('b_bottleneck5', Bottleneck(in_channels=16, out_channels=62, t=t_value, stride=1))
                b_.add_module('relu6', nn.ReLU6())
                self.b.append(b_)
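
This is roughly what I think the forward part should look like to mirror the Keras graph, but it is only a sketch under my own assumptions: each a branch is paired with its two b branches, the c stage (the Bottleneck to 248 channels plus ReLU and pooling) is not ported yet, self.classifier is an assumed final nn.LazyLinear(num_classes) (or nn.Linear) that would still have to be added in __init__, and the training_step / configure_optimizers are just generic Lightning boilerplate:

    def forward(self, x):
        outputs = []
        for i, a_branch in enumerate(self.a):
            y = a_branch(x)
            # each a branch feeds two b branches, like the j-loop in the Keras graph
            for j in range(2):
                outputs.append(self.b[2 * i + j](y))
        # concatenate along the channel axis (dim=1 in NCHW); Keras' Concatenate(axis=-1) also acts on channels
        z = torch.cat(outputs, dim=1)
        z = nn.functional.avg_pool2d(z, kernel_size=2, stride=2)
        z = torch.flatten(z, start_dim=1)
        return self.classifier(z)  # assumed: nn.LazyLinear(num_classes) defined in __init__

    def training_step(self, batch, batch_idx):
        x, target = batch
        logits = self(x)
        loss = nn.functional.cross_entropy(logits, target)
        self.log('train_loss', loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)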

Just in case, I'll also attach a diagram of what the CNN model looks like.

[model architecture diagram]
