import numpy as np
import torch
import torch.nn as nn

# opt (providing latent_dim and n_classes) and img_shape are defined
# elsewhere in the surrounding script.

class Generator(nn.Module):
    def __init__(self):  # initializes all the required layers
        super(Generator, self).__init__()

        self.label_emb = nn.Embedding(opt.n_classes, opt.n_classes)

        def block(in_feat, out_feat, normalize=True):
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *block(opt.latent_dim + opt.n_classes, 128, normalize=False),
            *block(128, 256),
            *block(256, 512),
            *block(512, 1024),
            nn.Linear(1024, int(np.prod(img_shape))),
            nn.Tanh()
        )

    def forward(self, noise, labels):
        # Concatenate label embedding and noise to produce the generator input
        gen_input = torch.cat((self.label_emb(labels), noise), -1)
        img = self.model(gen_input)
        img = img.view(img.size(0), *img_shape)
        return img
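A minimal usage sketch (the opt namespace, its latent_dim/n_classes values, and img_shape below are illustrative stand-ins for whatever the surrounding script actually defines):

    import argparse
    import torch

    # Illustrative globals; the real script defines its own opt and img_shape
    # in the same module as the Generator class.
    opt = argparse.Namespace(latent_dim=100, n_classes=10)
    img_shape = (1, 28, 28)

    generator = Generator()
    z = torch.randn(16, opt.latent_dim)              # batch of 16 noise vectors
    labels = torch.randint(0, opt.n_classes, (16,))  # one class label per sample
    imgs = generator(z, labels)
    print(imgs.shape)  # torch.Size([16, 1, 28, 28])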

1 Answer

Sascha Kirch:

See the PyTorch documentation for nn.BatchNorm1d and nn.LeakyReLU for the full parameter descriptions.

nn.BatchNorm1d(out_feat, 0.8): the second positional argument of BatchNorm1d is eps, so the 0.8 here is the epsilon term in the batch-norm calculation. It is added to the variance for numerical stability during training, preventing division by zero.

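A quick way to confirm which parameter that positional argument sets (a minimal sketch; the 128 is just an arbitrary feature count):

    import torch.nn as nn

    # y = (x - E[x]) / sqrt(Var[x] + eps) * gamma + beta
    bn = nn.BatchNorm1d(128, 0.8)  # second positional argument is eps
    print(bn.eps)       # 0.8
    print(bn.momentum)  # 0.1 (the default, unchanged)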

nn.LeakyReLU(0.2, inplace=True): the 0.2 is the negative_slope argument, often referred to as alpha; it is the slope of the LeakyReLU function for x < 0.

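In scalar form the activation is simply (a minimal sketch, using the 0.2 slope from the code above):

    def leaky_relu(x, negative_slope=0.2):
        # identity for non-negative inputs, scaled-down identity for negative inputs
        return x if x >= 0 else negative_slope * x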

If you look closely, the slope for x < 0 is small but non-zero, so negative inputs still pass a scaled-down signal (and a gradient) instead of being clamped to zero as with a plain ReLU.

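A small check of that behaviour (values assume the 0.2 slope):

    import torch
    import torch.nn as nn

    act = nn.LeakyReLU(0.2)
    x = torch.tensor([-2.0, -0.5, 0.0, 1.0])
    print(act(x))  # tensor([-0.4000, -0.1000, 0.0000, 1.0000])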