GAN: generating a regression output from a real image, not from random noise

165 views Asked by At

Is this concept possible to be implemented with the GAN algorithm?

I want the generator to produce a regression output (G-Value) of shape (4,) from a real image — not from random noise — and have the discriminator compare the G-Value against the real regression value (R-Value) of the same shape (4,). The R-Values come from the "y_train" dataset.

In other words, if an image contains, say, a circular pattern, it is described by 4 features: position x, y, z, and alpha. I call these the Real Values (R-Values), and I want the generator to produce fake values (G-Values) that fool the discriminator.

I have tried to implement it as below.

class UTModel:
    """GAN-style model that maps a (128, 128, 1) image to a 4-value regression
    output (G-Value) and trains a discriminator to tell G-Values apart from
    real regression targets (R-Values) of the same shape (4,).
    """

    def __init__(self):
        # Dropout rate shared by every Dropout layer in both networks.
        self.__dropout = .3

        # Separate optimizers so generator and discriminator updates
        # never interfere with each other's Adam moment estimates.
        self.optimizerGenerator = Adam(1e-4)
        self.optimizerDiscriminator = Adam(1e-4)

        self.generator, self.discriminator = self.build()

    def build(self):
        """Build and return (generator, discriminator).

        Returns:
            tuple: (generator, discriminator) as uncompiled Keras Sequential
            models. The generator maps (128, 128, 1) images to shape-(4,)
            regression vectors; the discriminator maps shape-(4,) vectors
            to a real/fake probability in (0, 1).
        """
        # Generator: conv feature extractor that ends in a linear Dense(4)
        # head, since the targets are unbounded regression values.
        g = Sequential()
        g.add(Conv2D(512, kernel_size=3, strides=2, input_shape=(128, 128, 1), padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Conv2D(256, kernel_size=3, strides=2, padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Conv2D(128, kernel_size=3, strides=2, padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Conv2D(64, kernel_size=3, strides=1, padding='same'))
        g.add(BatchNormalization(momentum=0.8))
        g.add(LeakyReLU(alpha=0.2))
        g.add(Dropout(self.__dropout))
        g.add(Flatten())
        # Linear activation: G-Values are unbounded regression outputs,
        # e.g. [-0.19, 0.21, -0.03, -0.015].
        g.add(Dense(4, activation='linear'))

        # Discriminator: small MLP over the 4-vector, sigmoid probability out.
        d = Sequential()
        d.add(Dense(128, input_shape=(4,)))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(64))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(64))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(32))
        d.add(LeakyReLU(alpha=0.2))
        d.add(Dropout(self.__dropout))
        d.add(Dense(1, activation='sigmoid'))

        return g, d

    def computeLosses(self, rValid, fValid):
        """Compute (discriminator_loss, generator_loss) from the
        discriminator's outputs on real values (rValid) and generated
        values (fValid).
        """
        # BUGFIX: the discriminator already applies a sigmoid, so its
        # outputs are probabilities, not logits. With from_logits=True the
        # sigmoid was applied twice, squashing gradients.
        bce = BinaryCrossentropy(from_logits=False)

        # Discriminator loss: real values labeled 1, generated values 0.
        rLoss = bce(tf.ones_like(rValid), rValid)
        fLoss = bce(tf.zeros_like(fValid), fValid)
        dLoss = rLoss + fLoss

        # BUGFIX: generator loss must use ONES as labels — the generator
        # wants the discriminator to classify its fakes as real. With
        # zeros_like (as before) the generator minimized the same
        # objective as the discriminator, so it never learned to fool it
        # and its outputs collapsed toward the extremes.
        gLoss = bce(tf.ones_like(fValid), fValid)

        return dLoss, gLoss

    def train(self, images, rValues):
        """Run one joint optimization step on a batch.

        Args:
            images: batch of input images, shape (batch, 128, 128, 1).
            rValues: matching real regression targets, shape (batch, 4).
        """
        with tf.GradientTape() as gTape, tf.GradientTape() as dTape:
            gValues = self.generator(images, training=True)

            rValid = self.discriminator(rValues, training=True)
            fValid = self.discriminator(gValues, training=True)

            dLoss, gLoss = self.computeLosses(rValid, fValid)

        # Each tape only feeds its own network's variables, so the two
        # updates are independent.
        dGradients = dTape.gradient(dLoss, self.discriminator.trainable_variables)
        gGradients = gTape.gradient(gLoss, self.generator.trainable_variables)

        self.optimizerDiscriminator.apply_gradients(zip(dGradients, self.discriminator.trainable_variables))
        self.optimizerGenerator.apply_gradients(zip(gGradients, self.generator.trainable_variables))

        print (dLoss, gLoss)


class UTTrainer:
    """Drives training: streams episodes from the pattern environment and
    feeds mini-batches of (image, R-Value) pairs to the model.
    """

    def __init__(self):
        # BUGFIX: `3DPatterns()` is not a legal Python identifier (names
        # cannot start with a digit) — the original line was a syntax
        # error. NOTE(review): rename the environment class accordingly,
        # or adjust this name to match the project's actual class.
        self.env = ThreeDPatterns()
        self.model = UTModel()

    def start(self):
        """Run training over the environment's episodes in batches of 32."""
        if not self.env.available:
            return

        batch = 32

        for epoch in range(1):
            # Set up a new episode; stop when the environment has no more.
            while self.env.setEpisod():
                # Slide over the episode in fixed-size batches; the final
                # slice may be shorter than `batch`, which is fine.
                for i in range(0, self.env.episodelen, batch):
                    self.model.train(self.env.episode[i:i+batch], self.env.y[i:i+batch])

But the G-Values are not being generated as valid values: they always converge to 1 or -1, whereas a proper output should look like [-0.192798, 0.212887, -0.034519, -0.015000]. Please help me find the right approach.

Thank you.

0

There are 0 answers