I am new to Python and deep learning. Please help me correct this error. The class was originally written for the MNIST dataset (28 x 28); I tried to adapt it to my own work, where the images are 224 x 224. I changed the input image shape, but I still get an incompatible-shape error, and the model still uses the old MNIST shapes. The data I am using has these shapes: X_train=(676, 224, 224), y_train=(676,), X_test=(170, 224, 224), y_test=(170,).
The code:
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, concatenate
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
class INFOGAN():
    def __init__(self):
        self.img_rows = 224
        self.img_cols = 224
        self.channels = 1
        self.num_classes = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 72

        optimizer = Adam(0.0002, 0.5)
        losses = ['binary_crossentropy', self.mutual_info_loss]

        # Build the discriminator and recognition network
        self.discriminator, self.auxilliary = self.build_disk_and_q_net()
        self.discriminator.compile(loss=['binary_crossentropy'],
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build and compile the recognition network Q
        self.auxilliary.compile(loss=[self.mutual_info_loss],
                                optimizer=optimizer,
                                metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        gen_input = Input(shape=(self.latent_dim,))
        img = self.generator(gen_input)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated image as input and determines validity
        valid = self.discriminator(img)

        # The recognition network produces the label
        target_label = self.auxilliary(img)

        # The combined model (stacked generator and discriminator)
        self.combined = Model(gen_input, [valid, target_label])
        self.combined.compile(loss=losses, optimizer=optimizer)

    def build_generator(self):
        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        model.summary()

        return Model(gen_input, img)

    def build_disk_and_q_net(self):
        img = Input(shape=self.img_shape)

        # Shared layers between discriminator and recognition network
        model = Sequential()
        model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Flatten())

        img_embedding = model(img)

        # Discriminator
        validity = Dense(1, activation='sigmoid')(img_embedding)

        # Recognition
        q_net = Dense(128, activation='relu')(img_embedding)
        label = Dense(self.num_classes, activation='softmax')(q_net)

        # Return discriminator and recognition network
        return Model(img, validity), Model(img, label)

    def mutual_info_loss(self, c, c_given_x):
        """The mutual information metric we aim to minimize"""
        eps = 1e-8
        conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))
        entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))
        return conditional_entropy + entropy

    def sample_generator_input(self, batch_size):
        # Generator inputs
        sampled_noise = np.random.normal(0, 1, (batch_size, 62))
        sampled_labels = np.random.randint(0, self.num_classes, batch_size).reshape(-1, 1)
        sampled_labels = to_categorical(sampled_labels, num_classes=self.num_classes)
        return sampled_noise, sampled_labels

    def train(self, epochs, batch_size=128, sample_interval=50):
        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)
        y_train = y_train.reshape(-1, 1)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Sample noise and categorical labels
            sampled_noise, sampled_labels = self.sample_generator_input(batch_size)
            gen_input = np.concatenate((sampled_noise, sampled_labels), axis=1)

            # Generate a half batch of new images
            gen_imgs = self.generator.predict(gen_input)

            # Train on real and generated data
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)

            # Avg. loss
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator and Q-network
            # ---------------------
            g_loss = self.combined.train_on_batch(gen_input, [valid, sampled_labels])

            # Plot the progress
            print("%d [D loss: %.2f, acc.: %.2f%%] [Q loss: %.2f] [G loss: %.2f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss[1], g_loss[2]))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)

    def sample_images(self, epoch):
        r, c = 10, 10
        fig, axs = plt.subplots(r, c)
        for i in range(c):
            sampled_noise, _ = self.sample_generator_input(c)
            label = to_categorical(np.full(fill_value=i, shape=(r, 1)), num_classes=self.num_classes)
            gen_input = np.concatenate((sampled_noise, label), axis=1)
            gen_imgs = self.generator.predict(gen_input)
            gen_imgs = 0.5 * gen_imgs + 0.5
            for j in range(r):
                axs[j, i].imshow(gen_imgs[j, :, :, 0], cmap='gray')
                axs[j, i].axis('off')
        fig.savefig("images/%d.png" % epoch)
        plt.close()

    def save_model(self):

        def save(model, model_name):
            model_path = "saved_model/%s.json" % model_name
            weights_path = "saved_model/%s_weights.hdf5" % model_name
            options = {"file_arch": model_path,
                       "file_weight": weights_path}
            json_string = model.to_json()
            open(options['file_arch'], 'w').write(json_string)
            model.save_weights(options['file_weight'])

        save(self.generator, "generator")
        save(self.discriminator, "discriminator")


if __name__ == '__main__':
    infogan = INFOGAN()
    infogan.train(epochs=50000, batch_size=128, sample_interval=50)
The error:
Model: "sequential_23"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_47 (Dense) (None, 6272) 457856
_________________________________________________________________
reshape_11 (Reshape) (None, 7, 7, 128) 0
_________________________________________________________________
batch_normalization_87 (Batc (None, 7, 7, 128) 512
_________________________________________________________________
up_sampling2d_40 (UpSampling (None, 14, 14, 128) 0
_________________________________________________________________
conv2d_99 (Conv2D) (None, 14, 14, 128) 147584
_________________________________________________________________
activation_42 (Activation) (None, 14, 14, 128) 0
_________________________________________________________________
batch_normalization_88 (Batc (None, 14, 14, 128) 512
_________________________________________________________________
up_sampling2d_41 (UpSampling (None, 28, 28, 128) 0
_________________________________________________________________
conv2d_100 (Conv2D) (None, 28, 28, 64) 73792
_________________________________________________________________
activation_43 (Activation) (None, 28, 28, 64) 0
_________________________________________________________________
batch_normalization_89 (Batc (None, 28, 28, 64) 256
_________________________________________________________________
conv2d_101 (Conv2D) (None, 28, 28, 1) 577
_________________________________________________________________
activation_44 (Activation) (None, 28, 28, 1) 0
=================================================================
Total params: 681,089
Trainable params: 680,449
Non-trainable params: 640
_________________________________________________________________
WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 1) for input Tensor("input_22:0", shape=(None, 224, 224, 1), dtype=float32), but it was called on an input with incompatible shape (None, 28, 28, 1).
WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 1) for input Tensor("conv2d_95_input:0", shape=(None, 224, 224, 1), dtype=float32), but it was called on an input with incompatible shape (None, 28, 28, 1).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-45-60a1c6b0bc8b> in <module>()
225
226 if __name__ == '__main__':
--> 227 infogan = INFOGAN()
228 infogan.train(epochs=50000, batch_size=128, sample_interval=50)
7 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
214 ' incompatible with the layer: expected axis ' + str(axis) +
215 ' of input shape to have value ' + str(value) +
--> 216 ' but received input with shape ' + str(shape))
217 # Check shape.
218 if spec.shape is not None:
ValueError: Input 0 of layer dense_44 is incompatible with the layer: expected axis -1 of input shape to have value 115200 but received input with shape [None, 2048]
You forgot to change the architecture of the generator. The generator's output shape and the discriminator's input shape have to match; that is what's causing the error.

To fix it, you need to change the architecture: the generator currently produces images of shape (28, 28, 1), but you want (224, 224, 1). The shape a network produces is the result of the architecture itself and its parameters.

So I added two UpSampling2D layers and changed the size of the other layers to match the discriminator's input. Since 224 = 14 x 2^4, the generator can start from a 14 x 14 feature map and upsample four times (14 -> 28 -> 56 -> 112 -> 224).

Also, I removed the ZeroPadding2D layer from the discriminator, since it made the intermediate shape odd (15, 15, ...), which made it impossible to produce a matching size in the generator.
Here's the code:
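(What follows is a sketch of the two adjusted methods, written as drop-in replacements for build_generator and build_disk_and_q_net in the class above. The filter counts in the added blocks are one reasonable choice rather than something forced by the fix; what matters is the 14 x 14 starting size, the four upsampling steps, and the removed ZeroPadding2D.)

    def build_generator(self):
        model = Sequential()

        # Start from a 14 x 14 feature map; four UpSampling2D layers then
        # reach the discriminator's 224 x 224 input (14 -> 28 -> 56 -> 112 -> 224)
        model.add(Dense(128 * 14 * 14, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((14, 14, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))  # output shape: (224, 224, 1)

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        model.summary()

        return Model(gen_input, img)

    def build_disk_and_q_net(self):
        img = Input(shape=self.img_shape)

        # Shared layers between discriminator and recognition network.
        # No ZeroPadding2D here: 224 halves cleanly four times
        # (224 -> 112 -> 56 -> 28 -> 14), so no padding fix-up is needed.
        model = Sequential()
        model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Flatten())  # 14 * 14 * 512 = 100352 features

        img_embedding = model(img)

        # Discriminator head
        validity = Dense(1, activation='sigmoid')(img_embedding)

        # Recognition head
        q_net = Dense(128, activation='relu')(img_embedding)
        label = Dense(self.num_classes, activation='softmax')(q_net)

        return Model(img, validity), Model(img, label)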
And the summaries now line up: the generator's final Activation layer reports (None, 224, 224, 1), and the discriminator's Flatten layer reports (None, 100352), i.e. 14 * 14 * 512.
EDIT:
Because you decreased the number of classes from 10 to 3, you also have to change the latent_dim parameter to 65. Notice that the method sample_generator_input generates noise of size 62 and labels of size num_classes, which are then concatenated (the size becomes 62 + 3 = 65). Since the generator is defined to accept an input_dim of self.latent_dim, it would be more appropriate to calculate latent_dim in the constructor from the number of classes: self.latent_dim = 62 + self.num_classes.
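In the constructor, that would look like:

        self.num_classes = 3
        # generator input = 62-dimensional noise + one one-hot entry per class
        self.latent_dim = 62 + self.num_classes  # 62 + 3 = 65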
Moreover, the method sample_images contains hardcoded magic numbers. How can one know what they mean? I mean this line: r, c = 10, 10. I assume it refers to the number of classes. Since you changed that from 10 to 3 in your example, I suggest you change the line to r, c = self.num_classes, self.num_classes, as in the sketch below.
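A minimal sketch of the method with that change applied. Note that r must stay equal to c here, because sample_generator_input(c) returns c noise rows, which are concatenated along axis 1 with the r label rows:

    def sample_images(self, epoch):
        # One column per class and, for simplicity, as many rows as classes,
        # since the noise batch and the label batch must have equal length
        r, c = self.num_classes, self.num_classes
        fig, axs = plt.subplots(r, c)
        for i in range(c):
            sampled_noise, _ = self.sample_generator_input(c)
            label = to_categorical(np.full(fill_value=i, shape=(r, 1)), num_classes=self.num_classes)
            gen_input = np.concatenate((sampled_noise, label), axis=1)
            gen_imgs = self.generator.predict(gen_input)
            gen_imgs = 0.5 * gen_imgs + 0.5
            for j in range(r):
                axs[j, i].imshow(gen_imgs[j, :, :, 0], cmap='gray')
                axs[j, i].axis('off')
        fig.savefig("images/%d.png" % epoch)
        plt.close()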
Overall, the code is badly written, and if you change one constant, everything breaks. Be careful when copying full pieces of code, and make sure you understand each and every part of it before using it. The full code is the class from the question with the changes above applied.