Import Libraries
%matplotlib inline
import tensorflow as tf
from tensorflow import keras
import numpy as np
import plot_utils
import matplotlib.pyplot as plt
from tqdm import tqdm
print('Tensorflow version:', tf.__version__)
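x_train is not shown in the post; presumably it is CIFAR-10 loaded through keras.datasets and scaled to [0, 1]. A minimal sketch under that assumption (note that dividing the uint8 array by 255.0 promotes it to float64, which matters later):
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train / 255.0  # NumPy promotes uint8 / float to float64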
Task 3: Create Batches of Training Data
batch_size = 32
# shuffle() fills a buffer with buffer_size elements, then randomly samples
# elements from this buffer, replacing the selected elements with new elements.
dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(1000)
# batch() combines consecutive elements of this dataset into batches;
# prefetch() lets the pipeline prepare the next batch while the current one is being consumed.
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)
print(dataset)
Output: <PrefetchDataset shapes: (32, 32, 32, 3), types: tf.float64>
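Note the tf.float64 element type; it comes back in Task 8. To check it programmatically (this check is an addition, not part of the original notebook):
print(dataset.element_spec)
# TensorSpec(shape=(32, 32, 32, 3), dtype=tf.float64, name=None)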
Task 4: Build the Generator Network for DCGAN
num_features = 100
generator = keras.models.Sequential([
    keras.layers.Dense(256 * 4 * 4, input_shape=[num_features]),
    keras.layers.Reshape([4, 4, 256]),
    keras.layers.BatchNormalization(),
    # 4x4 -> 8x8
    keras.layers.Conv2DTranspose(128, (4, 4), (2, 2), padding="same", activation="selu"),
    keras.layers.BatchNormalization(),
    # 8x8 -> 16x16
    keras.layers.Conv2DTranspose(128, (4, 4), (2, 2), padding="same", activation="selu"),
    keras.layers.BatchNormalization(),
    # 16x16 -> 32x32
    keras.layers.Conv2DTranspose(128, (4, 4), (2, 2), padding="same", activation="selu"),
    keras.layers.BatchNormalization(),
    # stride 1 keeps 32x32; tanh maps the 3 output channels to [-1, 1]
    keras.layers.Conv2DTranspose(3, (3, 3), padding="same", activation="tanh"),
])
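To double-check the upsampling path (4x4 -> 8x8 -> 16x16 -> 32x32, ending in 3 channels), you can print the model summary:
generator.summary()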
def show(images, n_cols=None):
    n_cols = n_cols or len(images)
    n_rows = (len(images) - 1) // n_cols + 1
    if images.shape[-1] == 1:
        images = np.squeeze(images, axis=-1)
    plt.figure(figsize=(n_cols, n_rows))
    for index, image in enumerate(images):
        plt.subplot(n_rows, n_cols, index + 1)
        plt.imshow(image, cmap="binary")
        plt.axis("off")
noise = tf.random.normal(shape=[1, num_features])
generated_images = generator(noise, training=False)
show(generated_images,1)
Task 5: Build the Discriminator Network for DCGAN
discriminator = keras.models.Sequential([
    # 32x32 -> 16x16
    keras.layers.Conv2D(64, (3, 3), (2, 2), padding="same", input_shape=[32, 32, 3]),
    keras.layers.LeakyReLU(0.2),
    keras.layers.Dropout(0.3),
    # 16x16 -> 8x8
    keras.layers.Conv2D(128, (3, 3), (2, 2), padding="same"),
    keras.layers.LeakyReLU(0.2),
    keras.layers.Dropout(0.3),
    # 8x8 -> 4x4
    keras.layers.Conv2D(256, (3, 3), (2, 2), padding="same"),
    keras.layers.LeakyReLU(0.2),
    keras.layers.Dropout(0.3),
    keras.layers.Flatten(),
    # single probability: is the input image real?
    keras.layers.Dense(1, activation='sigmoid')
])
decision = discriminator(generated_images)
print(decision)
Output: tf.Tensor([[0.5006197]], shape=(1, 1), dtype=float32)
Task 6: Compile the Deep Convolutional Generative Adversarial Network (DCGAN)
# Compile the discriminator for its own training phase.
discriminator.compile(loss="binary_crossentropy", optimizer="rmsprop")
# Freeze the discriminator inside the combined model. The trainable flag is only
# taken into account when a model is compiled, so the standalone discriminator
# compiled above keeps updating its weights.
discriminator.trainable = False
gan = keras.models.Sequential([generator, discriminator])
gan.compile(loss="binary_crossentropy", optimizer="rmsprop")

from IPython import display

# Fixed noise so the sample grids are comparable from epoch to epoch.
seed = tf.random.normal(shape=[batch_size, 100])
Task 7: Define Training Procedure
def train_dcgan(gan, dataset, batch_size, num_features, epochs=5):
    generator, discriminator = gan.layers
    for epoch in tqdm(range(epochs)):
        print("Epoch {}/{}".format(epoch + 1, epochs))
        for X_batch in dataset:
            # Phase 1: train the discriminator on generated (label 0) and real (label 1) images.
            noise = tf.random.normal(shape=[batch_size, num_features])
            generated_images = generator(noise)
            X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
            y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
            discriminator.trainable = True
            discriminator.train_on_batch(X_fake_and_real, y1)
            # Phase 2: train the generator through the frozen discriminator,
            # asking for the fakes to be classified as real (label 1).
            noise = tf.random.normal(shape=[batch_size, num_features])
            y2 = tf.constant([[1.]] * batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y2)
        # Produce images for the GIF as we go
        display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)
    display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)
## Source https://www.tensorflow.org/tutorials/generative/dcgan#create_a_gif
def generate_and_save_images(model, epoch, test_input):
    # Notice `training` is set to False.
    # This is so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(10, 10))
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='binary')
        plt.axis('off')
    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()
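This helper is copied from the MNIST tutorial linked above, so it plots only the first (red) channel through a colormap. For 3-channel CIFAR-10 samples you could instead rescale the tanh output from [-1, 1] back to [0, 1] and show it in color; a possible replacement for the imshow line:
plt.imshow((predictions[i] + 1) / 2)  # rescale [-1, 1] -> [0, 1] and plot all three channels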
Task 8: Train DCGAN
x_train_dcgan = x_train.reshape(-1, 32, 32, 3) * 2. - 1.  # rescale [0, 1] -> [-1, 1] to match the generator's tanh output
batch_size = 32
dataset = tf.data.Dataset.from_tensor_slices(x_train_dcgan)
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)
This is where the problem shows up:
%%time
train_dcgan(gan, dataset, batch_size, num_features, epochs=10)
Output:
7 noise = tf.random.normal(shape=[batch_size, num_features])
8 generated_images = generator(noise)
----> 9 X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
10 y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
11 discriminator.trainable = True
cannot compute ConcatV2 as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:ConcatV2] name: concat
This is a CIFAR-10 DCGAN. I really don't understand this error or how to fix it.
By default, TensorFlow works in float32, so the generator's output is a float32 tensor, while X_batch comes from x_train, which is float64 (see the tf.float64 in the dataset printout in Task 3, most likely because the images were scaled with plain NumPy division). tf.concat refuses to mix the two dtypes, which is exactly the error above. You have to convert your data to tf.float32.
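A minimal fix, assuming x_train is a NumPy array as in the code above: cast it to float32 before building the dataset, or cast the batch inside the training loop.
x_train_dcgan = x_train_dcgan.astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices(x_train_dcgan)
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)
# alternatively, inside train_dcgan, right before the concat:
# X_batch = tf.cast(X_batch, tf.float32)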