I am implementing pixel_norm mention in this paper for my generator network network of GAN. For this i am using following code

from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from tensorflow.keras import regularizers, initializers  
from skimage.io import imread_collection
from tensorflow.keras.preprocessing import image
import numpy as np,os,cv2
from keras import backend as K
from tensorflow.keras.layers import Activation
import keras
from keras.layers import  Lambda
def pixel_norm(x, epsilon=1e-8):
    return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
from collections import defaultdict
from PIL import Image
from six.moves import range
from glob import glob
models = tf.contrib.keras.models
layers = tf.contrib.keras.layers
utils = tf.contrib.keras.utils
losses = tf.contrib.keras.losses
optimizers = tf.contrib.keras.optimizers 
metrics = tf.contrib.keras.metrics
preprocessing_image = tf.contrib.keras.preprocessing.image
datasets = tf.contrib.keras.datasets

def generator(latent_size, classes=2):

    def up_sampling_block(x, filter_size):
        x = layers.UpSampling2D(size=(2, 2))(x)
        x = layers.Conv2D(filter_size, (5,5), padding='same', activation='relu')(x)
        x = Lambda(pixel_norm, arguments={'epsilon':1e-8})(x)
        return x

    # Input 1
    # image class label
    image_class = layers.Input(shape=(1,), dtype='int32', name='image_class')

    # class embeddings
    emb = layers.Embedding(classes, latent_size,

    # 10 classes in MNIST
    cls = layers.Flatten()(emb)

    # Input 2
    # latent noise vector
    latent_input = layers.Input(shape=(latent_size,), name='latent_noise')

    h = layers.multiply([latent_input, cls])

    # Conv generator
    x = layers.Dense(256,)(h)
    x = Activation('relu')(x)
    x = layers.Dense(128 * 13 * 13, activation='relu')(x)
    x = layers.Reshape((13, 13, 128))(x)

    # upsample to (14, 14, 128)
    x = up_sampling_block(x, 256)

    # upsample to (28, 28, 256)
    x = up_sampling_block(x, 128)

    # reduce channel into binary image (28, 28, 1)
    generated_img = layers.Conv2D(1, (3,3), padding='valid', activation='tanh')(x)

    return models.Model(inputs=[latent_input, image_class],


And after running it i am getting follwing error:

Traceback (most recent call last):

  File "<ipython-input-5-4cbc575f5dae>", line 66, in <module>

  File "<ipython-input-5-4cbc575f5dae>", line 53, in generator
    x = up_sampling_block(x, 256)

  File "<ipython-input-5-4cbc575f5dae>", line 26, in up_sampling_block
    x = Lambda(pixel_norm, arguments={'epsilon':1e-8})(x)

  File "/home/nd/anaconda3/lib/python3.6/site-packages/keras/engine/base_layer.py", line 443, in __call__
    previous_mask = _collect_previous_mask(inputs)

  File "/home/nd/anaconda3/lib/python3.6/site-packages/keras/engine/base_layer.py", line 1311, in _collect_previous_mask
    mask = node.output_masks[tensor_index]

AttributeError: 'Node' object has no attribute 'output_masks'

How to solve this issue. I searched on net and one thing i noticed that there may be problem of importing tensorflow layer and keras layers simultaneously. But how to solve this i donot know.

0 Answers