I'm creating a cGAN (conditional GAN) network for a project but have run into the following error (the full code is at the bottom):

Exception has occurred: ValueError
Exception encountered when calling layer 'sequential' (type Sequential).

Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (102,)

Call arguments received by layer 'sequential' (type Sequential):
  • inputs=tf.Tensor(shape=(102,), dtype=float32)
  • training=None
  • mask=None
ValueError: Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (102,)

During handling of the above exception, another exception occurred:

  File "D:\School\Internship (S5)\data\cgan_model_v2.py", line 131, in <module>
    generated_data = generator([noise_tensor, fake_labels_tensor])
                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ValueError: Exception encountered when calling layer 'sequential' (type Sequential).

Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (102,)

Call arguments received by layer 'sequential' (type Sequential):
  • inputs=tf.Tensor(shape=(102,), dtype=float32)
  • training=None
  • mask=None

What would be a solution to this error? I've searched for it but had no luck finding a solution. The labels are a 1D array with 2 entries, and the data is a 1D array with 512 data points.
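
For reference, a minimal snippet that reproduces the same error with these shapes (100 noise values + 2 label values = the 102 in the message):

import numpy as np
import tensorflow as tf
from keras.layers import Dense

layer = Dense(256, activation='relu')
vec = tf.convert_to_tensor(np.random.rand(102), dtype=tf.float32)  # shape (102,), no batch dimension
layer(vec)  # raises: expected min_ndim=2, found ndim=1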

If you need any more details, please don't hesitate to ask.

Code:

import mat73
import numpy as np
import tensorflow as tf
from keras.layers import Dense, Input, Concatenate, Flatten
from keras.models import Model
from keras.optimizers import Adam


# Load the data
def import_data():
    data_dict = mat73.loadmat('example_data.mat')

    # Assuming raw_data contains a list of lists
    raw_data = list(data_dict.values())

    # Convert each element in raw_data to a NumPy array
    data = np.asarray(raw_data[0])
    labels = np.asarray(raw_data[1])
    # Repeat each label once per 512-point section of its recording
    section_labels = [x for x in labels for _ in range(4)]
    data, split_labels = split_data(data)
    labels = combine_arrays(split_labels, section_labels)
    data = z_score_normalization(data)
    labels = z_score_normalization(labels)
    return data, labels

def split_data(input_data):
    split_data_r = []
    split_labels = []

    # Split each recording into 512-point sections; label each section
    # with its index within the recording (0, 1, 2, 3)
    for row in input_data:
        for section_index, j in enumerate(range(0, len(row), 512)):
            split_data_r.append(row[j:j + 512])
            split_labels.append(section_index)

    return np.array(split_data_r), np.array(split_labels)

def combine_arrays(array1, array2):
    if len(array1) != len(array2):
        raise ValueError("Input arrays must have the same length")

    combined_array = np.column_stack((array1, array2))
    return combined_array

def z_score_normalization(data_array):
    if data_array.ndim == 1:
        n_data = tf.convert_to_tensor(data_array, dtype=tf.float32)
        mean = tf.reduce_mean(n_data)
        stddev = tf.math.reduce_std(n_data)
        normalized_data = (n_data - mean) / stddev
    elif data_array.ndim == 2:
        mean = tf.reduce_mean(data_array, axis=0)
        stddev = tf.math.reduce_std(data_array, axis=0)
        normalized_data = (data_array - mean) / stddev
    else:
        raise ValueError("Input array must be 1D or 2D")
    return normalized_data



def discriminator(data_input_shape, label_input_shape):
    # Create input layers for data and labels
    data_input = Input(shape=data_input_shape, name='data_input')
    label_input = Input(shape=label_input_shape, name='label_input')

    # Concatenate data and labels
    merged_inputs = Concatenate(axis=-1)([data_input, label_input])

    # Create the discriminator model
    model = tf.keras.Sequential()
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))  # Use 'sigmoid' for binary classification

    # Connect the input layer to the discriminator model
    discriminator_output = model(merged_inputs)

    # Create a Model using the input layers and discriminator output
    discriminator_model = Model(inputs=[data_input, label_input], outputs=discriminator_output)

    return discriminator_model

def generator(noise_input_shape, label_input_shape):
    # Create input layers for noise and labels
    noise_input = Input(shape=noise_input_shape, name='noise_input')
    label_input = Input(shape=label_input_shape, name='label_input')

    # Concatenate noise and labels
    merged_inputs = Concatenate(axis=-1)([noise_input, label_input])

    # Create the generator model
    model = tf.keras.Sequential()
    model.add(Dense(512, activation='relu'))
    model.add(Dense(768, activation='relu'))
    model.add(Dense(768, activation='relu'))
    model.add(Dense(512, activation='relu'))

    generator_output = model(merged_inputs)

    # Create a Model using the input layers and generator output
    generator_model = Model(inputs=[noise_input, label_input], outputs=generator_output)

    return generator_model

def cgan(generator_model, discriminator_model):
    # TODO: combined cGAN model not implemented yet
    print("not done yet")

noise_input_shape = (100,)
label_input_shape = (2,)

generator = generator(noise_input_shape, label_input_shape)
discriminator = discriminator((512,), label_input_shape)


optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
cross_entropy = tf.keras.losses.BinaryCrossentropy()

# training loop
epochs = 200
batch_size = 1
for x in range(epochs):
    data, labels = import_data()
    for i in range(len(data)):
        noise = np.random.rand(noise_input_shape[0])
        fake_labels = np.random.rand(label_input_shape[0])
        noise_tensor = tf.convert_to_tensor(noise, dtype=tf.float32)
        fake_labels_tensor = tf.convert_to_tensor(fake_labels, dtype=tf.float32)
        
        generated_data = generator([noise_tensor, fake_labels_tensor])
        
        real_data = data[i]
        real_labels = labels[i]
        # discriminator: real samples should score 1, generated samples 0
        with tf.GradientTape() as disc_tape:
            real_output = discriminator([real_data, real_labels])
            fake_output = discriminator([generated_data, fake_labels])
            real_loss = cross_entropy(tf.ones_like(real_output), real_output)
            fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
            total_disc_loss = real_loss + fake_loss
            
        # Update discriminator weights
        gradients_of_discriminator = disc_tape.gradient(total_disc_loss, discriminator.trainable_variables)
        optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
        
        # generator
        with tf.GradientTape() as gen_tape:
            generated_data = generator([noise, fake_labels])
            validity = discriminator([generated_data, fake_labels])
            gen_loss = cross_entropy(tf.ones_like(validity), validity)

        # Update generator weights
        gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
        optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))

I tried reshaping the inputs to the model but unfortunately haven't gotten any results worth mentioning.

1 answer (from Danny On):
As the error message states, the Dense layer expects an input with min_ndim=2 but received ndim=1. Your generator concatenates the noise (100 values) and the labels (2 values) into a single vector of shape (102,), while a Keras model always expects a leading batch dimension, i.e. shape (batch_size, 102). You need to reshape each 1D input array into a 2D array with a batch dimension of 1 before calling the model; the same applies to the real data and labels you pass to the discriminator.
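
A minimal sketch of that fix, assuming the shapes from the question (noise of length 100, labels of length 2):

import numpy as np
import tensorflow as tf

noise = np.random.rand(100)        # shape (100,)
fake_labels = np.random.rand(2)    # shape (2,)

# Add a leading batch axis: (100,) -> (1, 100) and (2,) -> (1, 2)
noise_tensor = tf.convert_to_tensor(noise[np.newaxis, :], dtype=tf.float32)
fake_labels_tensor = tf.convert_to_tensor(fake_labels[np.newaxis, :], dtype=tf.float32)

generated_data = generator([noise_tensor, fake_labels_tensor])  # output shape (1, 512)

tf.expand_dims(x, axis=0) or x.reshape(1, -1) does the same job, and you can also draw a whole batch at once with np.random.rand(batch_size, 100) so the arrays are already 2D.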