I found many answers to similar questions, but they are almost all about removing a Flatten layer. However, I didn't use any Flatten layer at the input. So, following what I found, I changed () into [] throughout the code. For example, this line:
model.add(Conv3D(32, kernel_size = (3,3,3), input_shape = (32, 32,16), activation = 'relu'))
Here is my full code after the change:

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv3D, MaxPooling3D, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split

dat = np.load("/content/drive/MyDrive/trn_dat.npy")
dat = dat/255.0
lbl = np.load("/content/drive/MyDrive/trn_lbl.npy")
lbl = keras.utils.to_categorical(lbl, 2)
x_train, x_vd, y_train, y_vd = train_test_split(dat, lbl, test_size = 0.9)
# model creation
model = tf.keras.Sequential()
model.add(BatchNormalization(center = True, scale = True))
model.add(Conv3D(32, kernel_size = [3,3,3], input_shape = [32, 32,16], activation = 'relu'))
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Conv3D(32, kernel_size = [3,3,3], input_shape = [32, 32,16], activation = 'relu'))
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Dropout(0.2))
model.add(Conv3D(64, kernel_size = [3,3,3], input_shape = [32, 32,16], activation = 'relu'))
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Conv3D(64, kernel_size = [3,3,3], input_shape = [32, 32,16], activation = 'relu'))
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Dropout(0.25))
model.add(Conv3D(128, kernel_size = [3,3,3], input_shape = [32, 32,16], activation = 'relu'))
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Conv3D(128, kernel_size = [3,3,3], input_shape = [32, 32,16], activation = 'relu'))
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Dropout(0.25))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
early_stopping_callback = EarlyStopping(monitor = 'val_loss', patience = 10)
path4 = model.fit(x_train, y_train, validation_data = (x_vd, y_vd), epochs = 100, callbacks = [early_stopping_callback])
And this is the error I get:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:806 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:789 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:747 train_step
y_pred = self(x, training=True)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py:985 __call__
outputs = call_fn(inputs, *args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/sequential.py:386 call
outputs = layer(inputs, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py:976 __call__
self.name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/input_spec.py:196 assert_input_compatibility
str(x.shape.as_list()))
ValueError: Input 0 of layer conv3d_71 is incompatible with the layer: : expected min_ndim=5, found ndim=4. Full shape received: [None, 32, 32, 16]
Then what is min_ndim? Is it one of the hyperparameters that I can define?
Finally, how can I solve this error?
min_ndim is not a hyperparameter you can set; it is the minimum number of dimensions the layer expects for its input. Conv3D expects 5D input: (batch_size, dim1, dim2, dim3, channels). So the error means you need to feed 4D samples (ndim = 5 once the batch dimension is included), while your samples have shape (32, 32, 16). You can reshape your dataset easily:
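For example, with np.expand_dims (one way to do it; a sketch that assumes the dat array from your code and would go right after the dat = dat/255.0 line, before train_test_split):

# add a trailing channel axis so each sample is 4D: (32, 32, 16) -> (32, 32, 16, 1)
dat = np.expand_dims(dat, axis = -1)
print(dat.shape)   # should now be (num_samples, 32, 32, 16, 1)

(dat.reshape(-1, 32, 32, 16, 1) would do the same thing.)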
Then change input_shape from (32, 32, 16) to (32, 32, 16, 1). Additionally, input_shape only applies to your first weight layer, so remove it from all the other layers.
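Putting both changes together, the start of the model would look something like this (a sketch based on the code in the question; only the first Conv3D keeps input_shape, now including the channel dimension):

import tensorflow as tf
from tensorflow.keras.layers import Conv3D, MaxPooling3D, Dropout, BatchNormalization

model = tf.keras.Sequential()
model.add(BatchNormalization(center = True, scale = True))
model.add(Conv3D(32, kernel_size = (3,3,3), input_shape = (32, 32, 16, 1), activation = 'relu'))   # tuples and lists both work for kernel_size
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Conv3D(32, kernel_size = (3,3,3), activation = 'relu'))   # no input_shape on later layers
model.add(MaxPooling3D(pool_size = (2,2,2)))
model.add(Dropout(0.2))
# ... rest of the model, also without input_shape ...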