I'm training a CNN. Because of the high volume of data I'm using a data generator, and I added data augmentation inside the generator function. I set the batch size to 10. When I run the model, I get this error:

ValueError: ('Input data in NumpyArrayIterator should have rank 4. You passed an array with shape', (10, 100, 100, 100, 1))

I didn't get this error before adding the augmentation. What should I do?
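As far as I can tell, ImageDataGenerator is meant for 2D images, so its NumpyArrayIterator only accepts rank-4 arrays (batch, height, width, channels), while my volumes become rank 5 once batched. A minimal sketch of the mismatch as I understand it (shapes are placeholders):

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=180)

x_2d = np.zeros((10, 100, 100, 1))       # rank 4: a batch of 2D images
x_3d = np.zeros((10, 100, 100, 100, 1))  # rank 5: my batch of 3D volumes

datagen.flow(x_2d, batch_size=10)  # accepted
datagen.flow(x_3d, batch_size=10)  # raises the rank-4 ValueError above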
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import numpy as np

def Data_generator(df, batch_size, augment=False):
    for start in range(0, df.shape[0], batch_size):
        x_batch_img = []
        y_batch = []
        end = min(start + batch_size, df.shape[0])
        for idd in range(start, end):
            # read one raw binary volume and reshape it to (100, 100, 100, 1)
            img = open(np.array(df.imgpath)[idd], 'rb').read()
            img_1 = np.frombuffer(img, dtype=np.uint8)
            img_1 = img_1.reshape(100, 100, 100, 1)
            # the five regression targets for this sample
            Porosity = np.array(df.Porosity)[idd]
            throat_radius = np.array(df['throat radius'])[idd]
            pore_radius = np.array(df['pore radius'])[idd]
            pore_connection_number = np.array(df.pore_connection_number)[idd]
            pore_shape_factor = np.array(df['pore shape factor'])[idd]
            y_1 = np.concatenate((Porosity, throat_radius, pore_radius,
                                  pore_connection_number, pore_shape_factor), axis=None)
            y = np.array([y_1])
            x_batch_img.append(img_1)
            y_batch.append(y)
        x_batch_img = np.array(x_batch_img)  # shape (batch, 100, 100, 100, 1)
        # x_batch_img = tf.squeeze(np.array(x_batch_img), axis=-1)
        y_batch = np.array(y_batch)
        y_batch = y_batch.reshape(-1, 5)
        # augmentation: this is the part that triggers the error
        datagen = ImageDataGenerator(rotation_range=180,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2)
        datagen.fit(x_batch_img)
        data = datagen.flow(x_batch_img, y_batch, batch_size=32, shuffle=True)
        X = []
        while True:
            try:
                X.append(data.next())
            except:
                break
        # print(type(data))
        # y_batch = to_categorical(y_batch, 5)
        yield np.array(X)
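One workaround I'm considering is augmenting each 3D volume myself instead of going through ImageDataGenerator; a minimal sketch using scipy.ndimage (the rotate/shift choices are my assumptions, mirroring rotation_range and width/height_shift_range):

import numpy as np
from scipy import ndimage

def augment_volume(vol, max_angle=180, max_shift=0.2):
    # vol has shape (100, 100, 100, 1)
    angle = np.random.uniform(-max_angle, max_angle)
    # rotate in the first two spatial axes; reshape=False keeps the shape
    vol = ndimage.rotate(vol, angle, axes=(0, 1), reshape=False, order=1)
    # shift by up to max_shift of the spatial extent
    dx, dy = np.random.uniform(-max_shift, max_shift, size=2) * 100
    vol = ndimage.shift(vol, shift=(dx, dy, 0, 0), order=1)
    return vol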
batch_size = 10
train_gen = Data_generator(df_train, batch_size)
valid_gen = Data_generator(df_valid, batch_size)
test_gen = Data_generator(df_test, batch_size)
train_gen_pre = Data_predict_generator(df_train, batch_size)
valid_gen_pre = Data_predict_generator(df_valid, batch_size)
test_gen_pre = Data_predict_generator(df_test, batch_size)
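nbatches_train and nbatches_valid used in fit_generator below are just the number of batches per split (sketch, assuming that's all they are):

import math

nbatches_train = math.ceil(df_train.shape[0] / batch_size)
nbatches_valid = math.ceil(df_valid.shape[0] / batch_size)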
from keras.models import Sequential
from keras.layers import Conv3D, MaxPooling3D, BatchNormalization, Flatten, Dense, Dropout
from keras import initializers

model = Sequential()
model.add(Conv3D(16,kernel_size=(7,7,7),input_shape=(100,100,100,1),padding="same",activation='relu',kernel_initializer=initializers.RandomNormal(stddev=0.01),))
model.add(BatchNormalization())
model.add(MaxPooling3D(pool_size=(2,2,2)))
model.add(Conv3D(32,kernel_size=(5,5,5),activation='relu',padding="same",kernel_initializer=initializers.RandomNormal(stddev=0.01)))
model.add(BatchNormalization())
# model.add(Dropout(0.2))
model.add(MaxPooling3D(pool_size=(2,2,2)))
model.add(Conv3D(64,kernel_size=(3,3,3),activation='relu',padding="same",kernel_initializer=initializers.RandomNormal(stddev=0.01)))
model.add(BatchNormalization())
model.add(MaxPooling3D(pool_size=(2,2,2)))
model.add(Conv3D(128,kernel_size=(3,3,3),activation='relu',padding="same",kernel_initializer=initializers.RandomNormal(stddev=0.01)))
model.add(BatchNormalization())
model.add(MaxPooling3D(pool_size=(2,2,2)))
model.add(Conv3D(256,kernel_size=(3,3,3),activation='relu',padding="same",kernel_initializer=initializers.RandomNormal(stddev=0.01)))
model.add(BatchNormalization())
model.add(MaxPooling3D(pool_size=(2,2,2)))
model.add(Flatten())
model.add(Dense(1024,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(512,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(5))
model.summary()
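Not shown above: the model is compiled and the two callbacks are created before training, roughly like this (the MSE loss and the file names are assumptions; the targets are 5 continuous values):

from keras.callbacks import ModelCheckpoint, CSVLogger

model.compile(optimizer='adam', loss='mse', metrics=['mae'])
cp_callback = ModelCheckpoint('model_checkpoint.h5', save_best_only=True)  # hypothetical path
csvlogger = CSVLogger('training_log.csv')  # hypothetical path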
history = model.fit_generator(train_gen, steps_per_epoch=nbatches_train, epochs=200,
                              verbose=1, validation_data=valid_gen,
                              validation_steps=nbatches_valid,
                              callbacks=[cp_callback, csvlogger])