How can I reduce the loss and val_loss of my model, which show no improvement?

from tensorflow.keras.layers import Conv1D, BatchNormalization
from tensorflow.keras.layers import MaxPool1D, Dense, Dropout, Flatten
from tensorflow.keras.layers import GlobalAveragePooling1D
from tensorflow.keras.models import Sequential
from tensorflow.keras.backend import clear_session
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import GroupKFold, LeaveOneGroupOut
from scipy import stats
import gc

gkf = GroupKFold()

def cnn_EEG_model_version_2():

    clear_session()
    model = Sequential()
    # conv block 1: input is 250 time steps x 19 EEG channels
    model.add(Conv1D(filters=4, kernel_size=16, strides=1, activation='relu', input_shape=(250, 19)))
    model.add(BatchNormalization())
    # model.add(MaxPool1D(pool_size=2, strides=2))  # pooling tried and removed in this variant
    # conv block 2
    model.add(Conv1D(filters=8, kernel_size=16, strides=1, activation='relu'))
    # conv block 3
    model.add(Conv1D(filters=8, kernel_size=8, strides=1, activation='relu'))
    # conv blocks 4 and 5
    model.add(Conv1D(filters=16, kernel_size=8, strides=1, activation='relu'))
    model.add(Conv1D(filters=16, kernel_size=4, strides=1, activation='relu'))
    # classifier head
    model.add(GlobalAveragePooling1D())
    model.add(Flatten())  # no-op here: GlobalAveragePooling1D already outputs a 2-D tensor
    model.add(Dense(units=10, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(units=5, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))  # binary output

    model.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
    return model

model=cnn_EEG_model_version_2()
model.summary()



accuracy = []
History = []

for train_index, val_index in gkf.split(full_train_set, full_labels, groups=full_group_array):
    train_features, train_labels = full_train_set[train_index], full_labels[train_index]
    val_features, val_labels = full_train_set[val_index], full_labels[val_index]
    # z-score with a single global mean/std (axis=None flattens the whole array);
    # a per-channel StandardScaler alternative is sketched below
    train_features = stats.zscore(train_features.reshape(-1, train_features.shape[-1]), axis=None).reshape(train_features.shape)
    val_features = stats.zscore(val_features.reshape(-1, val_features.shape[-1]), axis=None).reshape(val_features.shape)
    model = cnn_EEG_model_version_2()  # rebuild from scratch for each fold
    History.append(model.fit(train_features, train_labels, epochs=50, batch_size=32,
                             validation_data=(val_features, val_labels)))
    accuracy.append(model.evaluate(val_features, val_labels)[1])
    del train_features, val_features
    gc.collect()
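As noted in the loop, an alternative to the single global z-score is per-channel standardization with scikit-learn's StandardScaler: fit on the training folds only and reuse the same statistics for the validation fold, so validation data never influences the scaling. A sketch of that path (variable names match the loop above):

from sklearn.preprocessing import StandardScaler

# Per-channel standardization: fit on training data only, then apply the
# same per-channel mean/std to the validation fold.
scaler = StandardScaler()
train_features = scaler.fit_transform(
    train_features.reshape(-1, train_features.shape[-1])
).reshape(train_features.shape)
val_features = scaler.transform(
    val_features.reshape(-1, val_features.shape[-1])
).reshape(val_features.shape)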

[screenshot: training output showing loss and val_loss staying flat across epochs]
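The screenshot itself is not reproducible here, but the same curves can be regenerated from the stored History objects (a sketch; assumes History holds one Keras History instance per fold, as in the loop above):

import matplotlib.pyplot as plt

# Re-plot loss and val_loss per fold from the Keras History objects.
for fold, h in enumerate(History):
    plt.plot(h.history['loss'], label=f'fold {fold} loss')
    plt.plot(h.history['val_loss'], linestyle='--', label=f'fold {fold} val_loss')
plt.xlabel('epoch')
plt.ylabel('binary cross-entropy')
plt.legend()
plt.show()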

I am creating a CNN model to classify EEG data into two classes, 0 and 1. I have trained the model several times with several changes, but the loss and val_loss do not improve at all and never drop below the values shown above. I cannot identify the problem with my model.
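One number worth checking against those flat curves: a binary cross-entropy stuck near ln(2) ≈ 0.693 means the network is outputting roughly 0.5 for every sample, i.e. no better than chance, and a strong class imbalance alone can pin the loss at the base-rate value. A sketch of computing that constant-predictor baseline (assumes full_labels is a 1-D NumPy array of 0/1 labels):

import numpy as np

# Fraction of positive samples in the data.
p = float(full_labels.mean())
print(f"positive class fraction: {p:.3f}")

# Binary cross-entropy of a trivial model that always predicts the base rate p;
# a training loss stuck at this value means nothing has been learned.
p = np.clip(p, 1e-7, 1 - 1e-7)
baseline_bce = -(p * np.log(p) + (1 - p) * np.log(1 - p))
print(f"constant-predictor binary cross-entropy: {baseline_bce:.4f}")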

