I am generating adversarial examples using FGSM on the MNIST dataset.

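For context, the data is loaded with the standard Keras MNIST helper, roughly like this (a sketch of my loading code; note the images come back as uint8 arrays, which I think is relevant to the warning further down):

import tensorflow as tf

# Standard Keras MNIST loader: images are uint8 arrays of shape (N, 28, 28), values 0-255
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()

# Add the channel dimension the Conv2D input expects: (N, 28, 28, 1)
test_images = test_images.reshape(-1, 28, 28, 1)
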
This is my fgsm_attack_generation code:


def fgsm_attack(model, data, epsilon):
    # Retrieve the inputs and labels from the data
    inputs, labels = data

    # Create a TensorFlow session
    sess = tf.compat.v1.Session()

    # Convert the inputs and labels to TensorFlow tensors
    inputs_tf = tf.convert_to_tensor(inputs)
    labels_tf = tf.convert_to_tensor(labels)

    # Calculate the gradients of the loss with respect to the inputs
    with tf.GradientTape() as tape:
        tape.watch(inputs_tf)
        logits = model(inputs_tf, training=False)
        loss = tf.keras.losses.sparse_categorical_crossentropy(labels_tf, logits)

    gradient = tape.gradient(loss, inputs_tf)

    # Compute the sign of the gradients
    gradient_signs = tf.sign(gradient)

    # Generate the perturbed inputs by adding epsilon times the sign of the gradients
    perturbed_inputs = tf.add(inputs_tf, epsilon * gradient_signs)

    # Clip the perturbed inputs to ensure they stay within the valid range (0 to 1)
    perturbed_inputs = tf.clip_by_value(perturbed_inputs, 0, 1)

    # Run the TensorFlow session to obtain the perturbed inputs
    perturbed_inputs_np = sess.run(perturbed_inputs)

    # Close the TensorFlow session
    sess.close()

    return perturbed_inputs_np

epsilon = 0.1  # Adjust the value as needed

perturbed_inputs = fgsm_attack(model, (test_images, test_labels), epsilon)


and I got this error:


WARNING:tensorflow:The dtype of the watched tensor must be floating (e.g. tf.float32), got tf.uint8
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-15-f634fa398776> in <cell line: 38>()
     36 
     37 epsilon = 0.1  # Adjust the value as needed
---> 38 perturbed_inputs = fgsm_attack(model, (test_images, test_labels), epsilon)

2 frames
/usr/local/lib/python3.10/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
    100       dtype = dtypes.as_dtype(dtype).as_datatype_enum
    101   ctx.ensure_initialized()
--> 102   return ops.EagerTensor(value, ctx.device_name, dtype)
    103 
    104 

ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.

and my CNN model's architecture is as follows:

Model: "sequential_15"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_45 (Conv2D)          (None, 28, 28, 32)        320       
                                                                 
 max_pooling2d_45 (MaxPooli  (None, 14, 14, 32)        0         
 ng2D)                                                           
                                                                 
 batch_normalization_45 (Ba  (None, 14, 14, 32)        128       
 tchNormalization)                                               
                                                                 
 conv2d_46 (Conv2D)          (None, 14, 14, 64)        18496     
                                                                 
 max_pooling2d_46 (MaxPooli  (None, 7, 7, 64)          0         
 ng2D)                                                           
                                                                 
 batch_normalization_46 (Ba  (None, 7, 7, 64)          256       
 tchNormalization)                                               
                                                                 
 conv2d_47 (Conv2D)          (None, 7, 7, 64)          36928     
                                                                 
 max_pooling2d_47 (MaxPooli  (None, 3, 3, 64)          0         
 ng2D)                                                           
                                                                 
 batch_normalization_47 (Ba  (None, 3, 3, 64)          256       
 tchNormalization)                                               
                                                                 
 flatten_15 (Flatten)        (None, 576)               0         
                                                                 
 dense_30 (Dense)            (None, 64)                36928     
                                                                 
 dropout_15 (Dropout)        (None, 64)                0         
                                                                 
 dense_31 (Dense)            (None, 10)                650      

=================================================================
Total params: 93962 (367.04 KB)
Trainable params: 93642 (365.79 KB)
Non-trainable params: 320 (1.25 KB)


Why is this error popping up, and what can I do about it? Is the problem with the model's architecture or with the library versions?
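My current guess is that the uint8 warning is the real cause: GradientTape cannot differentiate through a uint8 tensor, so tape.gradient returns None, and the ValueError is then raised when that None is converted to a tensor further down (I think in tf.sign). Below is a minimal sketch of a pure eager-mode version I'm considering switching to (untested; it assumes the model was trained on inputs scaled to [0, 1] and that the final Dense layer has a softmax, and it drops the tf.compat.v1.Session entirely):

import tensorflow as tf

def fgsm_attack(model, data, epsilon):
    inputs, labels = data

    # Cast to float32 (GradientTape can only watch floating tensors) and scale to [0, 1]
    inputs_tf = tf.convert_to_tensor(inputs, dtype=tf.float32) / 255.0
    labels_tf = tf.convert_to_tensor(labels)

    with tf.GradientTape() as tape:
        tape.watch(inputs_tf)
        preds = model(inputs_tf, training=False)
        # Assumes the model outputs softmax probabilities; pass from_logits=True otherwise
        loss = tf.keras.losses.sparse_categorical_crossentropy(labels_tf, preds)

    # Gradient of the loss with respect to the inputs; no session is needed in eager mode
    gradient = tape.gradient(loss, inputs_tf)

    # FGSM step: nudge each pixel by epsilon in the direction of the gradient sign
    perturbed_inputs = inputs_tf + epsilon * tf.sign(gradient)
    perturbed_inputs = tf.clip_by_value(perturbed_inputs, 0.0, 1.0)

    return perturbed_inputs.numpy()

epsilon = 0.1
perturbed_inputs = fgsm_attack(model, (test_images, test_labels), epsilon)

Would switching to something like this be the right fix, or is the problem elsewhere?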
