ModuleNotFoundError: No module named 'art.attacks'


I installed the "art" module yesterday, but when I run my code I get this error:

Traceback (most recent call last):
  File "D:/Desktop/captcha/src1/adv_ex.py", line 10, in <module>
    from art.attacks.evasion import FastGradientMethod
ModuleNotFoundError: No module named 'art.attacks'

I searched for it on the Internet, but there is little information about this error.
I am sure that the "art" module (version 5.1) is installed.
I hope you can help me. Many thanks.
Here is the full code:

import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torch
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from art.attacks.evasion import FastGradientMethod
from art.attacks.evasion import BasicIterativeMethod
from art.estimators.classification import PyTorchClassifier
from art.utils import load_mnist

from model import Net
from model import CaptchaData
from model import DataLoader
from model import vec2text

# Step 1: Load the original dataset
transform = transforms.Compose([transforms.ToTensor()])  # no data augmentation or normalization here
test_data = CaptchaData('./testset/', transform=transform)
test_data_loader = DataLoader(test_data, batch_size=128, num_workers=0, shuffle=True, drop_last=True)


#Step 2: Load the model
net = Net()

# Move the model to the available device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('current device:', device)
print("right")
net.to(device)

#Define the loss function and optimizer
criterion = nn.MultiLabelSoftMarginLoss()
criterion.requires_grad = True #loss function
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

#Load the model
model_path = './module_build/model.pth'

checkpoint = torch.load(model_path)
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
net.eval()

# Step 3: Create the ART classifier for attacking
classifier = PyTorchClassifier(
    model=net,
    clip_values=(0.0,255.0),
    loss=criterion,
    optimizer=optimizer,
    input_shape=(3,140,44),
    nb_classes=222,
)


# Step 4: Define the eval function for evaluating the result
captcha_list = list('0123456789abcdefghijklmnopqrstuvwxyz_')  # 37 symbols, matching nb_classes = 222 = 37 * 6
captcha_length = 6


def calculat_acc(output, target):
    output, target = output.view(-1, len(captcha_list)), target.view(-1, len(captcha_list))
    output = nn.functional.softmax(output, dim=1)
    output = torch.argmax(output, dim=1)
    target = torch.argmax(target, dim=1)
    output, target = output.view(-1, captcha_length), target.view(-1, captcha_length)
    c = 0
    for i, j in zip(target, output):
        if torch.equal(i, j):
            c += 1
    acc = c / output.size()[0] * 100
    return acc

# Step 5: Craft attack with FGSM and show the adversarial picture
acc, i = 0, 0
# with torch.no_grad( ) :
for inputs, labels in test_data_loader:

    # Before
    plt.figure()
    plt.imshow(inputs[124].permute(1,2,0))

    attack = FastGradientMethod(estimator=classifier, eps=0.1)

    inputs_adv = attack.generate(x=inputs)
    inputs_adv = torch.as_tensor(inputs_adv)
    print(vec2text(labels[124].view(6,-1)))
    # After
    plt.figure()
    plt.imshow(inputs_adv[124].permute(1,2,0))

    # use a separate loop variable so the batch counter i is not overwritten
    for k in range(128):
        result = transforms.ToPILImage()(inputs_adv[k])
        result.save("./input_adv/" + vec2text(labels[k].view(6, -1)) + ".jpg")
    outputs = net(inputs_adv)
    print(vec2text(outputs[124].view(6,-1)))
    acc += calculat_acc(outputs, labels)
    i += 1
    break
print('Accuracy: %.3f %%' % (acc / i))

'''
# Step 6: craft attack with BIM and show the adversarial picture
acc,i =0,0
# with torch.no_grad( ) :
for inputs, labels in test_data_loader:

    # Before
    plt.figure()
    plt.imshow(inputs[124].permute(1,2,0))

    attack = BasicIterativeMethod(estimator=classifier, eps=0.1, eps_step=0.01)

    inputs_adv = attack.generate(x=inputs)
    inputs_adv = torch.as_tensor(inputs_adv)
    
    # After
    plt.figure()
    plt.imshow(inputs_adv[124].permute(1,2,0))

    
    outputs = net(inputs_adv)
    acc += calculat_acc(outputs, labels)
    i += 1
    break
print ('Accuracy: %.3f %%' % (acc/i))
'''


#Step 7: crop the image
for inputs, labels in test_data_loader:
    # Before
    plt.figure()
    plt.imshow(inputs[124].permute(1,2,0))

    #Transform the tensor to image for further operation
    image = transforms.ToPILImage()(inputs[124])

    # crop the left part
    image_left = image.crop((28,0,140,44))
    plt.figure()
    plt.imshow(np.asarray(image_left))
    attack = FastGradientMethod(estimator=classifier, eps=0.1)

    inputs_adv = attack.generate(x=inputs)
    inputs_adv = torch.as_tensor(inputs_adv)
    #After
    plt.figure()
    plt.imshow(inputs_adv[124].permute(1,2,0))
    #Crop the one part of image.
    image = transforms.ToPILImage()(inputs_adv[124])
    (left, upper, right, lower) = (0,0,28,44)
    image_crop = image.crop((left, upper, right,lower))
    plt.figure()
    plt.imshow(np.asarray(image_crop))

    #combine the final image.
    dst = Image.new('RGB',(image_crop.width + image_left.width, image_crop.height))
    dst.paste(image_crop,(0, 0))
    dst.paste(image_left, (image_crop.width, 0))

    dst.save("./input_adv/"+ str(i) +".jpg")
    # Show the final result
    plt.figure()
    plt.imshow(np.asarray(dst))
    # Test it on pre-trained model
    outputs = net(inputs_adv)
    break

# save cropped image
for inputs, labels in test_data_loader:
    for i in range(128):

        # Transform the tensor to image for further operation
        image = transforms.ToPILImage()(inputs[i])

        #crop the left part
        image_left =image.crop((28,8,140,44))

        attack = FastGradientMethod(estimator=classifier, eps=0.1)

        inputs_adv = attack.generate(x=inputs)
        inputs_adv = torch.as_tensor(inputs_adv)
        #crop the one part of image.
        image = transforms.ToPILImage()(inputs_adv[i])
        (left, upper, right, lower) = (0,8,28,44)
        image_crop = image.crop((left,upper,right,lower))
        # combine the final image.
        dst = Image.new('RGB',(image_crop.width + image_left.width, image_crop.height))
        dst.paste(image_crop, (0,0))
        dst.paste(image_left,(image_crop.width,0))

        # Save the final results
        dst.save("./input_adv_cropped/"+vec2text(labels[i].view(6,-1))+ ".jpg")
    break

# Test accuracy on cropped images
test_data_cropped = CaptchaData('./input_adv_cropped/', transform=transform)
test_data_loader_cropped = DataLoader(test_data_cropped, batch_size=8, num_workers=0, shuffle=True, drop_last=True)

acc, i = 0, 0
with torch.no_grad():
    for inputs, labels in test_data_loader_cropped:
        outputs = net(inputs)
        for j in range(8):
            plt.figure()
            print(vec2text(labels[j].view(6, -1)))
            print(vec2text(outputs[j].view(6, -1)))
            plt.imshow(inputs[j].permute(1,2,0))
            print("\n")
        acc += calculat_acc(outputs, labels)
        i +=1
        break
print('Accuracy: %.3f %%' % (acc / i))

1 Answer

Answered by CHI NEW:

I have solved this problem.
I had mixed up the art package with adversarial-robustness-toolbox: the Adversarial Robustness Toolbox is imported as art, but it is distributed under a different name. After I used

pip install adversarial-robustness-toolbox

to install it, my code ran normally.
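For anyone hitting the same error: the art package on PyPI (the one at version 5.1) is an unrelated ASCII-art library, while the Adversarial Robustness Toolbox is distributed as adversarial-robustness-toolbox but imported as art, so installing the wrong one leaves no art.attacks subpackage. If the ASCII-art package is still present, uninstalling it first (pip uninstall art) keeps the two from writing into the same site-packages/art/ directory. Below is a minimal sanity check after reinstalling, run in the same environment as adv_ex.py (the version comment is an assumption based on ART's 1.x release line):

# Check that "art" now resolves to the Adversarial Robustness Toolbox
import art

print(art.__file__)                       # path of the installed art package
print(getattr(art, "__version__", "?"))   # ART releases are 1.x; "5.1" means the ASCII-art library is still installed

# The imports that failed in the traceback above
from art.attacks.evasion import FastGradientMethod, BasicIterativeMethod
from art.estimators.classification import PyTorchClassifier
print(FastGradientMethod, BasicIterativeMethod, PyTorchClassifier)

If the first print still points at the ASCII-art package, uninstall it and reinstall adversarial-robustness-toolbox in the same interpreter environment.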