What's wrong with my inference code for a trained PyTorch model?
It fails with the runtime error: "expected CPU tensor (got CUDA tensor)"
import torch
import torch.nn as nn
#from __future__ import print_function
import argparse
from PIL import Image
import torchvision.models as models
import skimage.io
from torch.autograd import Variable as V
from torch.nn import functional as f
from torchvision import transforms as trn
# define image transformation
centre_crop = trn.Compose([
trn.ToPILImage(),
trn.Scale(256),
trn.CenterCrop(224),
trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
filename=r'ed91.png'
img = skimage.io.imread(filename)
x = V(centre_crop(img).unsqueeze(0), volatile=True)
model = models.__dict__['resnet18']()
model = torch.nn.DataParallel(model).cuda()
model = torch.load('mw_model0831.pth')
#model.load_state_dict(checkpoint['state_dict'])
#best_prec1 = checkpoint['best_prec1']
logit = model(x)
print(logit)
print(len(logit))
h_x = f.softmax(logit).data.squeeze()
How can I fix this?
Probably the error is a mismatch between the model, which is on CUDA, and the variable x you are using as input, which is a CPU tensor. Try adding .cuda() to your variable so both match.
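A minimal sketch of the corrected inference step, reusing the names from the code above (it assumes mw_model0831.pth was saved as a full model living on the GPU, which is what the torch.load call in the question suggests):

# the loaded model already lives on the GPU, so only the input needs moving
model = torch.load('mw_model0831.pth')
model.eval()  # inference mode: disables dropout, uses running batch-norm stats

# .cuda() moves the input Variable to the GPU so it matches the model
x = V(centre_crop(img).unsqueeze(0), volatile=True).cuda()

logit = model(x)
h_x = f.softmax(logit).data.squeeze()

Alternatively, keep x on the CPU and call model.cpu() on the loaded model instead; what matters is that the model and its input live on the same device.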