Generate adversarial examples with FGSM (Fast Gradient Sign Method)
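FGSM (Goodfellow et al., 2014) builds an adversarial example with a single step in input space along the sign of the loss gradient: x_adv = clamp(x + ε · sign(∇_x J(θ, x, y)), 0, 1). The ε parameter controls the perturbation strength; the `epsilons` list in the code below sweeps it from 0 (no attack) to 0.3.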
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
# import ipdb  # only needed for the commented-out debugger breakpoint below

# NOTE: This is a hack to get around "User-agent" limitations when downloading MNIST datasets
# see https://github.com/pytorch/vision/issues/3497 for more information
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "model/lenet_mnist_model.pth"
use_cuda = True  # falls back to CPU below if CUDA is unavailable
# LeNet Model definition
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
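
# Shape sanity check (added sketch, not in the original gist): for the 1x28x28
# MNIST inputs assumed here, conv1 + max-pool yields 10x12x12 and conv2 +
# max-pool yields 20x4x4, hence the 20*4*4 = 320 features flattened into fc1.
assert Net()(torch.zeros(1, 1, 28, 28)).shape == (1, 10)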
# MNIST Test dataset and dataloader declaration
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, download=True, transform=transforms.Compose([
        transforms.ToTensor(),
    ])),
    batch_size=1, shuffle=True)
print("Test set size:", len(test_loader))

# Define what device we are using
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")

# Initialize the network
model = Net().to(device)

# Load the pretrained model
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()
def fgsm_attack(image, epsilon, data_grad):
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed image by adjusting each pixel of the input image
    perturbed_image = image + epsilon * sign_data_grad
    # Add clipping to maintain the [0,1] range
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    # Return the perturbed image
    return perturbed_image
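
# Usage sketch (hypothetical tensors, not from the original gist): only the
# sign of the gradient is used, so before clamping every pixel moves by exactly
# +/-epsilon; the L-infinity norm of the final perturbation is at most epsilon.
_x = torch.rand(1, 1, 28, 28)
_g = torch.randn_like(_x)
assert (fgsm_attack(_x, 0.25, _g) - _x).abs().max() <= 0.25 + 1e-6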
def test(model, device, test_loader, epsilon):
    # Accuracy counter
    correct = 0
    adv_examples = []

    # Loop over all examples in test set
    for data, target in test_loader:
        # ipdb.set_trace()
        # Send the data and label to the device
        data, target = data.to(device), target.to(device)

        # Set requires_grad attribute of tensor. Important for Attack
        data.requires_grad = True

        # Forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability

        # If the initial prediction is wrong, don't bother attacking, just move on
        if init_pred.item() != target.item():
            continue

        # Calculate the loss
        loss = F.nll_loss(output, target)

        # Zero all existing gradients
        model.zero_grad()

        # Calculate gradients of model in backward pass
        loss.backward()

        # Collect the gradient of the input data
        data_grad = data.grad.data

        # Call FGSM Attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)

        # Re-classify the perturbed image
        output = model(perturbed_data)

        # Check for success
        final_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0-epsilon examples
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
        else:
            # Save some adversarial examples for visualization later
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))

    # Calculate final accuracy for this epsilon (batch_size=1, so len(test_loader) is the sample count)
    final_acc = correct / float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))

    # Return the accuracy and the saved adversarial examples
    return final_acc, adv_examples
accuracies = []
examples = []

# Run test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)

# Plot several examples of adversarial samples at each epsilon
cnt = 0
plt.figure(figsize=(8, 10))
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons), len(examples[0]), cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig, adv, ex = examples[i][j]
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
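
# The referenced PyTorch tutorial also plots accuracy as a function of epsilon;
# a minimal sketch of that plot using the accuracies list collected above
# (not part of the original gist):
plt.figure(figsize=(5, 5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()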
Ref: https://pytorch.org/tutorials/beginner/fgsm_tutorial.html