from __future__ import print_function, division

import copy
import os
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils import data
from torchvision import datasets, models, transforms
from sklearn.metrics import accuracy_score, confusion_matrix

# Fix the random seed for reproducibility
torch.manual_seed(2809)
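
# LOOCV (leave-one-out cross-validation) example: fine-tune a pretrained
# ResNet50 on all samples but one, evaluate on the held-out sample, and
# repeat for every sample in the dataset.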
def train_model(model, criterion, optimizer, scheduler,
                train_input, train_label, num_epochs=25):
    since = time.time()
    model.train()  # Set model to training mode

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        running_loss = 0.0
        running_corrects = 0

        # Move the current batch to the training device
        train_input = train_input.to(device)
        train_label = train_label.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        output = model(train_input)
        _, pred = torch.max(output, 1)
        loss = criterion(output, train_label)
        loss.backward()
        optimizer.step()
        # Step the LR scheduler after the optimizer update
        # (required order since PyTorch 1.1)
        scheduler.step()

        # statistics (normalized by the full training-set size)
        running_loss += loss.item() * train_input.size(0)
        running_corrects += torch.sum(pred == train_label)

        epoch_loss = running_loss / dataset_size['train']
        epoch_acc = running_corrects.double() / dataset_size['train']

        print('train Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    return model
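
# Note: this train_model variant takes a single batch and runs num_epochs
# updates on it; the LOOCV loop below therefore calls it once per remaining
# training sample.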
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(20),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        # Resize + CenterCrop yields a fixed 224x224 input; a bare
        # Resize(224) only fixes the shorter edge and can produce
        # non-square images
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}
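
# The Normalize statistics above are the standard ImageNet mean and std
# expected by torchvision's pretrained models.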
data_dir = "test_images"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load a pretrained ResNet50 and replace the classifier for 2 classes
model_ft = models.resnet50(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)

# Save a clone of the initial model to restore later
initial_model = copy.deepcopy(model_ft)

criterion = nn.CrossEntropyLoss()

nb_samples = 10
nb_classes = 2

image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train']}
dataset_size = {x: len(image_datasets[x]) for x in ['train']}
class_names = image_datasets['train'].classes

# A second view of the same folder with the deterministic test transforms,
# used to fetch the held-out sample in each fold (indexing image_datasets
# ['train'] would return an already augmented tensor)
test_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'),
                                    data_transforms['test'])
# LOOCV: each sample is held out once as the test set
loocv_preds = []
loocv_targets = []
for idx in range(nb_samples):
    print('Using sample {} as test data'.format(idx))
    print('Resetting model')
    model_ft = copy.deepcopy(initial_model)
    # Recreate the optimizer and scheduler so they reference the fresh
    # copy's parameters; reusing the old ones would update nothing.
    # Observe that all parameters are being optimized.
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

    # Get all indices and remove the test sample
    train_indices = list(range(len(image_datasets['train'])))
    del train_indices[idx]

    # Create a new sampler restricted to the remaining samples
    sampler = data.SubsetRandomSampler(train_indices)
    dataloader = data.DataLoader(
        image_datasets['train'],
        num_workers=2,
        batch_size=1,
        sampler=sampler
    )

    # Train the model on every remaining sample
    for batch_idx, (sample, target) in enumerate(dataloader):
        print('Batch {}'.format(batch_idx))
        model_ft = train_model(model_ft, criterion, optimizer_ft,
                               exp_lr_scheduler, sample, target, num_epochs=2)

    # Test on the held-out sample, loaded with the test preprocessing
    model_ft.eval()
    test_data, test_target = test_dataset[idx]
    test_data = test_data.to(device)
    test_target = torch.tensor(test_target, device=device)

    # Add the batch dimension
    test_data.unsqueeze_(0)
    test_target.unsqueeze_(0)

    with torch.no_grad():
        output = model_ft(test_data)
    pred = torch.argmax(output, 1)
    loocv_preds.append(pred.item())
    loocv_targets.append(test_target.item())

print("loocv preds: ", loocv_preds)
print("loocv targets: ", loocv_targets)
print(accuracy_score(loocv_targets, loocv_preds))
print(confusion_matrix(loocv_targets, loocv_preds))
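
# A minimal follow-up sketch (not in the original gist): per-class accuracy
# can be read off the confusion matrix, assuming every class appears at
# least once among the LOOCV targets.
cm = confusion_matrix(loocv_targets, loocv_preds)
per_class_acc = cm.diagonal() / cm.sum(axis=1)
print("per-class accuracy: ", per_class_acc)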