PyTorch Official Tutorials: the Image Chapter

Fine-tuning an object detection model based on torchvision 0.3

"""
Writing a class for the dataset
"""
import os
import numpy as np
import torch
from PIL import Image


class PennFudanDataset(object):
    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # load all image files, sorting them to ensure they are aligned
        self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))
        self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))

    def __getitem__(self, idx):
        # load images and masks
        img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
        mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])
        img = Image.open(img_path).convert("RGB")
        # note that we have not converted the mask to RGB,
        # because each color corresponds to a different instance,
        # with 0 being background
        mask = Image.open(mask_path)
        # convert the PIL Image into a numpy array
        mask = np.array(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # the first id is the background, so remove it
        obj_ids = obj_ids[1:]

        # split the color-encoded mask into a set of binary masks
        masks = mask == obj_ids[:, None, None]

        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)
        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])

        # convert everything into a torch.Tensor
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # there is only one class
        labels = torch.ones((num_objs,), dtype=torch.int64)
        masks = torch.as_tensor(masks, dtype=torch.uint8)

        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.imgs)
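
As a quick sanity check (a minimal sketch, not part of the original tutorial; it assumes the PennFudanPed folder has been downloaded and extracted next to the script), we can instantiate the dataset and inspect one sample:

dataset = PennFudanDataset('PennFudanPed', transforms=None)
img, target = dataset[0]
print(img.size)               # PIL image size, e.g. (width, height)
print(target["boxes"].shape)  # one [xmin, ymin, xmax, ymax] row per instance
print(target["masks"].shape)  # one binary mask per instance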
"""
There are two common situations. The first is when we want to start from a
pre-trained model and just fine-tune the last layer. The other is when we want
to replace the backbone of the model with a different one (for example, for
faster predictions). The handling of both cases follows below.
"""
# """
# 1. Fine-tuning a pre-trained model
# """
#
# import torchvision
# from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
#
# # load a model pre-trained on COCO
# model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
#
# # replace the classifier with a new one that has a user-defined num_classes
# num_classes = 2  # 1 class (person) + background
# # get the number of input features for the classifier
# in_features = model.roi_heads.box_predictor.cls_score.in_features
# # replace the pre-trained head with a new one
# model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
#
# """
# 2. Modifying the model to add a different backbone
# """
#
# import torchvision
# from torchvision.models.detection import FasterRCNN
# from torchvision.models.detection.rpn import AnchorGenerator
#
# # load a pre-trained model for classification
# # and return only the features
# backbone = torchvision.models.mobilenet_v2(pretrained=True).features
# # FasterRCNN needs to know the number of output channels in a backbone.
# # For mobilenet_v2 it is 1280, so we need to add it here
# backbone.out_channels = 1280
#
# # let's make the RPN generate 5 x 3 anchors per spatial location,
# # with 5 different sizes and 3 different aspect ratios.
# # We have a Tuple[Tuple[int]] because each feature map
# # could potentially have different sizes and aspect ratios
# anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
#                                    aspect_ratios=((0.5, 1.0, 2.0),))
#
# # let's define the feature maps that we will use to perform the
# # region-of-interest cropping, as well as the size of the crop after rescaling.
# # If your backbone returns a Tensor, featmap_names should be [0].
# # More generally, the backbone should return an OrderedDict[Tensor],
# # and in featmap_names you can choose which feature maps to use.
# roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
#                                                 output_size=7,
#                                                 sampling_ratio=2)
#
# # put the pieces together inside a FasterRCNN model
# model = FasterRCNN(backbone,
#                    num_classes=2,
#                    rpn_anchor_generator=anchor_generator,
#                    box_roi_pool=roi_pooler)

"""
An instance segmentation model for the PennFudan dataset
"""
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor


def get_model_instance_segmentation(num_classes):
    # load an instance segmentation model pre-trained on COCO
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

    # get the number of input features for the classifier
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    # replace the pre-trained head with a new one
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    # now get the number of input features for the mask classifier
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    # and replace the mask predictor with a new one
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer,
                                                       num_classes)

    return model
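
As a quick check that the reshaped model runs (a minimal sketch, not part of the original tutorial): in eval mode, torchvision detection models accept a list of 3-channel image tensors and return one prediction dict per image.

model = get_model_instance_segmentation(num_classes=2)
model.eval()
with torch.no_grad():
    predictions = model([torch.rand(3, 300, 400)])
print(predictions[0].keys())  # dict with 'boxes', 'labels', 'scores', 'masks'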

"""
Writing helper functions for data augmentation / transformation:
"""
# transforms.py comes from torchvision's references/detection folder
import transforms as T


def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)

"""
Writing the main function that performs training and validation
"""
from engine import train_one_epoch, evaluate
# engine.py and utils.py also come from torchvision's references/detection folder
import utils


def main():
    # train on the GPU if one is available, otherwise on the CPU
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # our dataset has two classes only - background and person
    num_classes = 2
    # use our dataset and the defined transformations
    dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
    dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))

    # split the dataset into a train and a test set
    indices = torch.randperm(len(dataset)).tolist()
    dataset = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

    # define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=2, shuffle=True, num_workers=4,
        collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False, num_workers=4,
        collate_fn=utils.collate_fn)

    # get the model using our helper function
    model = get_model_instance_segmentation(num_classes)

    # move the model to the right device
    model.to(device)

    # construct an optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005,
                                momentum=0.9, weight_decay=0.0005)
    # and a learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    # train for 10 epochs
    num_epochs = 10

    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        evaluate(model, data_loader_test, device=device)

    print("That's it!")
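
Because the data loaders use num_workers=4, the script's entry point should be guarded so that worker processes can import this module safely (required on platforms that start workers with spawn, such as Windows and macOS):

if __name__ == "__main__":
    main()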

Fine-tuning Torchvision models

from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy"""
Inputs
"""
# Top-level data directory. Here we assume the directory format conforms to the ImageFolder structure
data_dir = "./data/hymenoptera_data"# 从[resnet, alexnet, vgg, squeezenet, densenet, inception]中选择模型
model_name = "squeezenet"# 数据集中类别数量
num_classes = 2# 训练的批量大小(根据您的内存量而变化)
batch_size = 8# 你要训练的epoch数
num_epochs = 15# 用于特征提取的标志。 当为False时,我们微调整个模型,
# 当True时我们只更新重新形成的图层参数
feature_extract = True"""
Helper functions
"""
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
    since = time.time()

    val_acc_history = []

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over the data
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history only if in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate the loss.
                    # Special case for inception, because in training it has an auxiliary output.
                    # In train mode we calculate the loss by summing the final output and the
                    # auxiliary output, but in testing we only consider the final output.
                    if is_inception and phase == 'train':
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4*loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load the best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history


def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

"""
Initializing and reshaping the networks
When feature extracting, we only want to update the parameters of the last
layer, or in other words, we only want to update the parameters of the
layer(s) we are reshaping. Therefore, we do not need to compute the gradients
of the parameters that we are not changing, so for efficiency we set their
.requires_grad attribute to False. This is important because by default this
attribute is set to True. Then, when we initialize the new layer, the new
parameters have .requires_grad = True by default, so only the new layer's
parameters are updated. When we are fine-tuning, we can leave all of the
.requires_grad attributes set to the default of True.
"""
# #Resnet
# model.fc = nn.Linear(512, num_classes)
# #Alexnet
# model.classifier[6] = nn.Linear(4096,num_classes)
# #VGG
# model.classifier[6] = nn.Linear(4096,num_classes)
# #Squeezenet
# model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
# #Densenet
# model.classifier = nn.Linear(1024, num_classes)
# #Inception v3
# model.AuxLogits.fc = nn.Linear(768, num_classes)
# model.fc = nn.Linear(2048, num_classes)
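
To see the effect of set_parameter_requires_grad defined above (a small illustrative check, not part of the original tutorial), freeze a resnet18, reshape its head, and list which parameters remain trainable:

m = models.resnet18(pretrained=True)
set_parameter_requires_grad(m, feature_extracting=True)  # freeze everything
m.fc = nn.Linear(512, num_classes)  # the new layer defaults to requires_grad=True
print([n for n, p in m.named_parameters() if p.requires_grad])
# -> ['fc.weight', 'fc.bias']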
"""
Reshaping code
"""

def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Initialize these variables, which will be set in this if statement.
    # Each of these variables is model specific.
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size

# Initialize the model for this run
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)

# Print the model we just instantiated
print(model_ft)"""
Loading the data
"""
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}print("Initializing Datasets and Dataloaders...")# 创建训练和验证数据集
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}

# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")"""
Creating the optimizer
"""# 将模型发送到GPU
model_ft = model_ft.to(device)

# Gather the parameters to be optimized/updated in this run.
# If we are fine-tuning, we will be updating all parameters.
# However, if we are doing the feature extraction method, we will only update
# the parameters we have just initialized, i.e. the parameters with requires_grad True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            print("\t", name)

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)

"""
Running training and validation
"""
# Set up the loss function
criterion = nn.CrossEntropyLoss()

# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs, is_inception=(model_name=="inception"))

"""
Comparison with a model trained from scratch
"""
# Initialize the non-pretrained version of the model used for this run
scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
scratch_model = scratch_model.to(device)
scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
scratch_criterion = nn.CrossEntropyLoss()
_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))

# Plot the training curves of validation accuracy vs. number of training epochs
# for the transfer learning method and the model trained from scratch
ohist = []
shist = []

ohist = [h.cpu().numpy() for h in hist]
shist = [h.cpu().numpy() for h in scratch_hist]

plt.title("Validation Accuracy vs. Number of Training Epochs")
plt.xlabel("Training Epochs")
plt.ylabel("Validation Accuracy")
plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
plt.plot(range(1,num_epochs+1),shist,label="Scratch")
plt.ylim((0,1.))
plt.xticks(np.arange(1, num_epochs+1, 1.0))
plt.legend()
plt.show()

Spatial transformer networks

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np

plt.ion()   # interactive mode

"""
Loading the data
"""device = torch.device("cuda" if torch.cuda.is_available() else "cpu")# 训练数据集
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='./data/mnist/MNIST', train=True, download=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), batch_size=64, shuffle=True, num_workers=4)
# Test dataset
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='./data/mnist/MNIST', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), batch_size=64, shuffle=True, num_workers=4)

"""
Spatial transformer networks:
Structure:
The localization network is a regular CNN which regresses the transformation
parameters. The transformation is never learned explicitly from this dataset;
instead, the network automatically learns the spatial transformations that
enhance the global accuracy.
The grid generator generates a grid of coordinates in the input image
corresponding to each pixel from the output image.
The sampler uses the parameters of the transformation and applies them to the
input image.
"""
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

        # Spatial transformer localization network
        self.localization = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=7),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True)
        )

        # Regressor for the 3 * 2 affine matrix
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 3 * 3, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )

        # Initialize the weights/bias with the identity transformation
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    # Spatial transformer network forward function
    def stn(self, x):
        xs = self.localization(x)
        xs = xs.view(-1, 10 * 3 * 3)
        theta = self.fc_loc(xs)
        theta = theta.view(-1, 2, 3)

        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid)

        return x

    def forward(self, x):
        # transform the input
        x = self.stn(x)

        # Perform the usual forward pass
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Net().to(device)

"""
Training the model
"""optimizer = optim.SGD(model.parameters(), lr=0.01)def train(epoch):model.train()for batch_idx, (data, target) in enumerate(train_loader):data, target = data.to(device), target.to(device)optimizer.zero_grad()output = model(data)loss = F.nll_loss(output, target)loss.backward()optimizer.step()if batch_idx % 500 == 0:print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data), len(train_loader.dataset),100. * batch_idx / len(train_loader), loss.item()))
#
# A simple test procedure to measure the STN performance on MNIST.
#

def test():
    with torch.no_grad():
        model.eval()
        test_loss = 0
        correct = 0
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)

            # sum up batch loss
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # get the index of the max log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
              .format(test_loss, correct, len(test_loader.dataset),
                      100. * correct / len(test_loader.dataset)))

"""
Visualizing the STN results
"""
def convert_image_np(inp):
    """Convert a Tensor to numpy image."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    return inp

# We want to visualize the output of the spatial transformer layer
# after the training.
# We visualize a batch of input images and the corresponding transformed batch using the STN.
def visualize_stn():
    with torch.no_grad():
        # Get a batch of training data
        data = next(iter(test_loader))[0].to(device)

        input_tensor = data.cpu()
        transformed_input_tensor = model.stn(data).cpu()

        in_grid = convert_image_np(
            torchvision.utils.make_grid(input_tensor))

        out_grid = convert_image_np(
            torchvision.utils.make_grid(transformed_input_tensor))

        # Plot the results side-by-side
        f, axarr = plt.subplots(1, 2)
        axarr[0].imshow(in_grid)
        axarr[0].set_title('Dataset Images')
        axarr[1].imshow(out_grid)
        axarr[1].set_title('Transformed Images')


for epoch in range(1, 20 + 1):
    train(epoch)
    test()

# Visualize the STN transformation on some input batch
visualize_stn()

plt.ioff()
plt.show()

Neural style transfer with PyTorch

from __future__ import print_function

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from PIL import Image
import matplotlib.pyplot as plt

import torchvision.transforms as transforms
import torchvision.models as models

import copy

"""
Underlying principle
We define two distances, one for the content (D_C) and one for the style
(D_S). D_C measures how different the content is between two images, while
D_S measures how different the style is between two images. Then we take a
third image, the input, and transform it so as to minimize both its
content-distance from the content image and its style-distance from the
style image.
Now let's start the image style transfer.
"""

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

"""
Loading the images
The original PIL images have values between 0 and 255, but when transformed
into torch tensors, their values are converted to be between 0 and 1. The
images also need to be resized to the same dimensions. An important detail to
note is that neural networks from the torch library are trained with tensor
values ranging from 0 to 1. If you try to feed the networks 0-to-255 tensor
images, the activated feature maps will not be able to sense the intended
content and style. However, pre-trained networks from the Caffe library are
trained with 0-to-255 tensor images.
"""
imsize = 512 if torch.cuda.is_available() else 128  # use small size if no gpu

loader = transforms.Compose([
    transforms.Resize(imsize),  # scale imported image
    transforms.ToTensor()])  # transform it into a torch tensor


def image_loader(image_name):
    image = Image.open(image_name)
    # fake batch dimension required to fit the network's input dimensions
    image = loader(image).unsqueeze(0)
    return image.to(device, torch.float)


style_img = image_loader("./data/images//neural-style/picasso.jpg")
content_img = image_loader("./data/images//neural-style/dancing.jpg")

assert style_img.size() == content_img.size(), \
    "we need to import style and content images of the same size"
# Now, let's create a function that displays an image by reconverting a copy
# of it to PIL format and showing the copy with plt.imshow. We will try
# displaying the content and style images to make sure they were imported correctly.

unloader = transforms.ToPILImage()  # reconvert into PIL image

plt.ion()

def imshow(tensor, title=None):
    image = tensor.cpu().clone()  # we clone the tensor to not do changes on it
    image = image.squeeze(0)      # remove the fake batch dimension
    image = unloader(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated

plt.figure()
imshow(style_img, title='Style Image')

plt.figure()
imshow(content_img, title='Content Image')

"""
Content loss
The content loss is a function that represents a weighted version of the
content distance for an individual layer.
"""class ContentLoss(nn.Module):def __init__(self, target,):super(ContentLoss, self).__init__()# 我们从用于动态计算梯度的树中“分离”目标内容:# 这是一个声明的值,而不是变量。# 否则标准的正向方法将引发错误。self.target = target.detach()def forward(self, input):self.loss = F.mse_loss(input, self.target)return input"""
Style loss
It acts as a transparent layer in the network that computes the style loss
of the layer it sits on.
"""def gram_matrix(input):a, b, c, d = input.size()  # a=batch size(=1)# 特征映射 b=number# (c,d)=dimensions of a f. map (N=c*d)features = input.view(a * b, c * d)  # resise F_XL into \hat F_XLG = torch.mm(features, features.t())  # compute the gram product# 我们通过除以每个特征映射中的元素数来“标准化”gram矩阵的值.return G.div(a * b * c * d)class StyleLoss(nn.Module):def __init__(self, target_feature):super(StyleLoss, self).__init__()self.target = gram_matrix(target_feature).detach()def forward(self, input):G = gram_matrix(input)self.loss = F.mse_loss(G, self.target)return input"""

"""
Importing the model
"""cnn = models.vgg19(pretrained=True).features.to(device).eval()"""
Image preprocessing
"""
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)

# create a module to normalize the input image
# so we can easily put it in a nn.Sequential
class Normalization(nn.Module):
    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        # .view the mean and std to make them [C x 1 x 1] so that they can
        # directly work with image Tensor of shape [B x C x H x W].
        # B is batch size. C is number of channels. H is height and W is width.
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
        # normalize img
        return (img - self.mean) / self.std

"""
Creating a new Sequential module with the content loss and style loss modules correctly inserted.
"""
# desired depth layers to compute style/content losses:
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']

def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    cnn = copy.deepcopy(cnn)

    # normalization module
    normalization = Normalization(normalization_mean, normalization_std).to(device)

    # just in order to have iterable access to, or a list of, the content/style losses
    content_losses = []
    style_losses = []

    # assuming that cnn is a nn.Sequential, we make a new nn.Sequential
    # to put in the modules that are supposed to be activated sequentially
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the ContentLoss
            # and StyleLoss we insert below, so we replace it with an
            # out-of-place one here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses

"""
Selecting the input image
"""input_img = content_img.clone()
# if you want to use white noise instead, uncomment the following line:
# input_img = torch.randn(content_img.data.size(), device=device)

# add the original input image to the figure:
plt.figure()
imshow(input_img, title='Input Image')

"""
Gradient descent
"""def get_input_optimizer(input_img):# 此行显示输入是需要渐变的参数optimizer = optim.LBFGS([input_img.requires_grad_()])return optimizer"""
Each time the network is run, the input values are corrected (clamped) to stay between 0 and 1.
"""def run_style_transfer(cnn, normalization_mean, normalization_std,content_img, style_img, input_img, num_steps=300,style_weight=1000000, content_weight=1):"""Run the style transfer."""print('Building the style transfer model..')model, style_losses, content_losses = get_style_model_and_losses(cnn,normalization_mean, normalization_std, style_img, content_img)optimizer = get_input_optimizer(input_img)print('Optimizing..')run = [0]while run[0] <= num_steps:def closure():# 更正更新的输入图像的值input_img.data.clamp_(0, 1)optimizer.zero_grad()model(input_img)style_score = 0content_score = 0for sl in style_losses:style_score += sl.lossfor cl in content_losses:content_score += cl.lossstyle_score *= style_weightcontent_score *= content_weightloss = style_score + content_scoreloss.backward()run[0] += 1if run[0] % 50 == 0:print("run {}:".format(run))print('Style Loss : {:4f} Content Loss: {:4f}'.format(style_score.item(), content_score.item()))print()return style_score + content_scoreoptimizer.step(closure)# 最后的修正......input_img.data.clamp_(0, 1)return input_img"""
Running the algorithm.
"""output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,content_img, style_img, input_img)plt.figure()
imshow(output, title='Output Image')

# sphinx_gallery_thumbnail_number = 4
plt.ioff()
plt.show()

Generating adversarial examples

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt"""
Inputs
"""
epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "data/lenet_mnist_model.pth"
use_cuda=True"""
The model under attack
"""
# LeNet model definition
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

# declare the MNIST test dataset and data loading
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ])),
    batch_size=1, shuffle=True)

# define what device we are using
print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")# 初始化网络
model = Net().to(device)

# load the pretrained model
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

# set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()"""
The FGSM attack
FGSM perturbs the input in the direction that locally maximizes the loss:
perturbed_image = image + epsilon * sign(data_grad)
"""
# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
    # collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # create the perturbed image by adjusting each pixel of the input image
    perturbed_image = image + epsilon*sign_data_grad
    # add clipping to maintain the [0,1] range
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    # return the perturbed image
    return perturbed_image
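
A quick way to exercise the attack in isolation (a toy check, not part of the original tutorial) is to apply it to a random image with a random gradient, and confirm the result stays in [0, 1] while each pixel moves by at most epsilon:

img = torch.rand(1, 1, 28, 28)   # toy image already in [0, 1]
grad = torch.randn_like(img)     # stand-in for a real loss gradient
adv = fgsm_attack(img, 0.1, grad)
print(adv.min().item() >= 0 and adv.max().item() <= 1)   # clamped to [0, 1]
print((adv - img).abs().max().item() <= 0.1 + 1e-6)      # per-pixel change <= epsilon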

"""
The test function
"""
def test(model, device, test_loader, epsilon):

    # accuracy counter
    correct = 0
    adv_examples = []

    # loop over all examples in the test set
    for data, target in test_loader:

        # send the data and label to the device
        data, target = data.to(device), target.to(device)

        # set the requires_grad attribute of the tensor; important for the attack
        data.requires_grad = True

        # forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability

        # if the initial prediction is wrong, don't bother attacking, just move on
        if init_pred.item() != target.item():
            continue

        # calculate the loss
        loss = F.nll_loss(output, target)

        # zero all existing gradients
        model.zero_grad()

        # calculate gradients of the model in the backward pass
        loss.backward()

        # collect the data gradient
        data_grad = data.grad.data

        # call the FGSM attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)

        # re-classify the perturbed image
        output = model(perturbed_data)

        # check for success
        final_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            # special case for saving 0-epsilon examples
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
        else:
            # save some adversarial examples for visualization later
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))

    # calculate the final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))

    # return the accuracy and the adversarial examples
    return final_acc, adv_examples

"""
Running the attack
"""
accuracies = []
examples = []

# run the test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)

"""
Accuracy vs. epsilon
"""
plt.figure(figsize=(5,5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()"""
Sample adversarial examples
"""
# plot several examples of adversarial samples at each epsilon
cnt = 0
plt.figure(figsize=(8,10))
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons), len(examples[0]), cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig, adv, ex = examples[i][j]
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
