Inception (PyTorch implementation)

Paper: Going deeper with convolutions

Paper download: https://arxiv.org/pdf/1409.4842.pdf

Note: the Inception3 class in the code below implements the later Inception v3 architecture (Szegedy et al., "Rethinking the Inception Architecture for Computer Vision"), not the original GoogLeNet described in the paper linked above.

Network structure diagram: (figure omitted); the full model is defined in the code below.

import torch
import torch.nn as nn
import torch.nn.functional as F


class Inception3(nn.Module):

    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                values = values.view(m.weight.data.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        if self.transform_input:
            x = x.clone()
            x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        if self.training and self.aux_logits:
            aux = self.AuxLogits(x)
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        x = x.view(x.size(0), -1)
        # 2048
        x = self.fc(x)
        # 1000 (num_classes)
        if self.training and self.aux_logits:
            return x, aux
        return x


class InceptionA(nn.Module):

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)

        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)

        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class InceptionB(nn.Module):

    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class InceptionC(nn.Module):

    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))

        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)


class InceptionD(nn.Module):

    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)

        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)


class InceptionE(nn.Module):

    def __init__(self, in_channels):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)

        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class InceptionAux(nn.Module):

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        # 17 x 17 x 768
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        # 5 x 5 x 768
        x = self.conv0(x)
        # 5 x 5 x 128
        x = self.conv1(x)
        # 1 x 1 x 768
        x = x.view(x.size(0), -1)
        # 768
        x = self.fc(x)
        # 1000
        return x


class BasicConv2d(nn.Module):

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)


if __name__ == '__main__':
    # 'Inception3'
    # Example
    net = Inception3()
    print(net)
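
A minimal smoke test, as a sketch rather than part of the model definition: it assumes the classes above are defined in the same file or session (and that scipy is installed, since the constructor's truncated-normal initialization imports scipy.stats). It feeds a random batch of 299 x 299 RGB images and checks that training mode returns both the main and auxiliary logits, while eval mode returns only the main logits.

import torch

net = Inception3(num_classes=1000, aux_logits=True)

# Training mode: forward() returns (main_logits, aux_logits).
net.train()
x = torch.randn(2, 3, 299, 299)  # Inception v3 expects 299 x 299 inputs
logits, aux = net(x)
print(logits.shape, aux.shape)   # torch.Size([2, 1000]) torch.Size([2, 1000])

# Eval mode: the auxiliary head is skipped, only the main logits come back.
net.eval()
with torch.no_grad():
    logits = net(x)
print(logits.shape)              # torch.Size([2, 1000])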
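
To verify the feature-map sizes noted in the comments inside forward(), one option (again a sketch, not part of the original code) is to register forward hooks on a few of the named submodules and print their output shapes; the names below are the attribute names defined in __init__.

import torch

net = Inception3()
net.eval()

def report(name):
    def hook(module, inputs, output):
        print(name, tuple(output.shape))
    return hook

# Hook a few representative stages of the network.
for name in ['Conv2d_1a_3x3', 'Mixed_5b', 'Mixed_6a', 'Mixed_7c']:
    getattr(net, name).register_forward_hook(report(name))

with torch.no_grad():
    net(torch.randn(1, 3, 299, 299))
# Expected output (N, C, H, W):
#   Conv2d_1a_3x3 (1, 32, 149, 149)
#   Mixed_5b      (1, 256, 35, 35)
#   Mixed_6a      (1, 768, 17, 17)
#   Mixed_7c      (1, 2048, 8, 8)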
