pytorch模型转caffe模型

记录一个好用的pytorch模型转caffe模型的方法,源码链接如下:

https://github.com/xxradon/PytorchToCaffe

把代码clone下来后,进入example目录便可查看示例,

cd example
python resnet_pytorch_2_caffe.py
import sys
sys.path.insert(0, '.')
import torch
from torch.autograd import Variable
from torchvision.models import resnet
import pytorch_to_caffe

if __name__ == '__main__':
    # Base name for the converted Caffe model files.
    name = 'resnet18'
    # The PyTorch network to convert.
    resnet18 = resnet.resnet18()
    # Load the pretrained weights from disk.
    checkpoint = torch.load("/home/shining/Downloads/resnet18-5c106cde.pth")
    resnet18.load_state_dict(checkpoint)
    resnet18.eval()
    # Dummy input defining the network's input shape (NCHW).
    dummy_input = torch.ones([1, 3, 224, 224])
    # Trace the network and emit the prototxt + caffemodel files.
    pytorch_to_caffe.trans_net(resnet18, dummy_input, name)
    pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))

如果想把 PyTorch 的 flatten 操作转换到 Caffe,需要修改脚本 pytorch_to_caffe.py。修改后的脚本内容如下:

import torch
import torch.nn as nn
import traceback
from Caffe import caffe_net
import torch.nn.functional as F
from torch.autograd import Variable
from Caffe import layer_param
from torch.nn.modules.utils import _pair
import numpy as np


class Blob_LOG():
    """Dictionary-like map from id(tensor) to its assigned Caffe blob name."""

    def __init__(self):
        self.data = {}

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getitem__(self, key):
        return self.data[key]

    def __len__(self):
        return len(self.data)


# Set to True by trans_net(); hooks are pass-through until then.
NET_INITTED = False


class TransLog(object):
    """Bookkeeping for the trace: layer names, blob names and the Caffe net."""

    def __init__(self):
        """doing init() with inputs Variable before using it"""
        self.layers = {}
        self.detail_layers = {}
        self.detail_blobs = {}
        self._blobs = Blob_LOG()
        self._blobs_data = []
        self.cnet = caffe_net.Caffemodel('')
        self.debug = True

    def init(self, inputs):
        """
        :param inputs: is a list of input variables
        """
        self.add_blobs(inputs)

    def add_layer(self, name='layer'):
        """Return a unique Caffe layer name derived from `name`."""
        if name in self.layers:
            return self.layers[name]
        # Per-prefix counter gives conv1, conv2, ... style names.
        self.detail_layers[name] = self.detail_layers.get(name, 0) + 1
        unique = '{}{}'.format(name, self.detail_layers[name])
        self.layers[unique] = unique
        if self.debug:
            print("{} was added to layers".format(self.layers[unique]))
        return self.layers[unique]

    def add_blobs(self, blobs, name='blob', with_num=True):
        """Register output tensors and return their generated blob names."""
        rst = []
        for blob in blobs:
            # Keep a reference so the memory address (id) is not reused.
            self._blobs_data.append(blob)
            blob_id = int(id(blob))
            self.detail_blobs[name] = self.detail_blobs.get(name, 0) + 1
            if with_num:
                rst.append('{}{}'.format(name, self.detail_blobs[name]))
            else:
                rst.append('{}'.format(name))
            if self.debug:
                print("{}:{} was added to blobs".format(blob_id, rst[-1]))
            self._blobs[blob_id] = rst[-1]
        return rst

    def blobs(self, var):
        """Look up the blob name recorded for tensor `var` (None if unknown)."""
        var = id(var)
        try:
            return self._blobs[var]
        except:
            print("WARNING: CANNOT FOUND blob {}".format(var))
            return None


log = TransLog()
layer_names = {}
def _conv2d(raw, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Run F.conv2d and record an equivalent Caffe `Convolution` layer."""
    print('conv: ', log.blobs(input))
    x = raw(input, weight, bias, stride, padding, dilation, groups)
    name = log.add_layer(name='conv')
    log.add_blobs([x], name='conv_blob')
    layer = caffe_net.Layer_param(name=name, type='Convolution',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),
                     pad=_pair(padding), dilation=_pair(dilation),
                     bias_term=bias is not None, groups=groups)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.param.convolution_param.bias_term = False
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x


def _conv_transpose2d(raw, input, weight, bias=None, stride=1, padding=0,
                      output_padding=0, groups=1, dilation=1):
    """Run F.conv_transpose2d and record a Caffe `Deconvolution` layer."""
    x = raw(input, weight, bias, stride, padding, output_padding, groups, dilation)
    name = log.add_layer(name='conv_transpose')
    log.add_blobs([x], name='conv_transpose_blob')
    layer = caffe_net.Layer_param(name=name, type='Deconvolution',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.conv_param(x.size()[1], weight.size()[2:], stride=_pair(stride),
                     pad=_pair(padding), dilation=_pair(dilation),
                     bias_term=bias is not None, groups=groups)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.param.convolution_param.bias_term = False
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x


def _linear(raw, input, weight, bias=None):
    """Run F.linear and record a Caffe `InnerProduct` layer."""
    x = raw(input, weight, bias)
    layer_name = log.add_layer(name='fc')
    top_blobs = log.add_blobs([x], name='fc_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='InnerProduct',
                                  bottom=[log.blobs(input)], top=top_blobs)
    layer.fc_param(x.size()[1], has_bias=bias is not None)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x


def _split(raw, tensor, split_size, dim=0):
    """torch.split maps to Caffe's `Slice` layer."""
    x = raw(tensor, split_size, dim)
    layer_name = log.add_layer('split')
    top_blobs = log.add_blobs(x, name='split_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Slice',
                                  bottom=[log.blobs(tensor)], top=top_blobs)
    slice_num = int(np.floor(tensor.size()[dim] / split_size))
    slice_param = caffe_net.pb.SliceParameter(
        axis=dim, slice_point=[split_size * i for i in range(1, slice_num)])
    layer.param.slice_param.CopyFrom(slice_param)
    log.cnet.add_layer(layer)
    return x


def _pool(type, raw, input, x, kernel_size, stride, padding, ceil_mode):
    """Shared helper recording a Caffe `Pooling` layer for max/avg pooling.

    TODO: dilation, return_indices, and distinct h/w kernel/stride/padding.
    """
    layer_name = log.add_layer(name='{}_pool'.format(type))
    top_blobs = log.add_blobs([x], name='{}_pool_blob'.format(type))
    layer = caffe_net.Layer_param(name=layer_name, type='Pooling',
                                  bottom=[log.blobs(input)], top=top_blobs)
    layer.pool_param(kernel_size=kernel_size,
                     stride=kernel_size if stride is None else stride,
                     pad=padding, type=type.upper(), ceil_mode=ceil_mode)
    log.cnet.add_layer(layer)
    # Caffe always ceils the pooled size while PyTorch floors it when
    # ceil_mode=False; warn when the two disagree for this input.
    if ceil_mode == False and stride is not None:
        oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])
        owidth = (input.size()[3] - _pair(kernel_size)[1] + 2 * _pair(padding)[1]) % (_pair(stride)[1])
        if oheight != 0 or owidth != 0:
            caffe_out = raw(input, kernel_size, stride, padding, ceil_mode=True)
            print("WARNING: the output shape miss match at {}: "
                  "input {} output---Pytorch:{}---Caffe:{}\n"
                  "This is caused by the different implementation that ceil mode in caffe and the floor mode in pytorch.\n"
                  "You can add the clip layer in caffe prototxt manually if shape mismatch error is caused in caffe. "
                  .format(layer_name, input.size(), x.size(), caffe_out.size()))


def _max_pool2d(raw, input, kernel_size, stride=None, padding=0, dilation=1,
                ceil_mode=False, return_indices=False):
    x = raw(input, kernel_size, stride, padding, dilation, ceil_mode, return_indices)
    _pool('max', raw, input, x, kernel_size, stride, padding, ceil_mode)
    return x


def _avg_pool2d(raw, input, kernel_size, stride=None, padding=0, ceil_mode=False,
                count_include_pad=True, divisor_override=None):
    # NOTE(review): divisor_override is accepted but not forwarded to `raw`.
    x = raw(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
    _pool('ave', raw, input, x, kernel_size, stride, padding, ceil_mode)
    return x


def _adaptive_avg_pool2d(raw, input, output_size):
    """Emulate adaptive average pooling with a fixed kernel/stride pooling."""
    x = raw(input, output_size)
    if isinstance(output_size, int):
        out_dim = output_size
    else:
        out_dim = output_size[0]
    tmp = max(input.shape[2], input.shape[3])
    stride = tmp // out_dim
    kernel_size = tmp - (out_dim - 1) * stride
    _pool('ave', raw, input, x, kernel_size, stride, 0, False)
    return x


def _max(raw, *args):
    """torch.max: the two-tensor form maps to Caffe Eltwise MAX."""
    x = raw(*args)
    if len(args) == 1:
        # Single-tensor torch.max (global max / max along a dim) has no Caffe
        # mapping here; pass the result through without emitting a layer.
        # (FIX: the original `assert NotImplementedError` was a no-op —
        # asserting a truthy class object never fails.)
        pass
    else:
        bottom_blobs = []
        for arg in args:
            bottom_blobs.append(log.blobs(arg))
        layer_name = log.add_layer(name='max')
        top_blobs = log.add_blobs([x], name='max_blob')
        layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                      bottom=bottom_blobs, top=top_blobs)
        layer.param.eltwise_param.operation = 2  # MAX
        log.cnet.add_layer(layer)
    return x


def _cat(raw, inputs, dimension=0):
    """torch.cat maps to Caffe `Concat` along `dimension`."""
    x = raw(inputs, dimension)
    bottom_blobs = []
    for input in inputs:
        bottom_blobs.append(log.blobs(input))
    layer_name = log.add_layer(name='cat')
    top_blobs = log.add_blobs([x], name='cat_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Concat',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.concat_param.axis = dimension
    log.cnet.add_layer(layer)
    return x


def _dropout(raw, input, p=0.5, training=False, inplace=False):
    """F.dropout maps to a train-phase-only Caffe `Dropout` layer (in-place)."""
    x = raw(input, p, training, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='dropout')
    # Reuse the bottom blob name so the layer is in-place in the prototxt.
    top_blobs = log.add_blobs([x], name=bottom_blobs[0], with_num=False)
    layer = caffe_net.Layer_param(name=layer_name, type='Dropout',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.dropout_param.dropout_ratio = p
    layer.param.include.extend([caffe_net.pb.NetStateRule(phase=0)])  # 1 for test, 0 for train
    log.cnet.add_layer(layer)
    return x


def _threshold(raw, input, threshold, value, inplace=False):
    """F.threshold: the (0, 0) case is a ReLU; otherwise a Caffe `Threshold`."""
    if threshold == 0 and value == 0:
        x = raw(input, threshold, value, inplace)
        bottom_blobs = [log.blobs(input)]
        name = log.add_layer(name='relu')
        log.add_blobs([x], name='relu_blob')
        layer = caffe_net.Layer_param(name=name, type='ReLU',
                                      bottom=bottom_blobs, top=[log.blobs(x)])
        log.cnet.add_layer(layer)
        return x
    if value != 0:
        # FIX: was `raise NotImplemented(...)` — NotImplemented is a constant,
        # not an exception class, so it raised TypeError instead.
        raise NotImplementedError("value !=0 not implemented in caffe")
    # FIX: the original passed `input` twice to raw(), corrupting the call.
    x = raw(input, threshold, value, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='threshold')
    top_blobs = log.add_blobs([x], name='threshold_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Threshold',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.threshold_param.threshold = threshold
    log.cnet.add_layer(layer)
    return x


def _relu(raw, input, inplace=False):
    """F.relu maps to Caffe `ReLU` (always traced as non-inplace)."""
    x = raw(input, False)
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
def _prelu(raw, input, weight):
    """F.prelu maps to Caffe `PReLU` (channel-shared when weight has size 1)."""
    x = raw(input, weight)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='PReLU',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    if weight.size()[0] == 1:
        layer.param.prelu_param.channel_shared = True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x


def _leaky_relu(raw, input, negative_slope=0.01, inplace=False):
    """F.leaky_relu maps to Caffe `ReLU` with a negative_slope parameter."""
    x = raw(input, negative_slope)
    layer_name = log.add_layer(name='leaky_relu')
    log.add_blobs([x], name='leaky_relu_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.param.relu_param.negative_slope = negative_slope
    log.cnet.add_layer(layer)
    return x


def _tanh(raw, input):
    """Tanh activation; maps to Caffe `TanH`."""
    x = raw(input)
    layer_name = log.add_layer(name='tanh')
    log.add_blobs([x], name='tanh_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='TanH',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x


def _softmax(raw, input, dim=None, _stacklevel=3):
    """F.softmax maps to Caffe `Softmax` along the resolved axis."""
    x = raw(input, dim=dim)
    if dim is None:
        dim = F._get_softmax_dim('softmax', input.dim(), _stacklevel)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='softmax')
    log.add_blobs([x], name='softmax_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Softmax',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    layer.param.softmax_param.axis = dim
    log.cnet.add_layer(layer)
    return x


def _batch_norm(raw, input, running_mean, running_var, weight=None, bias=None,
                training=False, momentum=0.1, eps=1e-5):
    """F.batch_norm maps to Caffe `BatchNorm` (+ `Scale` when affine).

    running_mean/running_var are cloned before export because the raw call
    may modify them in place.
    """
    x = raw(input, running_mean, running_var, weight, bias, training, momentum, eps)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='batch_norm')
    top_blobs = log.add_blobs([x], name='batch_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # No stored statistics: normalize over the current mini-batch.
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
        running_mean_clone = running_mean.clone()
        running_var_clone = running_var.clone()
        layer1.add_data(running_mean_clone.cpu().numpy(),
                        running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        # Affine part: gamma/beta become an in-place Scale layer.
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x


def _instance_norm(raw, input, running_mean=None, running_var=None, weight=None,
                   bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    """F.instance_norm approximated through BatchNorm (batch size must be 1).

    TODO: support batch size != 1 via view operations.
    """
    print("WARNING: The Instance Normalization transfers to Caffe using BatchNorm, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: the affine=True or track_running_stats=True case
        raise NotImplementedError("not implement the affine=True or track_running_stats=True case InstanceNorm")
    x = torch.batch_norm(input, weight, bias, running_mean, running_var,
                         use_input_stats, momentum, eps, torch.backends.cudnn.enabled)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # Per-instance statistics: export neutral mean/var placeholders.
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
        running_mean = torch.zeros(input.size()[1])
        running_var = torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(running_mean_clone.cpu().numpy(),
                    running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x

# upsample layer
def _interpolate(raw, input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    """Map F.interpolate to a Caffe `Upsample` layer.

    Only nearest-neighbour mode without align_corners is supported. The Caffe
    Upsample layer should be configured through explicit output sizes
    (upsample_h / upsample_w); the legacy scale / scale_h / scale_w /
    pad_out_* parameters are deprecated and not used here.
    """
    if mode != "nearest" or align_corners is not None:
        # FIX: error message typo corrected ("totoaly" -> "totally").
        raise NotImplementedError("not implement F.interpolate totally")
    x = raw(input, size, scale_factor, mode)
    layer_name = log.add_layer(name='upsample')
    top_blobs = log.add_blobs([x], name='upsample_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Upsample',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # NOTE(review): `size` here is the *input* spatial size; presumably
    # upsample_param multiplies it by scale_factor — verify against caffe_net.
    layer.upsample_param(size=(input.size(2), input.size(3)), scale_factor=scale_factor)
    log.cnet.add_layer(layer)
    return x

# sigmoid layer
def _sigmoid(raw, input):
    """Element-wise sigmoid, Sigmoid(x) = 1 / (1 + exp(-x)); maps to Caffe `Sigmoid`."""
    x = raw(input)
    layer_name = log.add_layer(name='sigmoid')
    log.add_blobs([x], name='sigmoid_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Sigmoid',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x

# tanh layer
def _tanh(raw, input):
    """Element-wise tanh (torch.nn.Tanh); maps to Caffe `TanH`.

    NOTE: this redefines the `_tanh` declared earlier in the file; being the
    later definition, it is the one actually registered on F.tanh.
    """
    x = raw(input)
    layer_name = log.add_layer(name='tanh')
    log.add_blobs([x], name='tanh_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='TanH',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x


def _hardtanh(raw, input, min_val, max_val, inplace):
    """F.hardtanh (torch.nn.ReLU6); maps to Caffe `ReLU6`."""
    print('relu6: ', log.blobs(input))
    x = raw(input, min_val, max_val)
    layer_name = log.add_layer(name='relu6')
    log.add_blobs([x], name='relu6_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='ReLU6',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x

# L2Norm layer
def _l2Norm(raw, input, weight, eps):
    """L2Norm (as used in VGG-SSD); maps to Caffe `Normalize`."""
    x = raw(input, weight, eps)
    name = log.add_layer(name='normalize')
    log.add_blobs([x], name='normalize_blob')
    layer = caffe_net.Layer_param(name=name, type='Normalize',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    layer.norm_param(eps)
    layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x


def _div(raw, inputs, inputs2):
    # NOTE(review): only the output blob is tracked; no Caffe layer is emitted
    # for torch.div — confirm this is intentional.
    x = raw(inputs, inputs2)
    log.add_blobs([x], name='div_blob')
    return x


# ----- for Variable operations --------

def _view(input, *args):
    """Tensor.view maps to Caffe `Reshape` (first dim pinned to batch)."""
    x = raw_view(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='view')
    top_blobs = log.add_blobs([x], name='view_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Reshape',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # TODO: reshape added to nn_tools layer
    dims = list(args)
    dims[0] = 0  # 0 means "copy the batch dimension" in Caffe's Reshape
    layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
    log.cnet.add_layer(layer)
    return x


def _mean(input, *args, **kwargs):
    """Tensor.mean maps to Caffe `Reduction` (operation MEAN) along `dim`."""
    x = raw_mean(input, *args, **kwargs)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mean')
    top_blobs = log.add_blobs([x], name='mean_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Reduction',
                                  bottom=[log.blobs(input)], top=top_blobs)
    if len(args) == 1:
        dim = args[0]
    elif 'dim' in kwargs:
        dim = kwargs['dim']
    else:
        raise NotImplementedError('mean operation must specify a dim')
    layer.param.reduction_param.operation = 4  # MEAN
    layer.param.reduction_param.axis = dim
    log.cnet.add_layer(layer)
    return x


def _add(input, *args):
    """Tensor.__add__ maps to Caffe Eltwise SUM."""
    x = raw__add__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    if log.blobs(args[0]) is None:
        # Operand was produced outside the trace; register it so later
        # layers can reference it, but emit no Eltwise layer here.
        log.add_blobs([args[0]], name='extra_blob')
    else:
        layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                      bottom=[log.blobs(input), log.blobs(args[0])],
                                      top=top_blobs)
        layer.param.eltwise_param.operation = 1  # sum is 1
        log.cnet.add_layer(layer)
    return x


def _iadd(input, *args):
    """Tensor.__iadd__ maps to Caffe Eltwise SUM (output cloned to get a fresh id)."""
    x = raw__iadd__(input, *args)
    if not NET_INITTED:
        return x
    x = x.clone()
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])],
                                  top=top_blobs)
    layer.param.eltwise_param.operation = 1  # sum is 1
    log.cnet.add_layer(layer)
    return x


def _sub(input, *args):
    """Tensor.__sub__ maps to Caffe Eltwise SUM with coefficients (1, -1)."""
    x = raw__sub__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])],
                                  top=top_blobs)
    layer.param.eltwise_param.operation = 1  # sum is 1
    layer.param.eltwise_param.coeff.extend([1., -1.])
    log.cnet.add_layer(layer)
    return x


def _isub(input, *args):
    """Tensor.__isub__ maps to Caffe Eltwise SUM (output cloned to get a fresh id).

    NOTE(review): unlike _sub, no (1, -1) coefficients are set here — this
    exports an addition, not a subtraction; confirm against upstream.
    """
    x = raw__isub__(input, *args)
    if not NET_INITTED:
        return x
    x = x.clone()
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])],
                                  top=top_blobs)
    layer.param.eltwise_param.operation = 1  # sum is 1
    log.cnet.add_layer(layer)
    return x


def _mul(input, *args):
    """Tensor.__mul__ maps to Caffe Eltwise PROD."""
    x = raw__mul__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])],
                                  top=top_blobs)
    layer.param.eltwise_param.operation = 0  # product is 0
    log.cnet.add_layer(layer)
    return x


def _imul(input, *args):
    """Tensor.__imul__ maps to Caffe Eltwise PROD (output cloned to get a fresh id)."""
    x = raw__imul__(input, *args)
    if not NET_INITTED:
        return x
    x = x.clone()
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])],
                                  top=top_blobs)
    layer.param.eltwise_param.operation = 0  # product is 0
    # FIX: removed `coeff.extend([1., -1.])` — Caffe's Eltwise layer only
    # accepts coefficients with the SUM operation and rejects them for PROD.
    log.cnet.add_layer(layer)
    return x

# Permute layer
def _permute(input, *args):
    """Tensor.permute maps to Caffe `Permute` (SSD-style); expects a 4-D order."""
    x = raw__permute__(input, *args)
    # FIX: guard added for consistency with _view/_mean/_add — without it,
    # permute calls made before trans_net() would record layers with
    # unresolved (None) bottom blobs.
    if not NET_INITTED:
        return x
    name = log.add_layer(name='permute')
    log.add_blobs([x], name='permute_blob')
    layer = caffe_net.Layer_param(name=name, type='Permute',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    # Assumes a 4-D permutation order — TODO confirm permute_param's arity.
    order1 = args[0]
    order2 = args[1]
    order3 = args[2]
    order4 = args[3]
    layer.permute_param(order1, order2, order3, order4)
    log.cnet.add_layer(layer)
    return x

# contiguous
def _contiguous(input, *args):
    """Tensor.contiguous: records a placeholder layer of type 'NeedRemove'
    that save_prototxt() strips before writing the net."""
    x = raw__contiguous__(input, *args)
    layer_name = log.add_layer(name='contiguous')
    log.add_blobs([x], name='contiguous_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='NeedRemove',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x

# pow
def _pow(input, *args):
    """Tensor.pow: only the output blob is tracked; no Caffe layer is emitted."""
    x = raw__pow__(input, *args)
    log.add_blobs([x], name='pow_blob')
    return x

# sum
def _sum(input, *args):
    """Tensor.sum: only the output blob is tracked; no Caffe layer is emitted."""
    x = raw__sum__(input, *args)
    log.add_blobs([x], name='sum_blob')
    return x

# sqrt
def _sqrt(input, *args):
    """Tensor.sqrt: only the output blob is tracked; no Caffe layer is emitted."""
    x = raw__sqrt__(input, *args)
    log.add_blobs([x], name='sqrt_blob')
    return x

# unsqueeze
def _unsqueeze(input, *args):
    """Tensor.unsqueeze: only the output blob is tracked; no Caffe layer is emitted."""
    x = raw__unsqueeze__(input, *args)
    log.add_blobs([x], name='unsqueeze_blob')
    return x


def _expand_as(input, *args):
    """Tensor.expand_as, emulated as a 1x1 all-ones `Convolution` in Caffe.

    Only supports expanding A(1, 1, H, W) to B(1, C, H, W): the constant
    kernel copies the single input channel to each of the C output channels.
    """
    x = raw__expand_as__(input, *args)
    # FIX: TransLog.add_layer() takes no `with_num` keyword — the original
    # `log.add_layer(name="expand_as", with_num=True)` raised TypeError.
    layer_name = log.add_layer(name="expand_as")
    log.add_blobs([x], name='expand_as_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Convolution',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])

    def constant_weight(shape):
        weights = np.ones(shape, dtype='float32')
        return weights

    channels = args[0].size(1)
    weight = constant_weight([channels, 1, 1, 1])
    layer.conv_param(channels, kernel_size=1, bias_term=False,
                     weight_filler_type='xavier')
    layer.add_data(weight)
    log.cnet.add_layer(layer)
    return x

# -- modification for flatten support starts here --
def _flatten(raw, input, *args, **kwargs):
    """torch.flatten maps to Caffe `Reshape`.

    Follows torch.flatten semantics: start_dim defaults to 0, end_dim
    defaults to -1, and end_dim is inclusive; both may be negative and may
    be passed positionally or as keywords.
    """
    x = raw(input, *args, **kwargs)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='flatten')
    top_blobs = log.add_blobs([x], name='flatten_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Reshape',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # FIX: the original read args[0]/args[1] unconditionally (IndexError for
    # torch.flatten(x) and keyword calls) and derived the reshape dims from
    # the already-flattened *output* shape, which is wrong when more than two
    # input dims are merged. Compute the dims from the input shape instead.
    start_dim = args[0] if len(args) > 0 else kwargs.get('start_dim', 0)
    end_dim = args[1] if len(args) > 1 else kwargs.get('end_dim', -1)
    in_shape = list(input.shape)
    if start_dim < 0:
        start_dim += len(in_shape)
    if end_dim < 0:
        end_dim += len(in_shape)
    flat = 1
    for s in in_shape[start_dim:end_dim + 1]:
        flat *= s
    dims = in_shape[:start_dim] + [flat] + in_shape[end_dim + 1:]
    layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
    log.cnet.add_layer(layer)
    return x


class Rp(object):
    """Callable that replaces a raw torch function with its tracing hook."""

    def __init__(self, raw, replace, **kwargs):
        # replace the raw function with the tracing replacement
        self.obj = replace
        self.raw = raw

    def __call__(self, *args, **kwargs):
        # Pass through untouched until trans_net() has initialised the trace.
        if not NET_INITTED:
            return self.raw(*args, **kwargs)
        # Walk the Python stack to find the nn.Module instance this call
        # originates from, so the layer can be tagged with its PyTorch name.
        for stack in traceback.walk_stack(None):
            if 'self' in stack[0].f_locals:
                layer = stack[0].f_locals['self']
                if layer in layer_names:
                    log.pytorch_layer_name = layer_names[layer]
                    print(layer_names[layer])
                    break
        out = self.obj(self.raw, *args, **kwargs)
        return out


F.conv2d = Rp(F.conv2d, _conv2d)
# Install the tracing hooks on torch.nn.functional and torch-level ops.
F.linear = Rp(F.linear, _linear)
F.relu = Rp(F.relu, _relu)
F.leaky_relu = Rp(F.leaky_relu, _leaky_relu)
F.max_pool2d = Rp(F.max_pool2d, _max_pool2d)
F.avg_pool2d = Rp(F.avg_pool2d, _avg_pool2d)
F.adaptive_avg_pool2d = Rp(F.adaptive_avg_pool2d, _adaptive_avg_pool2d)
F.dropout = Rp(F.dropout, _dropout)
F.threshold = Rp(F.threshold, _threshold)
F.prelu = Rp(F.prelu, _prelu)
F.batch_norm = Rp(F.batch_norm, _batch_norm)
F.instance_norm = Rp(F.instance_norm, _instance_norm)
F.softmax = Rp(F.softmax, _softmax)
F.conv_transpose2d = Rp(F.conv_transpose2d, _conv_transpose2d)
F.interpolate = Rp(F.interpolate, _interpolate)
F.sigmoid = Rp(F.sigmoid, _sigmoid)
# FIX: F.tanh was wrapped twice here; the duplicate stacked a second Rp
# around the first and emitted two TanH layers per call, so it was removed.
F.tanh = Rp(F.tanh, _tanh)
F.hardtanh = Rp(F.hardtanh, _hardtanh)
# F.l2norm = Rp(F.l2norm,_l2Norm)
torch.split = Rp(torch.split, _split)
torch.max = Rp(torch.max, _max)
torch.cat = Rp(torch.cat, _cat)
torch.div = Rp(torch.div, _div)
# This registration is also required for the flatten conversion.
torch.flatten = Rp(torch.flatten, _flatten)
# TODO: other types of the view function
try:
    # Old PyTorch (< 0.4): tensor operations live on autograd.Variable.
    raw_view = Variable.view
    Variable.view = _view
    raw_mean = Variable.mean
    Variable.mean = _mean
    raw__add__ = Variable.__add__
    Variable.__add__ = _add
    raw__iadd__ = Variable.__iadd__
    Variable.__iadd__ = _iadd
    raw__sub__ = Variable.__sub__
    Variable.__sub__ = _sub
    raw__isub__ = Variable.__isub__
    Variable.__isub__ = _isub
    raw__mul__ = Variable.__mul__
    Variable.__mul__ = _mul
    raw__imul__ = Variable.__imul__
    Variable.__imul__ = _imul
except Exception:
    # PyTorch 0.4.0 and later: patch torch.Tensor instead.
    # (FIX: was a bare `except:`, which also swallowed KeyboardInterrupt.)
    for t in [torch.Tensor]:
        raw_view = t.view
        t.view = _view
        raw_mean = t.mean
        t.mean = _mean
        raw__add__ = t.__add__
        t.__add__ = _add
        raw__iadd__ = t.__iadd__
        t.__iadd__ = _iadd
        raw__sub__ = t.__sub__
        t.__sub__ = _sub
        raw__isub__ = t.__isub__
        t.__isub__ = _isub
        raw__mul__ = t.__mul__
        t.__mul__ = _mul
        raw__imul__ = t.__imul__
        t.__imul__ = _imul
        raw__permute__ = t.permute
        t.permute = _permute
        raw__contiguous__ = t.contiguous
        t.contiguous = _contiguous
        raw__pow__ = t.pow
        t.pow = _pow
        raw__sum__ = t.sum
        t.sum = _sum
        raw__sqrt__ = t.sqrt
        t.sqrt = _sqrt
        raw__unsqueeze__ = t.unsqueeze
        t.unsqueeze = _unsqueeze
        raw__expand_as__ = t.expand_as
        t.expand_as = _expand_as


def trans_net(net, input_var, name='TransferedPytorchModel'):
    """Trace `net` on `input_var` and build the Caffe net description.

    Enables the installed hooks (NET_INITTED), records the mapping from
    nn.Module instances to their PyTorch names, then runs a forward pass so
    every hooked op appends its Caffe layer to log.cnet.
    """
    print('Starting Transform, This will take a while')
    log.init([input_var])
    log.cnet.net.name = name
    log.cnet.net.input.extend([log.blobs(input_var)])
    log.cnet.net.input_dim.extend(input_var.size())
    global NET_INITTED
    NET_INITTED = True
    for name, layer in net.named_modules():
        layer_names[layer] = name
    print("torch ops name:", layer_names)
    out = net.forward(input_var)
    print('Transform Completed')


def save_prototxt(save_name):
    """Write the net description, dropping 'NeedRemove' pass-through layers."""
    log.cnet.remove_layer_by_type("NeedRemove")
    log.cnet.save_prototxt(save_name)


def save_caffemodel(save_name):
    """Write the collected weights to a .caffemodel file."""
    log.cnet.save(save_name)

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/639995.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

【遥感数字图像处理(朱文泉)】各章博文链接汇总及思维导图

遥感数字图像处理课程汇总 第0章 绪论第一章 数字图像基础第二章 数字图像存储与处理第三章 空间域处理方法第四章 变换域处理方法第五章 辐射校正第六章 几何校正第七章 图像去噪声第八章 图像增强第九章 感兴趣目标及对象提取第十章 特征提取与选择第十一章 遥感数字图像分类…

【ASP.NET Core 基础知识】--路由和请求处理--路由概念(二)

一、路由参数传递方式 1.1 查询字符串参数 在路由中,查询字符串参数是一种常见的方式传递信息。这种方式通过URL中的查询字符串(?key1value1&key2value2)将参数附加到请求中。在ASP.NET Core中,可以通过以下方式在控制器动…

c++学习之IO流

目录 前言: 一,流的概念 二,c的io流 输入输出流 缓冲区的同步 文件流 文件的打开 文件读写自定义类型数据 字符流 1. 将数值类型数据格式化为字符串 2. 字符串拼接 3. 序列化和反序列化结构数据 前言: 在了解c的输入输…

SpringBoot异常处理和单元测试

学习目标 Spring Boot 异常处理Spring Boot 单元测试 1.SpringBoot异常处理 1.1.自定义错误页面 SpringBoot默认的处理异常的机制:SpringBoot 默认的已经提供了一套处理异常的机制。一旦程序中出现了异常 SpringBoot 会向/error 的 url 发送请求。在 springBoot…

c语言->学会offsetof宏计算结构体相对偏移量

前言 ✅作者简介:大家好,我是橘橙黄又青,一个想要与大家共同进步的男人😉😉 🍎个人主页:橘橙黄又青-CSDN博客 目的,学习offsetof宏计算结构体相对偏移量 1.offsetof宏 来我们看图…

vector讲解

在学习玩string后我们开始学习vector,本篇博客将对vector进行简单的介绍,还会对vector一些常用的函数进行讲解 vector的介绍 实际上vector就是一个数组的数据结构,但是vector是由C编写而成的,他和数组也有本质上的区别&#xff…

2.机器学习-K最近邻(k-Nearest Neighbor,KNN)分类算法原理讲解

2️⃣机器学习-K最近邻(k-Nearest Neighbor,KNN)分类算法原理讲解 个人简介一算法概述二算法思想2.1 KNN的优缺点 三实例演示3.1电影分类3.2使用KNN算法预测 鸢(yuan)尾花 的种类3.3 预测年收入是否大于50K美元 个人简介 🏘️&…

android 导航app 稳定性问题总结

一 重写全局异常处理: 1 是过滤掉一些已知的无法处理的 问题,比如TimeoutException 这种无法根除只能缓解的问题可以直接catch掉 2 是 一些无法继续的问题可以直接杀死重启,一些影响不是很大的,可以局部还原 比如: p…

题记(23)--整除问题

目录 一、题目内容 二、输入描述 三、输出描述 四、输入输出示例 五、完整C语言代码 一、题目内容 给定n&#xff0c;a求最大的k&#xff0c;使n&#xff01;可以被a^k整除但不能被a^(k1)整除。 二、输入描述 两个整数n(2<n<1000)&#xff0c;a(2<a<1000) 三、…

实时流媒体传输开源库——Live555

Live555&#xff08;LiveMedia Libraries&#xff09;是一个开源的多媒体流处理库&#xff0c;主要用于实现基于标准网络协议的实时流媒体传输。Live555提供了一套 C 类库&#xff0c;可以用于开发支持 RTP/RTCP、RTSP、SIP 等协议的流媒体服务器和客户端应用程序。它广泛用于视…

flink部署模式介绍

在一些应用场景中&#xff0c;对于集群资源分配和占用的方式&#xff0c;可能会有特定的需求。Flink 为各种场景提供了不同的部署模式&#xff0c;主要有以下三种&#xff0c;它们的区别主要在于&#xff1a; 集群的生命周期以及资源的分配方式&#xff1b;应用的 main 方法到…

XXL-Job的搭建接入Springboot项目(详细)

一、XXL-Job介绍 XXL-Job 是一款开源的分布式任务调度平台&#xff0c;由 Xuxueli&#xff08;徐雪里&#xff09;开发。它基于 Java 技术栈&#xff0c;提供了一套简单易用、高可靠性的任务调度解决方案。 XXL-Job 的主要作用是帮助开发者实现定时任务的调度和执行。它可以用…

OpenCvSharp 通道拆分、空间转换

通道拆分、空间转换 通道拆分 //通道拆分&#xff08;BGR&#xff09; Mat[] BGR Cv2.Split(src); //通道合并 Mat dst new Mat(); Cv2.Merge(BGR, dst);空间转换 //BGR转灰度图 Mat dst new Mat(); Cv2.CvtColor(src, dst , ColorConversionCodes.BGR2GRAY); Cv2.ImShow…

MySQL 深度分页

文章目录 一、深度分页二、测试数据三、分页策略3.1 默认分页3.2 索引分页3.3 子查询3.4 延迟关联 一、深度分页 MySQL 深度分页是指在分页查询数据量比较大的表时&#xff0c;需要访问表中的某一段数据&#xff0c;而这段数据的位置非常靠后&#xff0c;需要通过较大的 offse…

2024年轻人返乡创业潮,外卖平台市场需求是怎么样的?

目前&#xff0c;县域经济正面临着几大利好。“返乡就业、返乡创业和告老还乡”现象越发普遍&#xff0c;这不仅在小县城中有所体现&#xff0c;同样在乡镇中也呈现出同样的趋势。一些产业链和工厂纷纷下沉到乡镇&#xff0c;带来了更多的就业机会。这不仅能够吸引年轻人回乡就…

Spring Boot 4.0:构建云原生Java应用的前沿工具

目录 前言 Spring Boot简介 Spring Boot 的新特性 1. 支持JDK 17 2. 集成云原生组件 3. 响应式编程支持 4. 更强大的安全性 5. 更简化的配置 Spring Boot 的应用场景 1. 云原生应用开发 2. 响应式应用程序 3. 安全性要求高的应用 4. JDK 17的应用 总结 作…

【Java万花筒】代码安全护航:Java自动化测试与BDD

自动化测试与行为驱动开发&#xff08;BDD&#xff09;库&#xff1a; 前言 在当今软件开发领域&#xff0c;自动化测试和行为驱动开发&#xff08;BDD&#xff09;已经成为确保软件质量和可维护性的不可或缺的一部分。Java开发者可以借助多种库和框架来实现自动化测试&#…

CRM系统--盘点五大CRM客户管理系统

在当今市场经济中&#xff0c;销售工作的重要性日益凸显&#xff0c;有效的客户管理成为了提升销售业绩的关键因素。面对日新月异的市场环境和客户需求的多样化&#xff0c;销售人员通常会面临以下问题&#xff1a; 接到了新的销售任务&#xff0c;该如何选择和确定目标客户&am…

什么是线程?线程和进程有什么区别?在Java中如何创建线程?你能解释一下Java中的synchronized关键字吗?

什么是线程&#xff1f;线程和进程有什么区别&#xff1f; 线程是操作系统进行运算调度的最小单位&#xff0c;它是进程中的实际运作单位。线程自己不拥有系统资源&#xff0c;只拥有一点在运行中必不可少的资源&#xff0c;但它可与同属一个进程的其它线程共享进程所拥有的全部…

API协议设计的十种技术

文章目录 前言一、REST二、GraphQL三、gRPC&#xff08;google Remote Procedure Calls&#xff09;四、Webhooks五、服务端的事件发送——SSE&#xff08;Server-sent Events&#xff09;六、EDI&#xff08;Electronic Data Interchange&#xff09;七、面向API 的事件驱动设…