莫烦Pytorch神经网络第三章代码修改

3.1 Regression 回归

import torch
import torch.nn.functional as F
from torch.autograd import Variable  # deprecated since PyTorch 0.4; kept as a file-level import, no longer used
import matplotlib.pyplot as plt

# ---------------------------------------------------------------------------
# Create the data: 100 points in [-1, 1] with a noisy quadratic target.
# ---------------------------------------------------------------------------
# unsqueeze gives shape (100, 1): nn.Linear expects a (batch, features) input.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())   # y = x^2 plus uniform noise in [0, 0.2)

# NOTE: the original tutorial wrapped these as x, y = Variable(x), Variable(y);
# since PyTorch 0.4 plain tensors support autograd directly, so the wrap is a
# no-op and has been dropped.

# plt.scatter(x.data.numpy(), y.data.numpy())   # preview the raw data
# plt.show()

# ---------------------------------------------------------------------------
# Build the network
# ---------------------------------------------------------------------------
class Net(torch.nn.Module):
    """Minimal two-layer MLP for 1-D regression.

    Args:
        n_features: size of the input layer.
        n_hidden:   size of the single hidden layer.
        n_out:      size of the output layer.
    """

    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        # Hidden layer with ReLU non-linearity, then a plain linear read-out
        # (no activation on the output: this is a regression head).
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x


net = Net(1, 10, 1)   # 1 input feature -> 10 hidden units -> 1 output
# print(net)   # uncomment to inspect the layer structure

plt.ion()    # interactive mode: lets the figure redraw inside the training loop
plt.show()

# ---------------------------------------------------------------------------
# Optimize the network
# ---------------------------------------------------------------------------
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss()   # mean-squared error: standard loss for regression

# Training and live plotting
for t in range(100):
    prediction = net(x)                 # forward pass on the full data set
    loss = loss_func(prediction, y)     # argument order is (prediction, target)

    optimizer.zero_grad()               # clear gradients from the previous step
    loss.backward()                     # back-propagate
    optimizer.step()                    # apply the SGD update

    if t % 5 == 0:
        # Redraw the current fit every 5 steps.
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        # loss.item() replaces the pre-0.4 loss.data[0] idiom.
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(),
                 fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()   # back to blocking mode so the final window stays open
plt.show()

3.2Classification分类

import torch
import torch.nn.functional as F
from torch.autograd import Variable  # deprecated since PyTorch 0.4; kept as a file-level import, no longer used
import matplotlib.pyplot as plt

# ---------------------------------------------------------------------------
# Create the data: two 100-point Gaussian clusters, one per class.
# ---------------------------------------------------------------------------
n_data = torch.ones(100, 2)          # base tensor supplying the cluster means
x0 = torch.normal(2 * n_data, 1)     # class-0 samples, centred at (2, 2)
y0 = torch.zeros(100)                # class-0 labels
x1 = torch.normal(-2 * n_data, 1)    # class-1 samples, centred at (-2, -2)
y1 = torch.ones(100)                 # class-1 labels

# Inputs must be float; labels must be long (int64) for CrossEntropyLoss.
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)

# NOTE: the original x, y = Variable(x), Variable(y) wrap is a no-op on
# PyTorch >= 0.4 and has been dropped.

# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1],
#             c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()

# ---------------------------------------------------------------------------
# Build the network
# ---------------------------------------------------------------------------
class Net(torch.nn.Module):
    """Two-layer MLP classifier: the output layer emits one logit per class.

    Args:
        n_features: number of input features.
        n_hidden:   size of the single hidden layer.
        n_out:      number of classes (logits).
    """

    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        # Raw logits are returned; CrossEntropyLoss applies log-softmax itself.
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x


net = Net(2, 10, 2)   # 2 input features -> 10 hidden units -> 2 class logits
# print(net)

plt.ion()    # interactive mode so the decision regions animate during training
plt.show()

optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
# CrossEntropyLoss is the standard classification loss: it expects raw logits
# and integer class labels.
loss_func = torch.nn.CrossEntropyLoss()

# ---------------------------------------------------------------------------
# Train and plot the result
# ---------------------------------------------------------------------------
for t in range(100):
    out = net(x)                  # raw logits, shape (N, 2)
    loss = loss_func(out, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 2 == 0:
        plt.cla()
        # FIX: modern PyTorch requires an explicit dim for softmax (the
        # implicit-dim form is deprecated).  torch.max(..., 1)[1] is the
        # argmax over the class dimension, i.e. the predicted class index.
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1],
                    c=pred_y, s=100, lw=0, cmap='RdYlGn')
        # Derive the sample count from the data instead of hard-coding 200.
        accuracy = (pred_y == target_y).sum() / target_y.size
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy,
                 fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

3.3快速搭建法

import torch
import torch.nn.functional as F
from torch.autograd import Variable  # deprecated since PyTorch 0.4; kept as a file-level import, no longer used
import matplotlib.pyplot as plt

# ---------------------------------------------------------------------------
# Create the data: same two Gaussian clusters as the classification example.
# ---------------------------------------------------------------------------
n_data = torch.ones(100, 2)          # base tensor supplying the cluster means
x0 = torch.normal(2 * n_data, 1)     # class-0 samples, centred at (2, 2)
y0 = torch.zeros(100)                # class-0 labels
x1 = torch.normal(-2 * n_data, 1)    # class-1 samples, centred at (-2, -2)
y1 = torch.ones(100)                 # class-1 labels

# Inputs must be float; labels must be long (int64) for CrossEntropyLoss.
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)

# NOTE: the original x, y = Variable(x), Variable(y) wrap is a no-op on
# PyTorch >= 0.4 and has been dropped.

# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1],
#             c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()

# ---------------------------------------------------------------------------
# Ordinary (explicit) network construction
# ---------------------------------------------------------------------------
class Net(torch.nn.Module):
    """Two-layer MLP built the explicit way, for comparison with nn.Sequential."""

    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x


net1 = Net(2, 10, 2)

# ---------------------------------------------------------------------------
# Quick build: the same architecture in a single nn.Sequential expression.
# Here ReLU appears as a layer object, whereas Net.forward calls the
# functional F.relu — the two networks compute the same function.
# ---------------------------------------------------------------------------
net2 = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2),
)

print(net1)
print(net2)

3.4保存提取

import torch
from torch.autograd import Variable  # deprecated since PyTorch 0.4; kept as a file-level import, no longer used
import matplotlib.pyplot as plt

# Fake data: noisy quadratic, identical to the regression example.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())

# NOTE: the original wrapped these as Variable(..., requires_grad=False);
# plain tensors already default to requires_grad=False, so the wrap is
# redundant and has been dropped.

# ---------------------------------------------------------------------------
# Save
# ---------------------------------------------------------------------------
def save():
    """Train a small regressor on (x, y), then persist it two ways.

    Saves both the entire pickled module ('net.pkl') and just its
    parameters ('net_params.pkl'), and draws panel 1 of the 3-panel figure.
    """
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    torch.save(net1, 'net.pkl')                      # save the whole module (pickled)
    torch.save(net1.state_dict(), 'net_params.pkl')  # save only the parameters (preferred)

    # Plot panel 1 of 3.
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

# ---------------------------------------------------------------------------
# Restore the whole network
# ---------------------------------------------------------------------------
def restore_net():
    """Reload the complete pickled module written by save() and plot its fit."""
    # FIX: PyTorch >= 2.6 defaults torch.load to weights_only=True, which
    # rejects a pickled nn.Module.  weights_only=False is safe here only
    # because we created 'net.pkl' ourselves (never unpickle untrusted files).
    net2 = torch.load('net.pkl', weights_only=False)
    prediction = net2(x)

    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

# ---------------------------------------------------------------------------
# Restore only the network parameters
# ---------------------------------------------------------------------------
def restore_params():
    """Rebuild the architecture, then load the saved state_dict into it."""
    # The architecture must match the one defined in save() exactly,
    # otherwise load_state_dict() will report missing/unexpected keys.
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)

    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()


save()
restore_net()
restore_params()

3.5批数据训练

import torch
import torch.utils.data as Data

BATCH_SIZE = 5

x = torch.linspace(1, 10, 10)    # ten samples: 1..10
y = torch.linspace(10, 1, 10)    # ten targets: 10..1

# The old TensorDataset(data_tensor=..., target_tensor=...) keyword form was
# removed in newer PyTorch; positional arguments work on all versions.
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,       # reshuffle the 10 samples each epoch
    # num_workers=2,    # disabled: worker processes need fork(), unavailable on Windows
)

for epoch in range(3):
    for step, (batch_x, batch_y) in enumerate(loader):
        # A real training step would go here; this demo only shows batching.
        print('Epoch:', epoch, '|Step:', step,
              '|batch x:', batch_x.numpy(), '|batch y:', batch_y.numpy())

3.6Optimizer优化器

import torch
import torch.nn.functional as F
from torch.autograd import Variable  # deprecated since PyTorch 0.4; kept as a file-level import, no longer used
import matplotlib.pyplot as plt
import torch.utils.data as Data

# Hyper-parameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

# Noisy quadratic data set, same shape as the regression example.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())

# plt.scatter(x.numpy(), y.numpy())
# plt.show()

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)


class Net(torch.nn.Module):
    """Shared 1 -> 20 -> 1 MLP so every optimizer trains the same architecture."""

    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x


# One independent network per optimizer so each comparison starts fresh.
net_SGD = Net()
net_Momentum = Net()
net_RMSprop = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

loss_func = torch.nn.MSELoss()
losses_his = [[], [], [], []]   # one loss history per optimizer, in `nets` order

for epoch in range(EPOCH):
    print(epoch)
    for step, (batch_x, batch_y) in enumerate(loader):
        # The original wrapped batches in Variable(); unnecessary on
        # PyTorch >= 0.4, so the batches are used directly.
        for net, opt, l_his in zip(nets, optimizers, losses_his):
            output = net(batch_x)
            loss = loss_func(output, batch_y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            l_his.append(loss.item())   # loss.item() replaces the old loss.data[0]

labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.show()

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/389444.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

为什么饼图有问题

介绍 (Introduction) It seems as if people are split on pie charts: either you passionately hate them, or you are indifferent. In this article, I am going to explain why pie charts are problematic and, if you fall into the latter category, what you can do w…

New Distinct Substrings(后缀数组)

New Distinct Substrings（后缀数组） 给定一个字符串，求不相同的子串的个数。\(n<50005\)。 显然，任何一个子串一定是后缀上的前缀。先（按套路）把后缀排好序，对于当前的后缀\(S_i\)…

Android dependency 'com.android.support:support-v4' has different version for the compile (26.1.0...

在项目中加入react-native-camera的时候 出现的错误. 解决方案: 修改 implementation project(:react-native-camera)为 implementation (project(:react-native-camera)) {exclude group: "com.android.support"}查看原文 Could not find play-services-basement.aa…

先知模型 facebook_使用Facebook先知进行犯罪率预测

先知模型 facebookTime series prediction is one of the must-know techniques for any data scientist. Questions like predicting the weather, product sales, customer visit in the shopping center, or amount of inventory to maintain, etc - all about time series …

莫烦Pytorch神经网络第四章代码修改

4.1CNN卷积神经网络 import torch import torch.nn as nn from torch.autograd import Variable import torch.utils.data as Data import torchvision import matplotlib.pyplot as pltEPOCH 1 BATCH_SIZE 50 LR 0.001 DOWNLOAD_MNIST False #如果数据集已经下载到…

github gists 101使代码共享漂亮

If you’ve been going through Medium, looking at technical articles, you’ve undoubtedly seen little windows that look like the below:如果您一直在阅读Medium&#xff0c;并查看技术文章&#xff0c;那么您无疑会看到类似于以下内容的小窗口&#xff1a; def hello_…

loj #6278. 数列分块入门 2

题目 题解 区间修改&#xff0c;询问区间小于c的个数。分块排序&#xff0c;用vector。至于那个块的大小&#xff0c;好像要用到均值不等式 我不太会。。。就开始一个个试&#xff0c;发现sizsqrt(n)/4时最快&#xff01;&#xff01;&#xff01;明天去学一下算分块复杂度的方…

基于Netty的百万级推送服务设计要点

1. 背景1.1. 话题来源最近很多从事移动互联网和物联网开发的同学给我发邮件或者微博私信我&#xff0c;咨询推送服务相关的问题。问题五花八门&#xff0c;在帮助大家答疑解惑的过程中&#xff0c;我也对问题进行了总结&#xff0c;大概可以归纳为如下几类&#xff1a;1&#x…

莫烦Pytorch神经网络第五章代码修改

5.1动态Dynamic import torch from torch import nn import numpy as np import matplotlib.pyplot as plt# torch.manual_seed(1) # reproducible# Hyper Parameters INPUT_SIZE 1 # rnn input size / image width LR 0.02 # learning rateclass…

鲜为人知的6个黑科技网站_6种鲜为人知的熊猫绘图工具

鲜为人知的6个黑科技网站Pandas is the go-to Python library for data analysis and manipulation. It provides numerous functions and methods that expedice the data analysis process.Pandas是用于数据分析和处理的Python库。 它提供了加速数据分析过程的众多功能和方法…

VRRP网关冗余

实验要求 1、R1创建环回口&#xff0c;模拟外网 2、R2&#xff0c;R3使用VRRP技术 3、路由器之间使用EIGRP路由协议  实验拓扑  实验配置  R1(config)#interface loopback 0R1(config-if)#ip address 1.1.1.1 255.255.255.0R1(config-if)#int e0/0R1(config-if)#ip addr…

网页JS获取当前地理位置(省市区)

网页JS获取当前地理位置&#xff08;省市区&#xff09; 一、总结 一句话总结&#xff1a;ip查询接口 二、网页JS获取当前地理位置&#xff08;省市区&#xff09; 眼看2014又要过去了&#xff0c;翻翻今年的文章好像没有写几篇&#xff0c;忙真的或许已经不能成为借口了&#…

大熊猫卸妆后_您不应错过的6大熊猫行动

大熊猫卸妆后数据科学 (Data Science) Pandas is used mainly for reading, cleaning, and extracting insights from data. We will see an advanced use of Pandas which are very important to a Data Scientist. These operations are used to analyze data and manipulate…

数据eda_关于分类和有序数据的EDA

数据eda数据科学和机器学习统计 (STATISTICS FOR DATA SCIENCE AND MACHINE LEARNING) Categorical variables are the ones where the possible values are provided as a set of options, it can be pre-defined or open. An example can be the gender of a person. In the …

PyTorch官方教程中文版:PYTORCH之60MIN入门教程代码学习

Pytorch入门 import torch""" 构建非初始化的矩阵 """x torch.empty(5,3) #print(x)""" 构建随机初始化矩阵 """x torch.rand(5,3)""" 构造一个矩阵全为 0&#xff0c;而且数据类型是 long &qu…

Flexbox 最简单的表单

弹性布局(Flexbox)逐渐流行&#xff0c;越来越多的人开始使用&#xff0c;因为它写Css布局真是太简单了一一、<form>元素表单使用<form>元素<form></form>复制代码上面是一个空的表单&#xff0c;根据HTML标准&#xff0c;它是一个块级元素&#xff0c…

CSS中的盒子模型

一.为什么使用CSS 1.有效的传递页面信息 2.使用CSS美化过的页面文本&#xff0c;使页面漂亮、美观&#xff0c;吸引用户 3.可以很好的突出页面的主题内容&#xff0c;使用户第一眼可以看到页面主要内容 4.具有良好的用户体验 二.字体样式属性 1.font-family:英…

jdk重启后步行_向后介绍步行以一种新颖的方式来预测未来

jdk重启后步行“永远不要做出预测&#xff0c;尤其是关于未来的预测。” (KK Steincke) (“Never Make Predictions, Especially About the Future.” (K. K. Steincke)) Does this picture portray a horse or a car? 这张照片描绘的是马还是汽车&#xff1f; How likely is …

PyTorch官方教程中文版:入门强化教程代码学习

PyTorch之数据加载和处理 from __future__ import print_function, division import os import torch import pandas as pd #用于更容易地进行csv解析 from skimage import io, transform #用于图像的IO和变换 import numpy as np import matplotlib.pyplot a…

css3-2 CSS3选择器和文本字体样式

css3-2 CSS3选择器和文本字体样式 一、总结 一句话总结&#xff1a;是要记下来的&#xff0c;记下来可以省很多事。 1、css的基本选择器中的:first-letter和:first-line是什么意思&#xff1f; :first-letter选择第一个单词&#xff0c;:first-line选择第一行 2、css的伪类选…