import torch
import numpy as np

"""
Numpy/Torch comparison lesson
"""
# # converting between tensor and numpy data formats
# np_data = np.arange(6).reshape((2,3))
# print(np_data)
#
# torch_data = torch.from_numpy(np_data)
# print('\n',torch_data)
#
# tensor2array = torch_data.numpy()
# print('\n',tensor2array)
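#
# # A minimal sketch (my addition, assuming a recent PyTorch): torch.from_numpy
# # shares memory with the source array, so in-place edits are visible on both
# # sides, while torch.tensor() makes an independent copy.
# shared = np.ones(3)
# shared_t = torch.from_numpy(shared)
# shared[0] = 99.0                   # mutate the numpy array in place
# print('\n',shared_t)               # the tensor sees the change: [99., 1., 1.]
# copied_t = torch.tensor(shared)    # a copy, unaffected by later edits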

# # abs: absolute value
# data = [-1,-2,1,2]
# tensor = torch.FloatTensor(data)  # 32-bit floating point
# print(abs(tensor))
# print('\n',np.abs(data))
# print('\n',torch.abs(tensor))

# # matrix multiplication
# data = [[1,2],[3,4]]
# tensor = torch.FloatTensor(data) #32-bit
# print('\n',np.matmul(data,data))
# print('\n',torch.mm(tensor,tensor))
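#
# # A side note from me (assuming PyTorch >= 1.0): torch.matmul and the @ operator
# # give the same 2-D result as torch.mm, and additionally broadcast over batch
# # dimensions.
# print('\n',torch.matmul(tensor,tensor))
# print('\n',tensor @ tensor)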
#
# # Note: on 2-D arrays numpy.dot performs matrix multiplication, while tensor.dot computes a dot product
# data = np.array(data)
# print('\n',data.dot(data))
# print('\n',tensor.dot(tensor))  # Note: this raises an error -- newer versions require 1-D inputs for dot, but our tensor is 2-D; older versions returned the element-wise dot product here
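#
# # What still works (my sketch, assuming PyTorch >= 1.0): flatten to 1-D first,
# # or sum the element-wise product, which is what old versions computed.
# v = tensor.flatten()
# print('\n',torch.dot(v,v))           # 1*1 + 2*2 + 3*3 + 4*4 = 30.0
# print('\n',(tensor*tensor).sum())    # same value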

"""
Variable lesson
"""
# # Note: I worked through this section, but it no longer runs as written. In newer
# # torch versions Variable() simply returns a tensor, requires_grad has a default
# # value (so it need not be passed), and var.backward() works directly on tensors.
# from torch.autograd import Variable
#
# tensor = torch.FloatTensor([[1,2],[3,4]])  # needs call parentheses, not indexing brackets
# variable = Variable(tensor,requires_grad = True)
#
# t_out = torch.mean(tensor*tensor)
# v_out = torch.mean(variable*variable)
#
# v_out.backward()
# print(variable.grad)
# print(variable.data)  # the underlying tensor
# print(variable.data.numpy())  # .numpy() is a method call; the parentheses were missing
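#
# # A modern rewrite (my sketch, assuming PyTorch >= 0.4, where Variable was merged
# # into Tensor): requires_grad=True enables the same gradient tracking.
# t = torch.tensor([[1.,2.],[3.,4.]], requires_grad=True)
# out = torch.mean(t*t)        # d(out)/dt = 2t/4 = t/2
# out.backward()
# print(t.grad)                # tensor([[0.5, 1.0], [1.5, 2.0]])
# print(t.detach().numpy())    # .detach() replaces the old .data idiom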

"""
Activation function lesson
"""
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.linspace(-5,5,200)
x = Variable(x)  # in recent versions Variable() just returns the tensor; kept for fidelity to the lesson
x_np = x.data.numpy()  # convert to numpy format for plotting
y_relu = F.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()  # F.sigmoid is deprecated in newer versions
y_tanh = torch.tanh(x).data.numpy()        # F.tanh is deprecated as well
y_softplus = F.softplus(x).data.numpy()
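
# A quick sanity check (my addition): softplus(x) = log(1 + e^x) and
# sigmoid(x) = 1 / (1 + e^-x), so the arrays can be verified against numpy.
assert np.allclose(y_softplus, np.log1p(np.exp(x_np)), atol=1e-5)
assert np.allclose(y_sigmoid, 1/(1 + np.exp(-x_np)), atol=1e-5)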

plt.subplot(221)
plt.plot(x_np,y_relu,c='red',label='relu')
plt.ylim((-1,5))
plt.legend(loc="best")

plt.subplot(222)
plt.plot(x_np,y_sigmoid,c='red',label='sigmoid')
plt.ylim((-0.2,1.2))
plt.legend(loc="best")

plt.subplot(223)
plt.plot(x_np,y_tanh,c='red',label='tanh')
plt.ylim((-1.2,1.2))
plt.legend(loc="best")

plt.subplot(224)
plt.plot(x_np,y_softplus,c='red',label='softplus')
plt.ylim((-0.2,6))
plt.legend(loc="best")

plt.show()