24.8.19学习笔记(MNIST手写数字识别、Otto产品分类)

pytorch MNIST手写数字识别:

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms

# Fix the random seed so results are reproducible.
torch.manual_seed(0)

# Hyperparameters.
batch_size = 32
learning_rate = 0.001
num_epochs = 10

# 1. Data preprocessing.
# transforms.Compose chains several transforms together.
# BUG FIX: in the original, the Normalize(...) call had been swallowed by the
# inline comment after ToTensor(), so inputs were never standardized.
transform = transforms.Compose([
    transforms.ToTensor(),  # PIL Image / numpy array -> tensor, values scaled from [0, 255] to [0.0, 1.0]
    transforms.Normalize((0.1307,), (0.3081,)),  # mean / std of the MNIST training set
])

# Load the training data (downloaded to ./data on first use).
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# Load the test data.
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
class Net(nn.Module):
    """Fully connected classifier for 28x28 MNIST digit images."""

    def __init__(self):
        super(Net, self).__init__()
        # Five stacked linear layers: 784 -> 512 -> 256 -> 128 -> 64 -> 10.
        self.fc1 = nn.Linear(28 * 28, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 64)
        self.fc5 = nn.Linear(64, 10)  # one output per digit class

    def forward(self, x):
        # Flatten each image into a 784-dimensional vector.
        x = x.view(-1, 28 * 28)
        # All hidden layers share the same linear -> ReLU pattern.
        for hidden in (self.fc1, self.fc2, self.fc3, self.fc4):
            x = torch.relu(hidden(x))
        # Raw logits; CrossEntropyLoss applies log-softmax itself.
        return self.fc5(x)
# Instantiate the network.
model = Net()

# 3. Loss function and optimizer.
criterion = nn.CrossEntropyLoss()  # cross-entropy over the 10 digit classes
# Plain stochastic gradient descent with momentum.
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)
def train(epoch):
    """Run one full training epoch over train_loader, logging progress."""
    model.train()  # enable training-mode behavior
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()             # clear gradients from the previous step
        output = model(data)              # forward pass
        loss = criterion(output, target)  # compute the loss
        loss.backward()                   # back-propagate
        optimizer.step()                  # apply the weight update
        # Periodic progress report.
        if batch_idx % 150 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test():
    """Evaluate on the test set; print and return accuracy (percent)."""
    model.eval()  # evaluation mode
    test_loss = 0
    correct = 0
    with torch.no_grad():  # inference only, no gradients needed
        for data, target in test_loader:
            output = model(data)                            # forward pass -> logits
            test_loss += criterion(output, target).item()   # accumulate batch losses
            # Index of the largest logit is the predicted class; shape (batch_size, 1).
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)  # mean loss per sample
    accuracy = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), accuracy))
    return accuracy
# Training loop.  Keep the accuracy returned by the final epoch's evaluation
# instead of calling test() again afterwards — the original ran one extra
# full pass over the test set just to print the final number (visible as the
# duplicated "Test set:" line in the log).
final_accuracy = 0.0
for epoch in range(1, num_epochs + 1):
    train(epoch)
    final_accuracy = test()

print("Final Test Accuracy: {:.2f}%".format(final_accuracy))

输出:

"D:\deep learn\envs\pytorch\python.exe" C:\Users\kk\PycharmProjects\pythonProject2\dataset\train\MNIST.py 
Train Epoch: 1 [0/60000 (0%)]	Loss: 2.309689
Train Epoch: 1 [4800/60000 (8%)]	Loss: 2.310969
Train Epoch: 1 [9600/60000 (16%)]	Loss: 2.305695
Train Epoch: 1 [14400/60000 (24%)]	Loss: 2.290891
Train Epoch: 1 [19200/60000 (32%)]	Loss: 2.284240
Train Epoch: 1 [24000/60000 (40%)]	Loss: 2.256572
Train Epoch: 1 [28800/60000 (48%)]	Loss: 2.258740
Train Epoch: 1 [33600/60000 (56%)]	Loss: 2.281724
Train Epoch: 1 [38400/60000 (64%)]	Loss: 2.259450
Train Epoch: 1 [43200/60000 (72%)]	Loss: 2.222388
Train Epoch: 1 [48000/60000 (80%)]	Loss: 2.193747
Train Epoch: 1 [52800/60000 (88%)]	Loss: 2.152849
Train Epoch: 1 [57600/60000 (96%)]	Loss: 2.134803Test set: Average loss: 0.0668, Accuracy: 2698/10000 (26.98%)Train Epoch: 2 [0/60000 (0%)]	Loss: 2.225060
Train Epoch: 2 [4800/60000 (8%)]	Loss: 2.149050
Train Epoch: 2 [9600/60000 (16%)]	Loss: 2.068803
Train Epoch: 2 [14400/60000 (24%)]	Loss: 2.028979
Train Epoch: 2 [19200/60000 (32%)]	Loss: 1.979294
Train Epoch: 2 [24000/60000 (40%)]	Loss: 1.915839
Train Epoch: 2 [28800/60000 (48%)]	Loss: 2.029020
Train Epoch: 2 [33600/60000 (56%)]	Loss: 1.741296
Train Epoch: 2 [38400/60000 (64%)]	Loss: 1.905341
Train Epoch: 2 [43200/60000 (72%)]	Loss: 1.622296
Train Epoch: 2 [48000/60000 (80%)]	Loss: 1.477951
Train Epoch: 2 [52800/60000 (88%)]	Loss: 1.222387
Train Epoch: 2 [57600/60000 (96%)]	Loss: 1.314141Test set: Average loss: 0.0373, Accuracy: 7225/10000 (72.25%)Train Epoch: 3 [0/60000 (0%)]	Loss: 1.157169
Train Epoch: 3 [4800/60000 (8%)]	Loss: 1.198444
Train Epoch: 3 [9600/60000 (16%)]	Loss: 0.888496
Train Epoch: 3 [14400/60000 (24%)]	Loss: 0.746717
Train Epoch: 3 [19200/60000 (32%)]	Loss: 0.708051
Train Epoch: 3 [24000/60000 (40%)]	Loss: 0.802987
Train Epoch: 3 [28800/60000 (48%)]	Loss: 0.748208
Train Epoch: 3 [33600/60000 (56%)]	Loss: 0.570825
Train Epoch: 3 [38400/60000 (64%)]	Loss: 0.842269
Train Epoch: 3 [43200/60000 (72%)]	Loss: 0.457923
Train Epoch: 3 [48000/60000 (80%)]	Loss: 0.469652
Train Epoch: 3 [52800/60000 (88%)]	Loss: 0.289702
Train Epoch: 3 [57600/60000 (96%)]	Loss: 0.435462Test set: Average loss: 0.0148, Accuracy: 8644/10000 (86.44%)Train Epoch: 4 [0/60000 (0%)]	Loss: 0.678779
Train Epoch: 4 [4800/60000 (8%)]	Loss: 0.653893
Train Epoch: 4 [9600/60000 (16%)]	Loss: 0.330618
Train Epoch: 4 [14400/60000 (24%)]	Loss: 0.517715
Train Epoch: 4 [19200/60000 (32%)]	Loss: 0.339571
Train Epoch: 4 [24000/60000 (40%)]	Loss: 0.250688
Train Epoch: 4 [28800/60000 (48%)]	Loss: 0.555737
Train Epoch: 4 [33600/60000 (56%)]	Loss: 0.485628
Train Epoch: 4 [38400/60000 (64%)]	Loss: 0.341829
Train Epoch: 4 [43200/60000 (72%)]	Loss: 0.294278
Train Epoch: 4 [48000/60000 (80%)]	Loss: 0.210282
Train Epoch: 4 [52800/60000 (88%)]	Loss: 0.418023
Train Epoch: 4 [57600/60000 (96%)]	Loss: 0.639594Test set: Average loss: 0.0114, Accuracy: 8923/10000 (89.23%)Train Epoch: 5 [0/60000 (0%)]	Loss: 0.075756
Train Epoch: 5 [4800/60000 (8%)]	Loss: 0.628859
Train Epoch: 5 [9600/60000 (16%)]	Loss: 0.268495
Train Epoch: 5 [14400/60000 (24%)]	Loss: 0.245660
Train Epoch: 5 [19200/60000 (32%)]	Loss: 0.157376
Train Epoch: 5 [24000/60000 (40%)]	Loss: 0.149876
Train Epoch: 5 [28800/60000 (48%)]	Loss: 0.259019
Train Epoch: 5 [33600/60000 (56%)]	Loss: 0.199592
Train Epoch: 5 [38400/60000 (64%)]	Loss: 0.534609
Train Epoch: 5 [43200/60000 (72%)]	Loss: 0.271385
Train Epoch: 5 [48000/60000 (80%)]	Loss: 0.226728
Train Epoch: 5 [52800/60000 (88%)]	Loss: 0.562785
Train Epoch: 5 [57600/60000 (96%)]	Loss: 0.609851Test set: Average loss: 0.0099, Accuracy: 9046/10000 (90.46%)Train Epoch: 6 [0/60000 (0%)]	Loss: 0.283710
Train Epoch: 6 [4800/60000 (8%)]	Loss: 0.288696
Train Epoch: 6 [9600/60000 (16%)]	Loss: 0.389604
Train Epoch: 6 [14400/60000 (24%)]	Loss: 0.192314
Train Epoch: 6 [19200/60000 (32%)]	Loss: 0.181009
Train Epoch: 6 [24000/60000 (40%)]	Loss: 0.258654
Train Epoch: 6 [28800/60000 (48%)]	Loss: 0.076739
Train Epoch: 6 [33600/60000 (56%)]	Loss: 0.391774
Train Epoch: 6 [38400/60000 (64%)]	Loss: 0.581220
Train Epoch: 6 [43200/60000 (72%)]	Loss: 0.308373
Train Epoch: 6 [48000/60000 (80%)]	Loss: 0.255795
Train Epoch: 6 [52800/60000 (88%)]	Loss: 0.123116
Train Epoch: 6 [57600/60000 (96%)]	Loss: 0.535722Test set: Average loss: 0.0089, Accuracy: 9151/10000 (91.51%)Train Epoch: 7 [0/60000 (0%)]	Loss: 0.195117
Train Epoch: 7 [4800/60000 (8%)]	Loss: 0.334070
Train Epoch: 7 [9600/60000 (16%)]	Loss: 0.426106
Train Epoch: 7 [14400/60000 (24%)]	Loss: 0.547939
Train Epoch: 7 [19200/60000 (32%)]	Loss: 0.621642
Train Epoch: 7 [24000/60000 (40%)]	Loss: 0.271703
Train Epoch: 7 [28800/60000 (48%)]	Loss: 0.427611
Train Epoch: 7 [33600/60000 (56%)]	Loss: 0.134861
Train Epoch: 7 [38400/60000 (64%)]	Loss: 0.331025
Train Epoch: 7 [43200/60000 (72%)]	Loss: 0.235256
Train Epoch: 7 [48000/60000 (80%)]	Loss: 0.293789
Train Epoch: 7 [52800/60000 (88%)]	Loss: 0.846908
Train Epoch: 7 [57600/60000 (96%)]	Loss: 0.340743Test set: Average loss: 0.0080, Accuracy: 9240/10000 (92.40%)Train Epoch: 8 [0/60000 (0%)]	Loss: 0.263807
Train Epoch: 8 [4800/60000 (8%)]	Loss: 0.145736
Train Epoch: 8 [9600/60000 (16%)]	Loss: 0.219392
Train Epoch: 8 [14400/60000 (24%)]	Loss: 0.200102
Train Epoch: 8 [19200/60000 (32%)]	Loss: 0.100795
Train Epoch: 8 [24000/60000 (40%)]	Loss: 0.318653
Train Epoch: 8 [28800/60000 (48%)]	Loss: 0.277175
Train Epoch: 8 [33600/60000 (56%)]	Loss: 0.117883
Train Epoch: 8 [38400/60000 (64%)]	Loss: 0.111346
Train Epoch: 8 [43200/60000 (72%)]	Loss: 0.292880
Train Epoch: 8 [48000/60000 (80%)]	Loss: 0.150133
Train Epoch: 8 [52800/60000 (88%)]	Loss: 0.177790
Train Epoch: 8 [57600/60000 (96%)]	Loss: 0.088824Test set: Average loss: 0.0073, Accuracy: 9299/10000 (92.99%)Train Epoch: 9 [0/60000 (0%)]	Loss: 0.201245
Train Epoch: 9 [4800/60000 (8%)]	Loss: 0.182352
Train Epoch: 9 [9600/60000 (16%)]	Loss: 0.124325
Train Epoch: 9 [14400/60000 (24%)]	Loss: 0.176092
Train Epoch: 9 [19200/60000 (32%)]	Loss: 0.217576
Train Epoch: 9 [24000/60000 (40%)]	Loss: 0.278557
Train Epoch: 9 [28800/60000 (48%)]	Loss: 0.227982
Train Epoch: 9 [33600/60000 (56%)]	Loss: 0.165331
Train Epoch: 9 [38400/60000 (64%)]	Loss: 0.096038
Train Epoch: 9 [43200/60000 (72%)]	Loss: 0.299993
Train Epoch: 9 [48000/60000 (80%)]	Loss: 0.281508
Train Epoch: 9 [52800/60000 (88%)]	Loss: 0.163592
Train Epoch: 9 [57600/60000 (96%)]	Loss: 0.297048Test set: Average loss: 0.0066, Accuracy: 9384/10000 (93.84%)Train Epoch: 10 [0/60000 (0%)]	Loss: 0.224364
Train Epoch: 10 [4800/60000 (8%)]	Loss: 0.126923
Train Epoch: 10 [9600/60000 (16%)]	Loss: 0.219346
Train Epoch: 10 [14400/60000 (24%)]	Loss: 0.247256
Train Epoch: 10 [19200/60000 (32%)]	Loss: 0.156729
Train Epoch: 10 [24000/60000 (40%)]	Loss: 0.060918
Train Epoch: 10 [28800/60000 (48%)]	Loss: 0.074432
Train Epoch: 10 [33600/60000 (56%)]	Loss: 0.064755
Train Epoch: 10 [38400/60000 (64%)]	Loss: 0.139279
Train Epoch: 10 [43200/60000 (72%)]	Loss: 0.245214
Train Epoch: 10 [48000/60000 (80%)]	Loss: 0.314279
Train Epoch: 10 [52800/60000 (88%)]	Loss: 0.150742
Train Epoch: 10 [57600/60000 (96%)]	Loss: 0.076270Test set: Average loss: 0.0060, Accuracy: 9422/10000 (94.22%)Test set: Average loss: 0.0060, Accuracy: 9422/10000 (94.22%)Final Test Accuracy: 94.22%进程已结束,退出代码为 0

Otto Group Product Classification Challenge:

import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, TensorDataset

# Fix the random seed so results are reproducible.
torch.manual_seed(0)

# Hyperparameters.
batch_size = 32
learning_rate = 0.001
num_epochs = 10

# 1. Data preprocessing: read the Otto training data.
train_df = pd.read_csv("C:/Users/kk/PycharmProjects/pythonProject/train.csv")

# Separate features from labels (only the training file has a 'target' column).
X = train_df.drop(['id', 'target'], axis=1).values
y = train_df['target'].astype('category').cat.codes.values  # class names -> integer codes

# Hold out 20% of the rows as a validation split.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize: fit the scaler on the training split only, then reuse it.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)

# Convert to PyTorch tensors.
X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.long)
X_val_tensor = torch.tensor(X_val, dtype=torch.float32)
y_val_tensor = torch.tensor(y_val, dtype=torch.long)

# Wrap the tensors in data loaders.
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
val_dataset = TensorDataset(X_val_tensor, y_val_tensor)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
class Net(nn.Module):
    """MLP classifier over tabular features with 9 output classes."""

    def __init__(self, input_dim):
        super(Net, self).__init__()
        # Shrinking stack of linear layers: input_dim -> 256 -> 128 -> 64 -> 32 -> 9.
        self.fc1 = nn.Linear(input_dim, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 32)
        self.fc5 = nn.Linear(32, 9)
        # One dropout module per hidden layer to curb overfitting.
        self.dropout1 = nn.Dropout(p=0.5)
        self.dropout2 = nn.Dropout(p=0.5)
        self.dropout3 = nn.Dropout(p=0.5)
        self.dropout4 = nn.Dropout(p=0.5)

    def forward(self, x):
        # Each hidden stage applies linear -> ReLU -> dropout.
        stages = ((self.fc1, self.dropout1), (self.fc2, self.dropout2),
                  (self.fc3, self.dropout3), (self.fc4, self.dropout4))
        for linear, drop in stages:
            x = drop(torch.relu(linear(x)))
        # Raw logits; softmax / CrossEntropyLoss is applied by the caller.
        return self.fc5(x)
# Instantiate the model with one input per feature column.
input_dim = X_train_tensor.shape[1]
model = Net(input_dim)

# 3. Loss function and optimizer.
criterion = nn.CrossEntropyLoss()  # cross-entropy over the 9 product classes
optimizer = optim.Adam(model.parameters(), lr=learning_rate)  # Adam optimizer
def train(epoch):
    """Run one full training epoch over train_loader, logging progress."""
    model.train()  # enable training-mode behavior (dropout active)
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()             # clear gradients from the previous step
        output = model(data)              # forward pass
        loss = criterion(output, target)  # compute the loss
        loss.backward()                   # back-propagate
        optimizer.step()                  # apply the weight update
        # Periodic progress report.
        if batch_idx % 300 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test():
    """Evaluate on the validation set; print and return accuracy (percent).

    FIX: the original returned nothing; return the accuracy (backward-
    compatible) so callers can track it, matching the MNIST test() pattern.
    """
    model.eval()  # evaluation mode (dropout disabled)
    test_loss = 0
    correct = 0
    with torch.no_grad():  # inference only, no gradients needed
        for data, target in val_loader:
            output = model(data)                            # forward pass -> logits
            test_loss += criterion(output, target).item()   # accumulate batch losses
            # Index of the largest logit is the predicted class; shape (batch_size, 1).
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(val_loader.dataset)  # mean loss per sample
    accuracy = 100. * correct / len(val_loader.dataset)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(val_loader.dataset), accuracy))
    return accuracy
# Training loop.
for epoch in range(1, num_epochs + 1):
    train(epoch)
    test()

# 6. Prepare the test data.
test_df = pd.read_csv("C:/Users/kk/PycharmProjects/pythonProject/test.csv")
# The test file has no 'target' column; drop only 'id' to get pure features.
X_test = test_df.drop('id', axis=1).values
X_test = scaler.transform(X_test)  # reuse the scaler fitted on the training split
X_test_tensor = torch.tensor(X_test, dtype=torch.float32)

# 7. Predict on the test data.
model.eval()
with torch.no_grad():
    output = model(X_test_tensor)  # forward pass -> logits for every test row

# 8. Build the submission: one probability column per class.
probabilities = torch.softmax(output, dim=1).numpy()  # logits -> class probabilities
submission = pd.DataFrame({
    'id': test_df['id'],
    'Class_1': probabilities[:, 0],
    'Class_2': probabilities[:, 1],
    'Class_3': probabilities[:, 2],
    'Class_4': probabilities[:, 3],
    'Class_5': probabilities[:, 4],
    'Class_6': probabilities[:, 5],
    'Class_7': probabilities[:, 6],
    'Class_8': probabilities[:, 7],
    'Class_9': probabilities[:, 8],
})

# BUG FIX: the original printed that 'submission1.csv' had been created, but
# the file was actually written as 'ottohaha.csv'. Report the real path.
submission_path = 'ottohaha.csv'
submission.to_csv(submission_path, index=False)
print("Submission file '{}' has been created.".format(submission_path))

输出:

"D:\deep learn\envs\pytorch\python.exe" C:\Users\kk\PycharmProjects\pythonProject2\dataset\train\dogcat\train.py 
Train Epoch: 1 [0/49502 (0%)]	Loss: 2.219477
Train Epoch: 1 [9600/49502 (19%)]	Loss: 1.185523
Train Epoch: 1 [19200/49502 (39%)]	Loss: 0.827805
Train Epoch: 1 [28800/49502 (58%)]	Loss: 0.812111
Train Epoch: 1 [38400/49502 (78%)]	Loss: 0.678358
Train Epoch: 1 [48000/49502 (97%)]	Loss: 0.602139Validation set: Average loss: 0.0233, Accuracy: 9029/12376 (72.96%)Train Epoch: 2 [0/49502 (0%)]	Loss: 0.587927
Train Epoch: 2 [9600/49502 (19%)]	Loss: 0.894121
Train Epoch: 2 [19200/49502 (39%)]	Loss: 0.926607
Train Epoch: 2 [28800/49502 (58%)]	Loss: 0.752447
Train Epoch: 2 [38400/49502 (78%)]	Loss: 0.563700
Train Epoch: 2 [48000/49502 (97%)]	Loss: 0.749053Validation set: Average loss: 0.0213, Accuracy: 9133/12376 (73.80%)Train Epoch: 3 [0/49502 (0%)]	Loss: 0.943063
Train Epoch: 3 [9600/49502 (19%)]	Loss: 0.886904
Train Epoch: 3 [19200/49502 (39%)]	Loss: 0.726560
Train Epoch: 3 [28800/49502 (58%)]	Loss: 0.659948
Train Epoch: 3 [38400/49502 (78%)]	Loss: 0.745690
Train Epoch: 3 [48000/49502 (97%)]	Loss: 0.529640Validation set: Average loss: 0.0209, Accuracy: 9178/12376 (74.16%)Train Epoch: 4 [0/49502 (0%)]	Loss: 1.031278
Train Epoch: 4 [9600/49502 (19%)]	Loss: 0.561578
Train Epoch: 4 [19200/49502 (39%)]	Loss: 0.948739
Train Epoch: 4 [28800/49502 (58%)]	Loss: 0.688323
Train Epoch: 4 [38400/49502 (78%)]	Loss: 0.588122
Train Epoch: 4 [48000/49502 (97%)]	Loss: 0.772149Validation set: Average loss: 0.0202, Accuracy: 9301/12376 (75.15%)Train Epoch: 5 [0/49502 (0%)]	Loss: 0.599225
Train Epoch: 5 [9600/49502 (19%)]	Loss: 0.706988
Train Epoch: 5 [19200/49502 (39%)]	Loss: 0.737061
Train Epoch: 5 [28800/49502 (58%)]	Loss: 0.830560
Train Epoch: 5 [38400/49502 (78%)]	Loss: 0.694044
Train Epoch: 5 [48000/49502 (97%)]	Loss: 0.556151Validation set: Average loss: 0.0199, Accuracy: 9355/12376 (75.59%)Train Epoch: 6 [0/49502 (0%)]	Loss: 0.393021
Train Epoch: 6 [9600/49502 (19%)]	Loss: 0.736266
Train Epoch: 6 [19200/49502 (39%)]	Loss: 0.893262
Train Epoch: 6 [28800/49502 (58%)]	Loss: 0.800717
Train Epoch: 6 [38400/49502 (78%)]	Loss: 0.747830
Train Epoch: 6 [48000/49502 (97%)]	Loss: 0.695366Validation set: Average loss: 0.0200, Accuracy: 9292/12376 (75.08%)Train Epoch: 7 [0/49502 (0%)]	Loss: 1.168882
Train Epoch: 7 [9600/49502 (19%)]	Loss: 0.769432
Train Epoch: 7 [19200/49502 (39%)]	Loss: 0.561796
Train Epoch: 7 [28800/49502 (58%)]	Loss: 0.564427
Train Epoch: 7 [38400/49502 (78%)]	Loss: 0.608078
Train Epoch: 7 [48000/49502 (97%)]	Loss: 0.935349Validation set: Average loss: 0.0193, Accuracy: 9414/12376 (76.07%)Train Epoch: 8 [0/49502 (0%)]	Loss: 0.671977
Train Epoch: 8 [9600/49502 (19%)]	Loss: 0.751692
Train Epoch: 8 [19200/49502 (39%)]	Loss: 0.873173
Train Epoch: 8 [28800/49502 (58%)]	Loss: 0.727809
Train Epoch: 8 [38400/49502 (78%)]	Loss: 0.514163
Train Epoch: 8 [48000/49502 (97%)]	Loss: 0.611271Validation set: Average loss: 0.0193, Accuracy: 9405/12376 (75.99%)Train Epoch: 9 [0/49502 (0%)]	Loss: 0.487758
Train Epoch: 9 [9600/49502 (19%)]	Loss: 1.141270
Train Epoch: 9 [19200/49502 (39%)]	Loss: 0.712523
Train Epoch: 9 [28800/49502 (58%)]	Loss: 0.570099
Train Epoch: 9 [38400/49502 (78%)]	Loss: 0.683906
Train Epoch: 9 [48000/49502 (97%)]	Loss: 0.772561Validation set: Average loss: 0.0189, Accuracy: 9443/12376 (76.30%)Train Epoch: 10 [0/49502 (0%)]	Loss: 0.647482
Train Epoch: 10 [9600/49502 (19%)]	Loss: 0.729250
Train Epoch: 10 [19200/49502 (39%)]	Loss: 0.622458
Train Epoch: 10 [28800/49502 (58%)]	Loss: 0.719655
Train Epoch: 10 [38400/49502 (78%)]	Loss: 0.520825
Train Epoch: 10 [48000/49502 (97%)]	Loss: 0.653330Validation set: Average loss: 0.0189, Accuracy: 9520/12376 (76.92%)Submission1 file 'submission1.csv' has been created.进程已结束,退出代码为 0

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/pingmian/51869.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

机器学习第十一章--特征选择与稀疏学习

一、子集搜索与评价 我们将属性称为 “特征”(feature),对当前学习任务有用的属性称为 “相关特征”(relevant feature)、没什么用的属性称为 “无关特征”(irrelevant feature).从给定的特征集合中选择出相关特征子集的过程&…

C++竞赛初阶L1-13-第五单元-循环嵌套(29~30课)535: T456454 数字统计

题目内容 请统计某个给定范围 [L,R] 的所有整数中,数字 2 出现的次数。 比如给定范围 [2,22],数字 2 在数 2 中出现了 1 次,在数 12 中出现 1 次,在数 20 中出现 1 次,在数 21 中出现 1 次,在数 22 中出现…

【开发语言】编译型语言和解释性语言有啥区别?

作为一名从业多年的程序员,对于编译型语言和解释型语言之间的区别有着深入的理解。这两种类型的编程语言在将源代码转换成可执行代码的过程中采用了不同的机制,这导致了它们在执行效率、跨平台性、安全性以及开发效率等方面存在一些差异。 编译型语言(Compiled Languages)…

Java项目集成RocketMQ

文章目录 1.调整MQ的配置1.进入bin目录2.关闭broker和namesrv3.查看进程确认关闭4.编辑配置文件broker.conf,配置brokerIP15.开放端口109116.重新启动1.进入bin目录2.启动mqnamesrv和mqbroker1.启动 NameServer 并将输出重定向到 mqnamesrv.log2.**启动 Broker 并将…

easyexcel--导入导出实现自定义格式转换

自定义格式 我们在数据库设计的时候经常会有枚举类型,如0表示普通用户,1表示VIP用户等,这在excel导入的时候,我们会填普通用户而不是0,这样就需要用到自定义格式把普通用户转换成0,我写了一个通用的抽象类…

深度学习基础—RMSprop算法与Adam 优化算法

1.RMSprop算法 1.1.算法流程 除了动量梯度下降法,RMSprop算法也可以加快梯度下降,这个算法的算法流程如下:深度学习基础—动量梯度下降法http://t.csdnimg.cn/zeGRo 1.2.算法原理 和动量梯度下降不同的是,对dW和db的变成了平方项…

什么是视频比特率?与视频时长是什么关系

​ ‌比特率是指单位时间内传输或处理的比特的数量,单位为‌bps(‌bit per second)。‌ 比特率经常用于描述在电信和计算领域中数据传输的速度,也可以作为衡量音频和视频文件数据率的指标。比特率越高,传送的数据越大,音频或视频…

springsecurity 登录认证一(ajax)

一、准备工作 1.1 导入依赖 因springboot 3.0 以上版本只能支持java17 顾使用2.5.0 版本 <parent><groupId>org.springframework.boot</groupId><artifactId>spring-boot-starter-parent</artifactId><version>2.5.0</version><…

代码随想录 day 42 动态规划 买卖股票

第九章 动态规划part09 188.买卖股票的最佳时机IV 本题是123.买卖股票的最佳时机III 的进阶版 视频讲解&#xff1a;https://www.bilibili.com/video/BV16M411U7XJ https://programmercarl.com/0188.%E4%B9%B0%E5%8D%96%E8%82%A1%E7%A5%A8%E7%9A%84%E6%9C%80%E4%BD%B3%E6%97%…

鸿蒙开发入门day05-ArkTs语言(接口与关键字)

(创作不易&#xff0c;感谢有你&#xff0c;你的支持&#xff0c;就是我前行的最大动力&#xff0c;如果看完对你有帮助&#xff0c;还请三连支持一波哇ヾ(&#xff20;^∇^&#xff20;)ノ&#xff09; 目录 ArkTS语言介绍 接口 接口属性 接口继承 泛型类型和函数 泛型…

R语言统计分析——回归诊断2

参考资料&#xff1a;R语言实战【第2版】 R语言的car包提供的大量函数&#xff0c;大大增强了拟合和评价回归模型的能力。如下&#xff1a; 函数目的qqPlot()分位数比较图durbinWatsonTest()对误差自相关性做Durbin-Watson检验crPlots()成分与残差图ncvTest()对非恒定的误差方…

transformer-explainer

安装和启动 找到这个项目&#xff0c;然后装好了。 这个项目的目的如名字。 https://github.com/poloclub/transformer-explainerTransformer Explained: Learn How LLM Transformer Models Work with Interactive Visualization - poloclub/transformer-explainerhttps:/…

顶顶通呼叫中心中间件-一句话识别语音识别安装步骤

顶顶通呼叫中心中间件-一句话模型安装步骤&#xff0c;对接mod_vad。一句话识别&#xff08;http接口提交录音文件识别&#xff09; 一、安装一句话模型 一句话识别&#xff08;http接口提交录音文件识别&#xff09;&#xff0c;比如对接mod_vad(老电话机器人接口) curl -s…

web开发,过滤器,前后端交互

目录 web开发概述 web开发环境搭建 Servlet概述 Servlet的作用&#xff1a; Servlet创建和使用 Servlet生命周期 http请求 过滤器 过滤器的使用场景&#xff1a; 通过Filter接口来实现&#xff1a; 前后端项目之间的交互&#xff1a; 1、同步请求 2、异步请求 优化…

docker-compose安装MongoDB 7.0.12

文章目录 一. Mac1.1 创建目录1.2 docker-compose.yaml默认不开启relSet开启relSet&#xff08;数据同步&#xff09; 1.3 部署1.4 卸载1.5 replSet配置1.5.1 初始化replSet1.5.2 创建超管用户1.5.3 验证用户1.5.4 查看replSet状态 二. Centos72.1 创建目录2.2 docker-compose.…

JZ51 数组中的逆序对

数组中的逆序对_牛客题霸_牛客网 描述 在数组中的两个数字&#xff0c;如果前面一个数字大于后面的数字&#xff0c;则这两个数字组成一个逆序对。输入一个数组,求出这个数组中的逆序对的总数P。并将P对1000000007取模的结果输出。 即输出P mod 1000000007 数据范围&#xff…

Xv6虚拟内存(三):进程地址空间

阅读材料 Xv6代码&#xff1a;memlayout.h、proc.h、proc.c教材3.6节 进程地址空间初始化 proc_pagetable函数 该函数用于初始化一个用户进程的地址空间&#xff0c;返回该地址空间的根页表基地址。该函数只干两件事&#xff1a;映射trampoline页到最高虚拟地址处&#xff0…

使用 AWS EKS 部署 Easysearch

随着企业对数据搜索和分析需求的增加&#xff0c;高效的搜索引擎解决方案变得越来越重要。Easysearch 作为一款强大的企业级搜索引擎&#xff0c;可以帮助企业快速构建高性能、可扩展的数据检索系统。在云计算的背景下&#xff0c;使用容器化技术来部署和管理这些解决方案已经成…

【软件测试】功能测试理论基础

目录 项目的测试流程&#x1f3f4; 需求评审 评审形式 测试人员在需求评审中职责 测试计划与方案 测试计划 问题 测试方案&#x1f3f4; 测试计划与方案的对比 功能测试设计&#x1f3f4; 测试设计的步骤 项目的测试流程&#x1f3f4; 作用&#xff1a; 有序有效开展…

Flink消费Kafka数据积压排查解决

0、背景 有个Flink任务每天不定时会出现数据积压&#xff0c;无论是白天还是数据量很少的夜里&#xff0c;且积压的数据量会越来越多&#xff0c;得不到缓解&#xff0c;只能每日在积压告警后重启&#xff0c;重启之后消费能力一点毛病没有&#xff0c;积压迅速缓解&#xff0…