The model is built with a convolutional neural network; the code is as follows:
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, random_split

# Read the training data, skipping the header row
df = pd.read_csv('train.csv', header=None, skiprows=1)

# Preprocess the data: the first column is the label, the remaining columns are pixel values
labels = df.iloc[:, 0].values.astype(int)
pixels = df.iloc[:, 1:].values.astype(float)

# Reshape into 28x28 images and normalize the pixel values to [0, 1]
pixels = pixels.reshape(-1, 28, 28).astype('float32') / 255.0
# Custom Dataset class
class HandwrittenDigitsDataset(Dataset):
    def __init__(self, images, labels=None):
        self.images = torch.tensor(images, dtype=torch.float32)
        if labels is not None:
            self.labels = torch.tensor(labels, dtype=torch.long)
        else:
            self.labels = None

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = self.images[idx].unsqueeze(0)  # add the channel dimension
        if self.labels is not None:
            label = self.labels[idx]
            return image, label
        else:
            return image
# Create the Dataset object
dataset = HandwrittenDigitsDataset(pixels, labels)

# Split the dataset into training (80%) and test (20%) subsets
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = random_split(dataset, [train_size, test_size])

# Create the DataLoaders
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
# Define the CNN model
class CNNModel(nn.Module):
    def __init__(self):
        super(CNNModel, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # 28x28 -> 14x14
        x = self.pool(F.relu(self.conv2(x)))  # 14x14 -> 7x7
        x = x.view(-1, 64 * 7 * 7)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# Instantiate the model
model = CNNModel()
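# (Optional check, not part of the original listing) Pass a dummy batch through
# the untrained model to confirm the forward pass works and produces one logit
# per digit class, i.e. an output of shape [batch_size, 10].
dummy_batch = torch.zeros(4, 1, 28, 28)
print(model(dummy_batch).shape)  # expected: torch.Size([4, 10])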
# Define the loss function and the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train the model
num_epochs = 10
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for images, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss/len(train_loader):.4f}')

# Evaluate the model
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f'Accuracy of the model on the test set: {100 * correct / total:.2f}%')

# Read the data to be predicted, skipping the header row
predict_df = pd.read_csv('test.csv', header=None, skiprows=1)

# Preprocess the data
predict_pixels = predict_df.values.astype(float)

# Reshape into 28x28 images and normalize the pixel values
predict_pixels = predict_pixels.reshape(-1, 28, 28).astype('float32') / 255.0

# Create the prediction Dataset object
predict_dataset = HandwrittenDigitsDataset(predict_pixels)

# Create the DataLoader
predict_loader = DataLoader(predict_dataset, batch_size=64, shuffle=False)

# Run predictions
predicted_labels = []
model.eval()
with torch.no_grad():
    for images in predict_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        predicted_labels.extend(predicted.numpy())

# Attach the predicted labels back to the original data
predict_df['label'] = predicted_labels

# Build the submission file in the ImageId / Label format and write it out
predict_df['ImageId'] = [i for i in range(1, len(predict_df) + 1)]
predict_df['Label'] = predict_df['label']
predict_df = predict_df[['ImageId', 'Label']]
print(predict_df.loc[0])
predict_df.to_csv("ans.csv", index=False)
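As a quick sanity check (a minimal sketch, not part of the original code above), the generated ans.csv can be reloaded to confirm that it contains one row per test image and only the two columns ImageId and Label produced by the code:

# Reload the submission file and verify its structure
check_df = pd.read_csv("ans.csv")
print(check_df.shape)             # expected: (number of test images, 2)
print(check_df.columns.tolist())  # expected: ['ImageId', 'Label']
print(check_df.head())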