End-to-End TensorRT Inference
- 1. Reference Links
- 2. Install CUDA 12.4.1 on the Host
- 3. Install nvidia-container-toolkit
- 4. Create the ghcr.io/intel/llvm/ubuntu2204_base Container
- 5. Install CUDA 12.4.1 + TensorRT 10.1.0 in the Container
- 6. Install Dependencies
- 7. Prepare the resnet50 Model
- 8. Prepare the bert Model
- 9. Prepare the yolov5m Model
- 10. Build the TensorRT Inference Program
- 11. Run the onnx Models on CPU to Generate Input/Output Reference Data
- 12. Compare TensorRT C++ Inference with the CPU Output via MSE
- 13. Cleanup
This article demonstrates end-to-end TensorRT inference.
Main contents:
- Generate the onnx models
- Run onnxruntime inference on CPU and save the inputs and outputs as reference files
- Run inference through the TensorRT C++ API and compute the MSE against the reference files (a minimal sketch of this comparison follows this list)
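The comparison in section 12 reduces to a mean-squared error over two raw float32 buffers. A minimal sketch, assuming the reference file comes from the onnxruntime CPU run and the TensorRT program dumps its result to a raw binary (both file names here are illustrative):
import numpy as np

# Illustrative file names: the reference comes from the onnxruntime CPU run,
# trt_output.bin is an assumed name for the TensorRT C++ program's dump.
ref = np.fromfile("resnet50_output.bin", dtype=np.float32)
out = np.fromfile("trt_output.bin", dtype=np.float32)
assert ref.size == out.size
print("MSE:", np.mean((ref - out) ** 2))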
1. Reference Links
- TensorRT 10.5.0 Installation Guide
2. Install CUDA 12.4.1 on the Host
wget https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run
sudo apt-get --purge -y remove 'nvidia*'
bash cuda_12.4.1_550.54.15_linux.run
3. Install nvidia-container-toolkit
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo systemctl restart docker
4. Create the ghcr.io/intel/llvm/ubuntu2204_base Container
docker stop ai_model_dev
docker rm ai_model_dev
docker run --gpus all --shm-size=32g -it -e NVIDIA_VISIBLE_DEVICES=all --privileged --net=host --name ai_model_dev -v $PWD:/home -w /home ghcr.io/intel/llvm/ubuntu2204_base /bin/bash
docker start ai_model_dev
docker exec -ti ai_model_dev /bin/bash
Set a proxy [optional]:
export proxy="http://192.168.30.26:808"
export http_proxy=$proxy
export https_proxy=$proxy
5. Install CUDA 12.4.1 + TensorRT 10.1.0 in the Container
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin
sudo mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu2204-12-4-local_12.4.1-550.54.15-1_amd64.deb
sudo cp /var/cuda-repo-ubuntu2204-12-4-local/cuda-*-keyring.gpg /usr/share/keyrings/
sudo apt-get update
sudo apt-get -y install cuda-toolkit-12-4
wget https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.1.0/local_repo/nv-tensorrt-local-repo-ubuntu2204-10.1.0-cuda-12.4_1.0-1_amd64.deb
dpkg -i nv-tensorrt-local-repo-ubuntu2204-10.1.0-cuda-12.4_1.0-1_amd64.deb
dpkg -i /var/nv-tensorrt-local-repo-ubuntu2204-10.1.0-cuda-12.4/*.deb
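Before building anything against the C++ API, a quick check from Python helps confirm the install. This assumes the dpkg step above also installed the python3-libnvinfer bindings; if it did not, verify the C++ packages with dpkg -l | grep TensorRT instead:
import tensorrt as trt

print(trt.__version__)  # expected: 10.1.0
# Creating a Builder also exercises the CUDA driver inside the container.
builder = trt.Builder(trt.Logger(trt.Logger.WARNING))
print("builder created:", builder is not None)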
6. Install Dependencies
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple/
pip install requests pillow onnx transformers onnxruntime
wget https://download.pytorch.org/whl/cpu/torch-2.3.1%2Bcpu-cp310-cp310-linux_x86_64.whl
pip install torch-2.3.1+cpu-cp310-cp310-linux_x86_64.whl
wget https://download.pytorch.org/whl/cpu/torchvision-0.18.1%2Bcpu-cp310-cp310-linux_x86_64.whl
pip install torchvision-0.18.1+cpu-cp310-cp310-linux_x86_64.whl
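A quick import check confirms the CPU wheels installed cleanly:
import torch, torchvision

print(torch.__version__)        # expected: 2.3.1+cpu
print(torchvision.__version__)  # expected: 0.18.1+cpu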
# Build OpenCV with CUDA support [optional]
git clone -b 4.x https://github.com/opencv/opencv_contrib
git clone -b 4.x https://github.com/opencv/opencv
cd opencv
rm build -rf
mkdir build
cd build
cmake -D CMAKE_BUILD_TYPE=Release \
      -D CMAKE_INSTALL_PREFIX=/usr/local \
      -D OPENCV_EXTRA_MODULES_PATH=/home/opencv_contrib/modules/ \
      -D WITH_CUDA=ON \
      -D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda \
      -D OPENCV_ENABLE_NONFREE=ON \
      -D BUILD_opencv_python3=ON \
      -D WITH_TBB=ON \
      -D BUILD_NEWP=ON \
      -D BUILD_EXAMPLES=OFF ..
make -j
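If you built the optional CUDA-enabled OpenCV (and installed it, e.g. with make install, so the Python bindings are on the path), a short check confirms the CUDA modules can see the GPU:
import cv2

print(cv2.__version__)
# Returns 0 if OpenCV was built without CUDA or no GPU is visible.
print(cv2.cuda.getCudaEnabledDeviceCount())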
7. Prepare the resnet50 Model
import requests
from PIL import Image
from io import BytesIO
import torchvision.transforms as transforms
import torch
import torchvision.models as models

# Load the image
image = Image.open("YellowLabradorLooking_new.jpg")

# Define the preprocessing pipeline
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Apply the preprocessing
img_t = preprocess(image)
input_tensor = torch.unsqueeze(img_t, 0)
print("Image downloaded and preprocessed successfully.")
#with open('resnet50_input.bin', 'wb') as f:
#    f.write(input_tensor.numpy().tobytes())

# Load the pretrained ResNet50 model and run forward inference in eval mode
model = models.resnet50(pretrained=True)
model.eval()
with torch.no_grad():
    output = model(input_tensor)
#with open('resnet50_output.bin', 'wb') as f:
#    f.write(output.numpy().tobytes())

# Get the prediction and map it to an ImageNet class name
predicted = torch.argmax(output, 1)
with open("imagenet_classes.txt") as f:
    idx_to_class = [line.strip() for line in f.readlines()]
predicted_class = idx_to_class[predicted]
print(f"Index:{predicted} Predicted class: {predicted_class}")

# Export to ONNX
input_names = ["input"]
output_names = ["output"]
torch.onnx.export(model, input_tensor, "resnet50.onnx", verbose=False, input_names=input_names, output_names=output_names)
python resnet50.py
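Before moving on, it is worth cross-checking the exported resnet50.onnx against PyTorch with onnxruntime on CPU. A minimal sketch using a random input (the real reference run in section 11 uses the preprocessed image instead):
import numpy as np
import onnxruntime as ort
import torch
import torchvision.models as models

x = np.random.rand(1, 3, 224, 224).astype(np.float32)

# Run the exported model on CPU; "input"/"output" match the export names above.
sess = ort.InferenceSession("resnet50.onnx", providers=["CPUExecutionProvider"])
ort_out = sess.run(["output"], {"input": x})[0]

# Run the same input through the original PyTorch model.
model = models.resnet50(pretrained=True).eval()
with torch.no_grad():
    torch_out = model(torch.from_numpy(x)).numpy()

print("MSE vs PyTorch:", np.mean((ort_out - torch_out) ** 2))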
8. Prepare the bert Model
import torch
from transformers import BertTokenizer, BertModel

# 1. Define a custom model that returns only pooler_output
class BertPoolerOutputModel(torch.nn.Module):
    def __init__(self):
        super(BertPoolerOutputModel, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        # Get the BERT model outputs
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        return outputs.pooler_output  # only return pooler_output: [batch_size, hidden_size]

# 2. Instantiate the custom model and the tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertPoolerOutputModel()
model.eval()

# Prepare the input data
text = "Hello, my dog is cute"
inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True, padding='max_length')
print(inputs['input_ids'].shape)
'''
with open('bert-base-uncased-input_ids.bin', 'wb') as f:
    f.write(inputs['input_ids'].numpy().tobytes())
with open('bert-base-uncased-attention_mask.bin', 'wb') as f:
    f.write(inputs['attention_mask'].numpy().tobytes())
'''
output = model(inputs['input_ids'], inputs['attention_mask'])
print(output.shape)
'''
with open('bert-base-uncased-output.bin', 'wb') as f:
    f.write(output.detach().numpy().tobytes())
'''
# Export to ONNX format
torch.onnx.export(model, (inputs['input_ids'], inputs['attention_mask']), "bert-base.onnx",
                  export_params=True, opset_version=14, do_constant_folding=True,
                  input_names=['input_ids', 'attention_mask'], output_names=['pooler_output'])
python bert_base.py
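A small sanity check of the exported file with onnx's checker; with padding='max_length' both inputs should come out as [1, 512] and pooler_output as [1, 768]:
import onnx

m = onnx.load("bert-base.onnx")
onnx.checker.check_model(m)  # raises if the graph is malformed

# Print the static shapes recorded for each graph input and output.
for t in list(m.graph.input) + list(m.graph.output):
    dims = [d.dim_value for d in t.type.tensor_type.shape.dim]
    print(t.name, dims)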
9. Prepare the yolov5m Model
rm yolov5 -rf
git clone https://github.com/ultralytics/yolov5.git
cd yolov5
pip install -r requirements.txt
wget https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt
python export.py --weights yolov5m.pt --include onnx --img 640
mv yolov5m.onnx ../
cd ..
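A quick CPU smoke test of the exported yolov5m.onnx. With --img 640 and no --dynamic flag the exporter produces a static input shape, so a random tensor of that shape can be fed straight through; the input/output names below are read from the model itself rather than hard-coded:
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("yolov5m.onnx", providers=["CPUExecutionProvider"])
inp = sess.get_inputs()[0]
print(inp.name, inp.shape)  # expected: a static [1, 3, 640, 640] input

x = np.random.rand(*inp.shape).astype(np.float32)
outs = sess.run(None, {inp.name: x})
print([o.shape for o in outs])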
10. Build the TensorRT Inference Program
#include <iostream>
#include <fstream>
#include <cassert>
#include <string>
#include <vector>
#include <map>
#include <functional>
#include <cuda_runtime.h>
#include <NvInfer.h>
#include <NvOnnxParser.h>

/** @brief Custom logger class for recording TensorRT log messages */
class Logger : public nvinfer1::ILogger {
public:
    /** @brief Log callback: only record warnings and above */
    void log(Severity severity, const char* msg) noexcept override {
        if (severity <= Severity::kWARNING) {
            std::cout << msg << std::endl;
        }
    }
};

// Global logger instance
static Logger gLogger;

/** @brief Compute a hash of the data, used for caching models
 *  @param data pointer to the data  @param size data size  @return the hash value */
size_t computeHash(const void* data, std::size_t size