1. Create a new CMake project named vs2022_cmake_sampleOnnxMNIST_test (for how to create a CMake project, see the blog post "Visual Studio 2022新建 cmake 工程测试 opencv helloworld", i.e. creating a CMake project in Visual Studio 2022 to test an OpenCV hello world).
2. Delete the auto-generated vs2022_cmake_sampleOnnxMNIST_test.h header file.
3. Edit the auto-generated vs2022_cmake_sampleOnnxMNIST_test.cpp and replace its contents with the contents of sampleOnnxMNIST.cpp found under:
D:\install\tensorRT\TensorRT-8.6.1.6\samples\sampleOnnxMNIST
4. The sample_onnx_mnist.vcxproj file in D:\install\tensorRT\TensorRT-8.6.1.6\samples\sampleOnnxMNIST contains the following:
<ItemGroup>
  <ClCompile Include="sampleOnnxMNIST.cpp" />
  <ClCompile Include="../common/getopt.c" />
  <ClCompile Include="../common/logger.cpp" />
</ItemGroup>
So copy getopt.c and logger.cpp from:
D:\install\tensorRT\TensorRT-8.6.1.6\samples\common
into this project (i.e. the directory containing vs2022_cmake_sampleOnnxMNIST_test.cpp), and change the add_executable command in CMakeLists.txt to the following:
add_executable("vs2022_cmake_sampleOnnxMNIST_test" "vs2022_cmake_sampleOnnxMNIST_test.cpp" "getopt.c" "logger.cpp")
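Equivalently, the three source files can first be collected into a variable so that any future additions only touch one line. This is just a sketch; the variable name SAMPLE_SOURCES is my own choice, not something taken from the vcxproj:
set( SAMPLE_SOURCES "vs2022_cmake_sampleOnnxMNIST_test.cpp" "getopt.c" "logger.cpp" )
add_executable( "vs2022_cmake_sampleOnnxMNIST_test" ${SAMPLE_SOURCES} )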
5. Use Everything to look up where the libraries listed in the <AdditionalDependencies> tag of sample_onnx_mnist.vcxproj (in D:\install\tensorRT\TensorRT-8.6.1.6\samples\sampleOnnxMNIST) are located:
nvinfer.lib;
nvinfer_plugin.lib;
nvonnxparser.lib;
nvparsers.lib;
cudnn.lib;
cublas.lib;
cudart.lib;
All of these files turn out to be in the directory:
C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/
so modify CMakeLists.txt and add the following after the add_executable command:
target_link_libraries("vs2022_cmake_sampleOnnxMNIST_test"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvinfer.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvinfer_plugin.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvonnxparser.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvparsers.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/cudnn.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/cublas.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/cudart.lib")
This command links both kinds of libraries: nvinfer, nvinfer_plugin, nvonnxparser and nvparsers are TensorRT libraries, cudnn comes from cuDNN, and cublas and cudart are CUDA libraries. They all sit in the CUDA lib/x64 directory here presumably because the TensorRT (and cuDNN) .lib files were copied into the CUDA install directory during installation, just like the headers mentioned in step 6.
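Writing out seven absolute paths by hand is easy to get wrong. As a sketch of an equivalent, slightly tidier variant (the variable name TRT_CUDA_LIB_DIR is my own, not from the sample), the directory can be factored out and the libraries linked in a loop:
set( TRT_CUDA_LIB_DIR "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64" )
foreach( lib nvinfer nvinfer_plugin nvonnxparser nvparsers cudnn cublas cudart )
    # target_link_libraries may be called repeatedly; each call appends to the link line
    target_link_libraries( "vs2022_cmake_sampleOnnxMNIST_test" "${TRT_CUDA_LIB_DIR}/${lib}.lib" )
endforeach()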
6. The <AdditionalIncludeDirectories> tag in D:\install\tensorRT\TensorRT-8.6.1.6\samples\sampleOnnxMNIST\sample_onnx_mnist.vcxproj contains the following:
..\..\include;
..\common;
..\common\windows;
$(CUDA_PATH)\include;
So add the following to CMakeLists.txt:
include_directories( "D:/install/tensorRT/TensorRT-8.6.1.6/samples/common" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/include" )
to add the header search paths.
PS: "..\..\include" refers to D:\install\tensorRT\TensorRT-8.6.1.6\include. Why is it not added to the include_directories command? Because when TensorRT was installed, the headers in that directory were already copied into:
C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/include
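The same two paths could also be attached to the executable target only, instead of being set globally. This is just a sketch of an equivalent, target-scoped variant (it must appear after the add_executable call):
target_include_directories( "vs2022_cmake_sampleOnnxMNIST_test" PRIVATE
    "D:/install/tensorRT/TensorRT-8.6.1.6/samples/common"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/include" )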
7. The final CMakeLists.txt looks like this:
cmake_minimum_required (VERSION 3.8)

add_compile_options( "$<$<C_COMPILER_ID:MSVC>:/utf-8>" )
add_compile_options( "$<$<CXX_COMPILER_ID:MSVC>:/utf-8>" )

# Enable Hot Reload for MSVC compilers if supported.
if (POLICY CMP0141)
  cmake_policy(SET CMP0141 NEW)
  set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT "$<IF:$<AND:$<C_COMPILER_ID:MSVC>,$<CXX_COMPILER_ID:MSVC>>,$<$<CONFIG:Debug,RelWithDebInfo>:EditAndContinue>,$<$<CONFIG:Debug,RelWithDebInfo>:ProgramDatabase>>")
endif()

project("vs2022_cmake_sampleOnnxMNIST_test")

#set( tensorrt_libs kernel32.lib
#     user32.lib
#     gdi32.lib
#     winspool.lib
#     comdlg32.lib
#     advapi32.lib
#     shell32.lib
#     ole32.lib
#     oleaut32.lib
#     uuid.lib
#     odbc32.lib
#     odbccp32.lib
#     nvinfer.lib
#     nvinfer_plugin.lib
#     nvonnxparser.lib
#     nvparsers.lib
#     cudnn.lib
#     cublas.lib
#     cudart.lib
#   )

include_directories( "D:/install/tensorRT/TensorRT-8.6.1.6/samples/common" "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/include" )

# Add source code to this project's executable.
add_executable("vs2022_cmake_sampleOnnxMNIST_test" "vs2022_cmake_sampleOnnxMNIST_test.cpp" "getopt.c" "logger.cpp")

#link_directories(
#    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64"
#    "C:/Program Files (x86)/Windows Kits/10/Lib/10.0.26100.0/um/x64"
#    "C:/Program Files (x86)/Windows Kits/10/Lib/10.0.26100.0/ucrt/x64"
#    "D:/install/VisualStudio2022_comm/VC/Tools/MSVC/14.40.33807/lib/x64"
#    "C:/Program Files (x86)/Windows Kits/NETFXSDK/4.8/Lib/um/x64"
#    )

target_link_libraries("vs2022_cmake_sampleOnnxMNIST_test"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvinfer.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvinfer_plugin.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvonnxparser.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/nvparsers.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/cudnn.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/cublas.lib"
    "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.6/lib/x64/cudart.lib")

if (CMAKE_VERSION VERSION_GREATER 3.12)
  set_property(TARGET vs2022_cmake_sampleOnnxMNIST_test PROPERTY CXX_STANDARD 20)
endif()
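Although this project is built from inside the Visual Studio 2022 IDE, the same CMakeLists.txt can also be configured and built from a Developer Command Prompt opened in the project directory; a minimal sketch (the build directory name "build" is arbitrary):
cmake -S . -B build -G "Visual Studio 17 2022" -A x64
cmake --build build --config Debug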
8. The project builds and runs successfully.
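For reference, sampleOnnxMNIST locates mnist.onnx and the *.pgm test images through its --datadir option. A typical invocation might look like the line below, assuming the MNIST data directory shipped with the TensorRT package and assuming the TensorRT/CUDA/cuDNN DLLs can be found via PATH (otherwise copy them next to the executable):
vs2022_cmake_sampleOnnxMNIST_test.exe --datadir=D:/install/tensorRT/TensorRT-8.6.1.6/data/mnist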