CMakeLists.txt
cmake_minimum_required(VERSION 3.0) # minimum CMake version; an older CMake aborts the configure step
set(CMAKE_CXX_STANDARD 14)          # required for OpenCV 4 and newer
project(cmake_test) # project name
find_package(OpenCV REQUIRED)
# find_package(PCL REQUIRED)
find_package(CUDA REQUIRED)
find_package(TensorRT REQUIRED)
include_directories(
    ${PROJECT_SOURCE_DIR}/include
    # ${PCL_INCLUDE_DIRS}
    ${CUDA_INCLUDE_DIRS}
    ${TensorRT_INCLUDE_DIRS}
    ${TensorRT_INCLUDE_DIRS}/../samples/common
)
add_executable(infer src/main.cpp)

# link libraries
target_link_libraries(infer
    ${OpenCV_LIBS}
    # ${PCL_LIBRARIES}
    ${CUDA_LIBRARIES}
    ${TensorRT_LIBS}
)
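find_package(TensorRT REQUIRED) assumes a FindTensorRT.cmake module (or a TensorRT config package) is visible to CMake; it is not shipped with CMake itself, and it is that module which defines TensorRT_INCLUDE_DIRS and TensorRT_LIBS. To confirm that the include paths and libraries actually resolve, a minimal sanity check can be compiled against the same configuration. The file name check_trt.cpp below is a hypothetical example, not part of the project:

check_trt.cpp (hypothetical)
// Prints the TensorRT version this file was compiled against and the version of
// the library it is linked to; if this builds and runs, CMake found TensorRT.
#include <iostream>
#include "NvInfer.h"

int main()
{
    std::cout << "Compiled against TensorRT "
              << NV_TENSORRT_MAJOR << "." << NV_TENSORRT_MINOR << "."
              << NV_TENSORRT_PATCH << std::endl;
    std::cout << "Linked TensorRT library version: "
              << getInferLibVersion() << std::endl; // e.g. 8601 for 8.6.1
    return 0;
}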
infer.h
#pragma once

#include <iostream>
#include <vector>
#include <list>
#include <algorithm>

using namespace std;

string names[] = {"xxxxx"}; // replace with your own class labels

struct BOX
{
    float x;
    float y;
    float width;
    float height;
};

struct Object
{
    BOX box;
    int label;
    float confidence;
};

bool cmp(Object &obj1, Object &obj2)
{
    return obj1.confidence > obj2.confidence;
}

vector<list<Object>> NMS(std::vector<Object> objs, float iou_thres = 0.45)
{
    // Step 1: group the boxes by class label; each group is sorted by confidence, highest first.
    list<Object> obj_l;
    vector<list<Object>> NMS_List;
    int a = 0;
    for(int i = 0; i < 1; i++) // loop over class labels; change 1 to your class count
    {
        for(auto j : objs)
        {
            if(j.label == i)
            {
                obj_l.push_back(j);
                obj_l.sort(cmp); // sort by confidence in descending order
                a = 1;
            }
        }
        if(a == 1)
        {
            NMS_List.push_back(std::move(obj_l));
            a = 0;
        }
    }

    // Step 2: compute IoU against each higher-confidence box and drop strong overlaps.
    float x1, y1, x1_w, y1_h, x2, y2, x2_w, y2_h;
    float x_box, y_box, x_w_box, y_h_box, w_box, h_box;
    float S1, S2, SBOX, res_iou;
    int row = NMS_List.size(); // number of classes; per-class box count: NMS_List[i].size()
    int tmp;
    for(int i = 0; i < row; i++) // per-class loop
    {
        tmp = 0;
        list<Object>::iterator it = NMS_List[i].begin();
        while(it != --NMS_List[i].end())
        {
            // current reference box (highest remaining confidence at this position)
            x1 = it->box.x;
            y1 = it->box.y;
            x1_w = x1 + it->box.width;
            y1_h = y1 + it->box.height;
            while(it != --NMS_List[i].end())
            {
                it++;
                x2 = it->box.x;
                y2 = it->box.y;
                x2_w = x2 + it->box.width;
                y2_h = y2 + it->box.height;
                // top-left corner of the intersection: max of x1/x2 and max of y1/y2
                x_box = std::max(x1, x2);
                y_box = std::max(y1, y2);
                // bottom-right corner of the intersection: min of x1_w/x2_w and min of y1_h/y2_h
                x_w_box = std::min(x1_w, x2_w);
                y_h_box = std::min(y1_h, y2_h);
                // width and height of the intersection
                w_box = x_w_box - x_box;
                h_box = y_h_box - y_box;
                // no intersection: IoU is 0, so the candidate box is kept
                if(w_box <= 0 || h_box <= 0)
                {
                    continue;
                }
                // boxes intersect: compute IoU and erase the candidate if it exceeds the threshold
                S1 = (x1_w - x1) * (y1_h - y1);
                S2 = (x2_w - x2) * (y2_h - y2);
                SBOX = w_box * h_box;
                if((res_iou = SBOX / (S1 + S2 - SBOX)) > iou_thres)
                {
                    it = NMS_List[i].erase(it);
                    if(it == NMS_List[i].end()) { break; }
                    it--;
                }
            }
            // advance the reference iterator to the next surviving box
            it = NMS_List[i].begin();
            if(it == --NMS_List[i].end()) { break; }
            tmp++;
            for(int z = 0; z < tmp; z++)
            {
                it++;
                if(it == --NMS_List[i].end()) { break; }
            }
        }
    }
    return NMS_List;
}
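NMS() returns one list<Object> per class, each sorted by confidence with heavily overlapping lower-confidence boxes removed. As a quick illustration of the calling convention (the boxes, scores, and threshold below are made-up values, not outputs of the model):

// Hypothetical standalone usage of NMS(). The first two boxes overlap almost
// completely, so only the higher-confidence one survives; the third box is far
// away (IoU = 0) and is kept.
std::vector<Object> objs;
objs.push_back({{100.f, 100.f, 50.f, 50.f}, 0, 0.9f});
objs.push_back({{102.f, 101.f, 50.f, 50.f}, 0, 0.6f}); // IoU with the first box is about 0.89
objs.push_back({{300.f, 300.f, 40.f, 40.f}, 0, 0.8f});

vector<list<Object>> kept = NMS(objs, 0.45f);
for (const auto &cls : kept)
    for (const auto &obj : cls)
        cout << "label " << obj.label << " confidence " << obj.confidence << endl;
// Expected: the 0.9 and 0.8 boxes remain, the 0.6 box is suppressed.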
main.cpp
#include<fstream>
#include<iostream>
#include<opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iomanip>            // fixed-precision output for the confidence text
#include <sstream>            // std::stringstream
#include <algorithm>          // std::min, std::max_element
#include <cuda_runtime_api.h> // cudaMalloc / cudaMemcpyAsync / cudaStream_t

#include "NvInfer.h"
#include "logging.h"
#include "infer.h"

using namespace std;
using namespace nvinfer1;

class MyLogger : public nvinfer1::ILogger
{
public:
    explicit MyLogger(nvinfer1::ILogger::Severity severity =
                          nvinfer1::ILogger::Severity::kWARNING)
        : severity_(severity) {}

    void log(nvinfer1::ILogger::Severity severity,
             const char *msg) noexcept override
    {
        if (severity <= severity_)
        {
            std::cerr << msg << std::endl;
        }
    }

    nvinfer1::ILogger::Severity severity_;
};

int main()
{
    // 1. Image preprocessing
    const int model_width = 640;
    const int model_height = 640;
    string image_path = "../data/12.png"; // path to your own image
    float *input_blob = new float[model_height * model_width * 3];
    cv::Mat input_image = cv::imread(image_path);
    cv::Mat resize_image;
    // letterbox: scale while keeping the aspect ratio
    const float ratio = std::min(model_width / (input_image.cols * 1.0f),
                                 model_height / (input_image.rows * 1.0f));
    const int border_width = input_image.cols * ratio;
    const int border_height = input_image.rows * ratio;
    // offsets that center the resized image inside the model input
    const int x_offset = (model_width - border_width) / 2;
    const int y_offset = (model_height - border_height) / 2;
    // resize the input image into resize_image
    cv::resize(input_image, resize_image, cv::Size(border_width, border_height));
    // pad the borders with gray (114, 114, 114)
    cv::copyMakeBorder(resize_image, resize_image, y_offset, y_offset, x_offset,
                       x_offset, cv::BORDER_CONSTANT, cv::Scalar(114, 114, 114));
    // convert BGR to RGB
    cv::cvtColor(resize_image, resize_image, cv::COLOR_BGR2RGB);
    // cv::imshow("12", resize_image);
    // cv::waitKey(0);

    // normalize to [0, 1] and rearrange HWC -> CHW
    const int channels = resize_image.channels();
    const int width = resize_image.cols;
    const int height = resize_image.rows;
    for (int c = 0; c < channels; c++) {
        for (int h = 0; h < height; h++) {
            for (int w = 0; w < width; w++) {
                // at<cv::Vec3b> reads the pixel value of a 3-channel image at (h, w)
                input_blob[c * width * height + h * width + w] =
                    resize_image.at<cv::Vec3b>(h, w)[c] / 255.0f;
            }
        }
    }

    // 2. Engine deserialization
    MyLogger logger;
    // read the serialized engine file
    const std::string engine_file_path = "../data/best.engine"; // path to your own engine file
    std::stringstream engine_file_stream;
    engine_file_stream.seekg(0, engine_file_stream.beg);
    std::ifstream ifs(engine_file_path, std::ios::binary);
    engine_file_stream << ifs.rdbuf();
    ifs.close();
    engine_file_stream.seekg(0, std::ios::end);        // seek to the end to measure the stream
    const int model_size = engine_file_stream.tellg(); // total length of the stream
    engine_file_stream.seekg(0, std::ios::beg);
    void *model_mem = malloc(model_size);               // buffer of the same size
    engine_file_stream.read(static_cast<char *>(model_mem), model_size); // copy the engine bytes into model_mem

    nvinfer1::IRuntime *runtime = nvinfer1::createInferRuntime(logger);
    nvinfer1::ICudaEngine *engine = runtime->deserializeCudaEngine(model_mem, model_size);
    free(model_mem);

    // 3. Inference
    nvinfer1::IExecutionContext *context = engine->createExecutionContext();
    void *buffers[2];
    // query the input dimensions and allocate GPU memory
    nvinfer1::Dims input_dim = engine->getBindingDimensions(0);
    int input_size = 1;
    for (int j = 0; j < input_dim.nbDims; ++j) {
        input_size *= input_dim.d[j];
    }
    cudaMalloc(&buffers[0], input_size * sizeof(float));
    // query the output dimensions and allocate GPU memory
    nvinfer1::Dims output_dim = engine->getBindingDimensions(1);
    int output_size = 1;
    for (int j = 0; j < output_dim.nbDims; ++j) {
        output_size *= output_dim.d[j];
    }
    cudaMalloc(&buffers[1], output_size * sizeof(float));
    // allocate matching CPU memory for the output
    float *output_buffer = new float[output_size];

    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // copy the input data to the GPU
    cudaMemcpyAsync(buffers[0], input_blob, input_size * sizeof(float),
                    cudaMemcpyHostToDevice, stream);
    // enqueue the inference on the stream
    if (context->enqueueV2(buffers, stream, nullptr)) {
        cout << "enqueueV2 inference succeeded" << endl;
    } else {
        cout << "enqueueV2 inference failed" << endl;
        return -1;
    }
    // copy the output back to the host
    cudaMemcpyAsync(output_buffer, buffers[1], output_size * sizeof(float),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    // release GPU resources and the input blob; output_buffer is still needed below
    cudaStreamDestroy(stream);
    cudaFree(buffers[0]);
    cudaFree(buffers[1]);
    delete context;
    delete engine;
    delete runtime;
    delete[] input_blob;

    // 4. Decode output_buffer into objs; each row is [cx, cy, w, h, objectness, class scores...]
    int nc = 1; // number of classes in your model
    float *ptr = output_buffer;
    std::vector<Object> objs;
    for (int i = 0; i < 25200; ++i) { // iterate over all prediction rows (25200 here)
        const float objectness = ptr[4];
        if (objectness >= 0.45f) {
            // std::max_element returns an iterator to the largest class score
            const int label = std::max_element(ptr + 5, ptr + 5 + nc) - (ptr + 5);
            const float confidence = ptr[5 + label] * objectness;
            if (confidence >= 0.25f) {
                const float bx = ptr[0];
                const float by = ptr[1];
                const float bw = ptr[2];
                const float bh = ptr[3];
                // std::cout << bx << "," << by << "," << bw << "," << bh << std::endl;
                Object obj;
                // map back to the original image: subtract the letterbox offsets, divide by
                // the scale ratio, and convert the center coordinates to the top-left corner
                obj.box.x = (bx - bw * 0.5f - x_offset) / ratio;
                obj.box.y = (by - bh * 0.5f - y_offset) / ratio;
                obj.box.width = bw / ratio;
                obj.box.height = bh / ratio;
                obj.label = label;
                obj.confidence = confidence;
                // std::cout << obj.box.x << "," << obj.box.y << "," << obj.box.width << ","
                //           << obj.box.height << "," << obj.label << "," << obj.confidence << std::endl;
                objs.push_back(std::move(obj));
            }
        }
        ptr += (5 + nc);
    } // i loop

    // 5. Non-maximum suppression
    vector<list<Object>> finalll = NMS(objs);

    // 6. Draw the boxes
    int row = finalll.size();
    for (int i = 0; i < row; i++) {
        list<Object>::iterator it = finalll[i].begin();
        while (it != finalll[i].end()) {
            cv::Point topLeft(it->box.x, it->box.y);
            cv::Point bottomRight(it->box.x + it->box.width, it->box.y + it->box.height);
            cv::rectangle(input_image, topLeft, bottomRight, cv::Scalar(0, 0, 255), 2);
            std::stringstream buff;
            buff.precision(2); // override the default precision: keep 2 decimals of the confidence
            buff.setf(std::ios::fixed);
            buff << it->confidence;
            string text = names[it->label] + " " + buff.str();
            cv::putText(input_image, text, topLeft, 0, 1, cv::Scalar(0, 255, 0), 2);
            it++;
        }
    }
    cv::imwrite("pig.jpg", input_image);

    delete[] output_buffer;
    return 0;
}
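The cudaMalloc / cudaMemcpyAsync / cudaStreamSynchronize calls in main() ignore their return codes, so a size mismatch between the engine bindings and the buffers only shows up later as garbage detections. A small checking macro makes such failures visible immediately; the CHECK_CUDA name below is an arbitrary choice for this sketch, not part of the original code:

// Hypothetical error-checking helper for the CUDA runtime calls used above.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime_api.h>

#define CHECK_CUDA(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            std::fprintf(stderr, "CUDA error %s at %s:%d: %s\n",      \
                         cudaGetErrorName(err_), __FILE__, __LINE__,  \
                         cudaGetErrorString(err_));                   \
            std::exit(EXIT_FAILURE);                                  \
        }                                                             \
    } while (0)

// Usage, wrapping the calls from main():
//   CHECK_CUDA(cudaMalloc(&buffers[0], input_size * sizeof(float)));
//   CHECK_CUDA(cudaMemcpyAsync(buffers[0], input_blob, input_size * sizeof(float),
//                              cudaMemcpyHostToDevice, stream));
//   CHECK_CUDA(cudaStreamSynchronize(stream));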