Object Tracking Algorithm (ByteTrack): TensorRT Deployment Tutorial

1. Set up the Python environment on the local machine

conda create -n bytetrace_env python=3.8
conda activate bytetrace_env
conda install pytorch torchvision cudatoolkit=10.1 -c pytorch

Check whether the GPU is available; if it is not, the rest of this deployment will not work:

import torch
print(torch.cuda.is_available())
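A slightly fuller check, as a minimal sketch, also prints the CUDA version and the detected device:

import torch

print(torch.cuda.is_available())          # must print True before continuing
print(torch.version.cuda)                 # CUDA version this PyTorch build targets
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # name of GPU 0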

Install ByteTrack

git clone https://github.com/ifzhang/ByteTrack.git
cd ByteTrack
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
python setup.py develop

If the commands above complete without errors, the installation succeeded.
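To double-check that the editable install resolves correctly, a minimal import test:

import yolox
print(yolox.__file__)  # should point into the cloned ByteTrack directory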
Install pycocotools

pip install cython
pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

Or (on Linux):

pip install git+https://gitee.com/pursuit_zhangyu/cocoapi.git#subdirectory=PythonAPI

On Windows:

pip install pycocotools-windows
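Whichever route you take, a quick import confirms that pycocotools is usable:

from pycocotools.coco import COCO  # the import succeeding is the main check
print(COCO)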

2. Install the TensorRT environment

Download the TensorRT package:
TensorRT-8.4.3.1.Windows10.x86_64.cuda-10.2.cudnn8.4.zip
Copy all the DLLs from the package's lib directory to

C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin

and add that directory to the PATH environment variable.
The Python version in the virtual environment is 3.8, so install the matching cp38 wheel from the package's python subdirectory:

pip install tensorrt-8.4.3.1-cp38-none-win_amd64.whl
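A quick import verifies that the wheel matches the DLLs copied earlier; if the DLLs are not on PATH, this import fails on Windows:

import tensorrt as trt
print(trt.__version__)  # expect 8.4.3.1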

3. Convert the model

https://pan.baidu.com/s/1PiP1kQfgxAIrnGUbFP6Wfg
(extraction code: qflm)
Download bytetrack_s_mot17.pth.tar from the link above and create a pretrained directory to store it.

python tools/trt.py -f exps/example/mot/yolox_s_mix_det.py -c pretrained/bytetrack_s_mot17.pth.tar

Finally, the TensorRT engine and the pth model are generated under D:\git_clone\ByteTrack-main\YOLOX_outputs\yolox_s_mix_det.
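For reference, here is a rough sketch of what tools/trt.py does internally, assuming it follows YOLOX's torch2trt-based conversion; names such as get_exp, test_size, and decode_in_inference are taken from YOLOX and may differ in your checkout:

import torch
from torch2trt import torch2trt
from yolox.exp import get_exp

# Hedged sketch of the conversion performed by tools/trt.py (not the verbatim script).
exp = get_exp("exps/example/mot/yolox_s_mix_det.py", None)
model = exp.get_model().eval().cuda()

ckpt = torch.load("pretrained/bytetrack_s_mot17.pth.tar", map_location="cpu")
model.load_state_dict(ckpt["model"])
model.head.decode_in_inference = False  # box decoding happens in the C++ demo instead

x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
model_trt = torch2trt(model, [x], fp16_mode=True, max_workspace_size=1 << 32)

torch.save(model_trt.state_dict(), "model_trt.pth")  # pth wrapper for Python use
with open("model_trt.engine", "wb") as f:            # raw engine loaded by the C++ demo
    f.write(model_trt.engine.serialize())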

4. Generate the Eigen library with CMake and build it with VS2015

https://pan.baidu.com/s/15kEfCxpy-T7tz60msxxExg
(extraction code: ueq4)
Download Eigen 3.3.9 from the link above, generate the project with CMake, and build the INSTALL target in VS2015 so the headers end up under eigen-3.3.9\build\install\include\eigen3, the path referenced by the CMakeLists below.

5. Download OpenCV 4.5.0

https://nchc.dl.sourceforge.net/project/opencvlibrary/4.5.0/opencv-4.5.0-vc14_vc15.exe?viasf=1
Install it to D:\opencv450.

6. Generate the bytetrack project with CMake and build it with VS2015

Modify CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)

project(bytetrack)

add_definitions(-std=c++11)

option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)

find_package(CUDA REQUIRED)

include_directories(${PROJECT_SOURCE_DIR}/include)
include_directories(D:/VS2015_CUDA/TensorRT/eigen-3.3.9/build/install/include/eigen3)
link_directories(${PROJECT_SOURCE_DIR}/include)
# include and link dirs of cuda and tensorrt, you need adapt them if yours are different
# cuda (paths containing spaces must be quoted)
include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include")
link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64")
# cudnn (installed into the CUDA directories here)
include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include")
link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64")
# tensorrt
include_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/include)
link_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/lib)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -D_MWAITXINTRIN_H_INCLUDED")

set(OpenCV_INCLUDE_DIRS D:/opencv450/build/include)
# directory only; the OpenCV library itself is added manually in VS (see below)
set(OpenCV_LIBS D:/opencv450/build/x64/vc14/lib)
include_directories(${OpenCV_INCLUDE_DIRS})

file(GLOB My_Source_Files ${PROJECT_SOURCE_DIR}/src/*.cpp)
add_executable(bytetrack ${My_Source_Files})

target_link_libraries(bytetrack nvinfer)
target_link_libraries(bytetrack cudart)
target_link_libraries(bytetrack ${OpenCV_LIBS})

add_definitions(-O2 -pthread)

Configure and generate with CMake (an example command is sketched below), then open the generated VS2015 solution and adjust the project configuration.
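An assumed example of the configure step, run from a build directory inside the bytetrack project (the generator name targets VS2015 64-bit):

cd build
cmake -G "Visual Studio 14 2015 Win64" ..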
The OpenCV libraries can be added to the project manually for linking (e.g. opencv_world450.lib from D:\opencv450\build\x64\vc14\lib).
Modify bytetrack.cpp:

int img_w = cap.get(cv::CAP_PROP_FRAME_WIDTH);
int img_h = cap.get(cv::CAP_PROP_FRAME_HEIGHT);

There is a bug in the original demo here: the frame height was read with the wrong property, which gives the VideoWriter the wrong frame size and corrupts the demo.mp4 written later. Read the height with cv::CAP_PROP_FRAME_HEIGHT as shown above.
Build the project to generate bytetrack.exe.

7. Inspect the exe's dependencies with Dependency Walker

Open bytetrack.exe in Dependency Walker to verify that the required DLLs (TensorRT, CUDA, OpenCV) can all be found on the PATH. Then run the demo, passing the engine file and an input video:

D:\VS2015_CUDA\TensorRT\cpp\model_trt.engine -i D:\VS2015_CUDA\TensorRT\cpp\palace.mp4
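Before running the exe, a short hedged Python check can confirm that the engine deserializes and exposes the binding names the C++ demo expects (input_0 and output_0, per the source below):

import tensorrt as trt

# Hedged check: deserialize the engine that bytetrack.exe will load.
logger = trt.Logger(trt.Logger.WARNING)
with open(r"D:\VS2015_CUDA\TensorRT\cpp\model_trt.engine", "rb") as f:
    with trt.Runtime(logger) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())

for i in range(engine.num_bindings):
    print(engine.get_binding_name(i), engine.get_binding_shape(i))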

8. Windows source code

bytetrack.cpp

#include <fstream>
#include <iostream>
#include <sstream>
#include <numeric>
#include <chrono>
#include <vector>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"
#include "BYTETracker.h"#define CHECK(status) \do\{\auto ret = (status);\if (ret != 0)\{\cerr << "Cuda failure: " << ret << endl;\abort();\}\} while (0)#define DEVICE 0  // GPU id
#define NMS_THRESH 0.7
#define BBOX_CONF_THRESH 0.1using namespace nvinfer1;// stuff we know about the network and the input/output blobs
static const int INPUT_W = 1088;
static const int INPUT_H = 608;
const char* INPUT_BLOB_NAME = "input_0";
const char* OUTPUT_BLOB_NAME = "output_0";
static Logger gLogger;

Mat static_resize(Mat& img) {
    float r = min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0));
    // r = std::min(r, 1.0f);
    int unpad_w = r * img.cols;
    int unpad_h = r * img.rows;
    Mat re(unpad_h, unpad_w, CV_8UC3);
    resize(img, re, re.size());
    Mat out(INPUT_H, INPUT_W, CV_8UC3, Scalar(114, 114, 114));
    re.copyTo(out(Rect(0, 0, re.cols, re.rows)));
    return out;
}

struct GridAndStride
{
    int grid0;
    int grid1;
    int stride;
};

static void generate_grids_and_stride(const int target_w, const int target_h, vector<int>& strides, vector<GridAndStride>& grid_strides)
{
    for (auto stride : strides)
    {
        GridAndStride GS;
        int num_grid_w = target_w / stride;
        int num_grid_h = target_h / stride;
        for (int g1 = 0; g1 < num_grid_h; g1++)
        {
            for (int g0 = 0; g0 < num_grid_w; g0++)
            {
                GS.grid0 = g0;
                GS.grid1 = g1;
                GS.stride = stride;
                grid_strides.push_back(GS);
            }
        }
    }
}

static inline float intersection_area(const Object& a, const Object& b)
{
    Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}

static void qsort_descent_inplace(vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        while (faceobjects[i].prob > p)
            i++;
        while (faceobjects[j].prob < p)
            j--;
        if (i <= j)
        {
            // swap
            swap(faceobjects[i], faceobjects[j]);
            i++;
            j--;
        }
    }

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}

static void qsort_descent_inplace(vector<Object>& objects)
{
    if (objects.empty())
        return;
    qsort_descent_inplace(objects, 0, objects.size() - 1);
}

static void nms_sorted_bboxes(const vector<Object>& faceobjects, vector<int>& picked, float nms_threshold)
{
    picked.clear();

    const int n = faceobjects.size();

    vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.area();
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];
        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }
        if (keep)
            picked.push_back(i);
    }
}

static void generate_yolox_proposals(vector<GridAndStride> grid_strides, float* feat_blob, float prob_threshold, vector<Object>& objects)
{
    const int num_class = 1;
    const int num_anchors = grid_strides.size();

    for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
    {
        const int grid0 = grid_strides[anchor_idx].grid0;
        const int grid1 = grid_strides[anchor_idx].grid1;
        const int stride = grid_strides[anchor_idx].stride;

        const int basic_pos = anchor_idx * (num_class + 5);

        // yolox/models/yolo_head.py decode logic
        float x_center = (feat_blob[basic_pos+0] + grid0) * stride;
        float y_center = (feat_blob[basic_pos+1] + grid1) * stride;
        float w = exp(feat_blob[basic_pos+2]) * stride;
        float h = exp(feat_blob[basic_pos+3]) * stride;
        float x0 = x_center - w * 0.5f;
        float y0 = y_center - h * 0.5f;

        float box_objectness = feat_blob[basic_pos+4];
        for (int class_idx = 0; class_idx < num_class; class_idx++)
        {
            float box_cls_score = feat_blob[basic_pos + 5 + class_idx];
            float box_prob = box_objectness * box_cls_score;
            if (box_prob > prob_threshold)
            {
                Object obj;
                obj.rect.x = x0;
                obj.rect.y = y0;
                obj.rect.width = w;
                obj.rect.height = h;
                obj.label = class_idx;
                obj.prob = box_prob;

                objects.push_back(obj);
            }
        } // class loop
    } // point anchor loop
}

float* blobFromImage(Mat& img) {
    cvtColor(img, img, COLOR_BGR2RGB);

    float* blob = new float[img.total()*3];
    int channels = 3;
    int img_h = img.rows;
    int img_w = img.cols;
    vector<float> mean = {0.485f, 0.456f, 0.406f};
    vector<float> std = {0.229f, 0.224f, 0.225f};
    for (size_t c = 0; c < channels; c++)
    {
        for (size_t h = 0; h < img_h; h++)
        {
            for (size_t w = 0; w < img_w; w++)
            {
                blob[c * img_w * img_h + h * img_w + w] =
                    (((float)img.at<Vec3b>(h, w)[c]) / 255.0f - mean[c]) / std[c];
            }
        }
    }
    return blob;
}

static void decode_outputs(float* prob, vector<Object>& objects, float scale, const int img_w, const int img_h) {
    vector<Object> proposals;
    vector<int> strides = {8, 16, 32};
    vector<GridAndStride> grid_strides;
    generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides);
    generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, proposals);
    //std::cout << "num of boxes before nms: " << proposals.size() << std::endl;

    qsort_descent_inplace(proposals);

    vector<int> picked;
    nms_sorted_bboxes(proposals, picked, NMS_THRESH);

    int count = picked.size();
    //std::cout << "num of boxes: " << count << std::endl;

    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // adjust offset to original unpadded
        float x0 = (objects[i].rect.x) / scale;
        float y0 = (objects[i].rect.y) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height) / scale;

        // clip
        // x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
        // y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
        // x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
        // y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }
}

const float color_list[80][3] =
{
    {0.000, 0.447, 0.741}, {0.850, 0.325, 0.098}, {0.929, 0.694, 0.125}, {0.494, 0.184, 0.556},
    {0.466, 0.674, 0.188}, {0.301, 0.745, 0.933}, {0.635, 0.078, 0.184}, {0.300, 0.300, 0.300},
    {0.600, 0.600, 0.600}, {1.000, 0.000, 0.000}, {1.000, 0.500, 0.000}, {0.749, 0.749, 0.000},
    {0.000, 1.000, 0.000}, {0.000, 0.000, 1.000}, {0.667, 0.000, 1.000}, {0.333, 0.333, 0.000},
    {0.333, 0.667, 0.000}, {0.333, 1.000, 0.000}, {0.667, 0.333, 0.000}, {0.667, 0.667, 0.000},
    {0.667, 1.000, 0.000}, {1.000, 0.333, 0.000}, {1.000, 0.667, 0.000}, {1.000, 1.000, 0.000},
    {0.000, 0.333, 0.500}, {0.000, 0.667, 0.500}, {0.000, 1.000, 0.500}, {0.333, 0.000, 0.500},
    {0.333, 0.333, 0.500}, {0.333, 0.667, 0.500}, {0.333, 1.000, 0.500}, {0.667, 0.000, 0.500},
    {0.667, 0.333, 0.500}, {0.667, 0.667, 0.500}, {0.667, 1.000, 0.500}, {1.000, 0.000, 0.500},
    {1.000, 0.333, 0.500}, {1.000, 0.667, 0.500}, {1.000, 1.000, 0.500}, {0.000, 0.333, 1.000},
    {0.000, 0.667, 1.000}, {0.000, 1.000, 1.000}, {0.333, 0.000, 1.000}, {0.333, 0.333, 1.000},
    {0.333, 0.667, 1.000}, {0.333, 1.000, 1.000}, {0.667, 0.000, 1.000}, {0.667, 0.333, 1.000},
    {0.667, 0.667, 1.000}, {0.667, 1.000, 1.000}, {1.000, 0.000, 1.000}, {1.000, 0.333, 1.000},
    {1.000, 0.667, 1.000}, {0.333, 0.000, 0.000}, {0.500, 0.000, 0.000}, {0.667, 0.000, 0.000},
    {0.833, 0.000, 0.000}, {1.000, 0.000, 0.000}, {0.000, 0.167, 0.000}, {0.000, 0.333, 0.000},
    {0.000, 0.500, 0.000}, {0.000, 0.667, 0.000}, {0.000, 0.833, 0.000}, {0.000, 1.000, 0.000},
    {0.000, 0.000, 0.167}, {0.000, 0.000, 0.333}, {0.000, 0.000, 0.500}, {0.000, 0.000, 0.667},
    {0.000, 0.000, 0.833}, {0.000, 0.000, 1.000}, {0.000, 0.000, 0.000}, {0.143, 0.143, 0.143},
    {0.286, 0.286, 0.286}, {0.429, 0.429, 0.429}, {0.571, 0.571, 0.571}, {0.714, 0.714, 0.714},
    {0.857, 0.857, 0.857}, {0.000, 0.447, 0.741}, {0.314, 0.717, 0.741}, {0.50, 0.5, 0}
};

void doInference(IExecutionContext& context, float* input, float* output, const int output_size, Size input_shape) {
    const ICudaEngine& engine = context.getEngine();

    // Pointers to input and output device buffers to pass to engine.
    // Engine requires exactly IEngine::getNbBindings() number of buffers.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT);
    int mBatchSize = engine.getMaxBatchSize();

    // Create GPU buffers on device
    CHECK(cudaMalloc(&buffers[inputIndex], 3 * input_shape.height * input_shape.width * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], output_size*sizeof(float)));

    // Create stream
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, 3 * input_shape.height * input_shape.width * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(1, buffers, stream, nullptr);
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    // Release stream and buffers
    cudaStreamDestroy(stream);
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}

int main(int argc, char** argv) {
    cudaSetDevice(DEVICE);

    // create a model using the API directly and serialize it to a stream
    char *trtModelStream{nullptr};
    size_t size{0};

    if (argc == 4 && string(argv[2]) == "-i") {
        const string engine_file_path {argv[1]};
        ifstream file(engine_file_path, ios::binary);
        if (file.good()) {
            file.seekg(0, file.end);
            size = file.tellg();
            file.seekg(0, file.beg);
            trtModelStream = new char[size];
            assert(trtModelStream);
            file.read(trtModelStream, size);
            file.close();
        }
    } else {
        cerr << "arguments not right!" &
