TensorRT plugins and ONNX parser编译

https://github.com/NVIDIA/TensorRT是TensorRT plugins and ONNX parser,并不包含TensorRT的nvinfer库(libnvinfer.so / nvinfer.dll),此部分并未开源,只能使用官方支持的平台、环境:https://developer.nvidia.com/tensorrt/download/10x。
在这里插入图片描述

编译https://github.com/NVIDIA/TensorRT只能生成libnvinfer_plugin.so、libnvinfer_vc_plugin.so、libnvonnxparser.so。详见编译的成果物:
在这里插入图片描述
具体说明如官方介绍:
This repository contains the Open Source Software (OSS) components of NVIDIA TensorRT. It includes the sources for TensorRT plugins and ONNX parser, as well as sample applications demonstrating usage and capabilities of the TensorRT platform. These open source software components are a subset of the TensorRT General Availability (GA) release with some extensions and bug-fixes.

1. TensorRT plugins and ONNX parser编译

git clone --recursive https://github.com/NVIDIA/TensorRT
注意加--recursive,否则TensorRT-10.0.0\parsers\onnx 和 TensorRT-10.0.0\parsers\onnx\third_party\onnx下都是空的,还要再手动去github下载。
假设上述代码下载是完整的,编译的方法是修改CMakeLists.txt适配自己的硬件平台即可,当前使用的版本是v10.0.0。
如下是修改后的CMakeLists.txt

#
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Must be the first command: it sets policy defaults for everything below.
cmake_minimum_required(VERSION 3.13 FATAL_ERROR)

include(cmake/modules/set_ifndef.cmake)
include(cmake/modules/find_library_create_target.cmake)

set_ifndef(TRT_LIB_DIR ${CMAKE_BINARY_DIR})
set_ifndef(TRT_OUT_DIR ${CMAKE_BINARY_DIR})

# Converts Windows paths
if(CMAKE_VERSION VERSION_LESS 3.20)
  file(TO_CMAKE_PATH "${TRT_LIB_DIR}" TRT_LIB_DIR)
  file(TO_CMAKE_PATH "${TRT_OUT_DIR}" TRT_OUT_DIR)
else()
  cmake_path(SET TRT_LIB_DIR ${TRT_LIB_DIR})
  cmake_path(SET TRT_OUT_DIR ${TRT_OUT_DIR})
endif()

# Required to export symbols to build *.libs
if(WIN32)
  add_compile_definitions(TENSORRT_BUILD_LIB 1)
endif()

# Set output paths
set(RUNTIME_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for runtime target files")
set(LIBRARY_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for library target files")
set(ARCHIVE_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for archive target files")

if(WIN32)
  set(STATIC_LIB_EXT "lib")
else()
  set(STATIC_LIB_EXT "a")
endif()

# Parse the TensorRT version (MAJOR.MINOR.PATCH.BUILD) out of NvInferVersion.h.
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/NvInferVersion.h" VERSION_STRINGS REGEX "#define NV_TENSORRT_.*")

foreach(TYPE MAJOR MINOR PATCH BUILD)
  string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING ${VERSION_STRINGS})
  string(REGEX MATCH "[0-9]+" TRT_${TYPE} ${TRT_TYPE_STRING})
endforeach(TYPE)

set(TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "TensorRT project version")
set(ONNX2TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "ONNX2TRT project version")
set(TRT_SOVERSION "${TRT_MAJOR}" CACHE STRING "TensorRT library so version")
message("Building for TensorRT version: ${TRT_VERSION}, library version: ${TRT_SOVERSION}")

if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
  find_program(CMAKE_CXX_COMPILER NAMES $ENV{CXX} g++)
endif()

set(CMAKE_SKIP_BUILD_RPATH True)

project(TensorRT
        LANGUAGES CXX CUDA
        VERSION ${TRT_VERSION}
        DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators."
        HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT")

if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
  set(CMAKE_INSTALL_PREFIX ${TRT_LIB_DIR}/../ CACHE PATH "TensorRT installation" FORCE)
endif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)

option(BUILD_PLUGINS "Build TensorRT plugin" ON)
option(BUILD_PARSERS "Build TensorRT parsers" ON)
option(BUILD_SAMPLES "Build TensorRT samples" ON)

# C++14
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

if(NOT MSVC)
  set(CMAKE_CXX_FLAGS "-Wno-deprecated-declarations ${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
else()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
endif()

############################################################################################
# Cross-compilation settings

# added by garrylau: set this to your device's architecture; on NVIDIA Tegra you
# can install jtop and read it from the "7 INFO" page.
set(TRT_PLATFORM_ID aarch64)
set_ifndef(TRT_PLATFORM_ID "x86_64")
message(STATUS "Targeting TRT Platform: ${TRT_PLATFORM_ID}")

############################################################################################
# Debug settings

set(TRT_DEBUG_POSTFIX _debug CACHE STRING "suffix for debug builds")

if (CMAKE_BUILD_TYPE STREQUAL "Debug")
  message("Building in debug mode ${DEBUG_POSTFIX}")
endif()

############################################################################################
# Dependencies

#set(DEFAULT_CUDA_VERSION 12.2.0)        # added by garrylau: match your locally installed CUDA version
#set(DEFAULT_CUDNN_VERSION 8.9)          # added by garrylau: match your locally installed cuDNN version
set(DEFAULT_CUDA_VERSION 11.4.239)
set(DEFAULT_CUDNN_VERSION 8.4.1)
set(DEFAULT_PROTOBUF_VERSION 3.20.1)

# Dependency Version Resolution
set_ifndef(CUDA_VERSION ${DEFAULT_CUDA_VERSION})
message(STATUS "CUDA version set to ${CUDA_VERSION}")
set_ifndef(CUDNN_VERSION ${DEFAULT_CUDNN_VERSION})
message(STATUS "cuDNN version set to ${CUDNN_VERSION}")
set_ifndef(PROTOBUF_VERSION ${DEFAULT_PROTOBUF_VERSION})
message(STATUS "Protobuf version set to ${PROTOBUF_VERSION}")

set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

if (BUILD_PLUGINS OR BUILD_PARSERS)
  include(third_party/protobuf.cmake)
endif()

if(NOT CUB_ROOT_DIR)
  if (CUDA_VERSION VERSION_LESS 11.0)
    set(CUB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/cub CACHE STRING "directory of CUB installation")
  endif()
endif()

## find_package(CUDA) is broken for cross-compilation. Enable CUDA language instead.
if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
  find_package(CUDA ${CUDA_VERSION} REQUIRED)
endif()

include_directories(
  ${CUDA_INCLUDE_DIRS}
)

if(BUILD_PARSERS)
  configure_protobuf(${PROTOBUF_VERSION})
endif()

find_library_create_target(nvinfer nvinfer SHARED ${TRT_LIB_DIR})

find_library(CUDART_LIB cudart_static HINTS ${CUDA_TOOLKIT_ROOT_DIR} PATH_SUFFIXES lib lib/x64 lib64)

if (NOT MSVC)
  find_library(RT_LIB rt)
endif()

set(CUDA_LIBRARIES ${CUDART_LIB})

############################################################################################
# CUDA targets

# added by garrylau: set this to your device's CUDA arch (compute capability);
# on NVIDIA Tegra you can install jtop and read it from the "7 INFO" page.
set(GPU_ARCHS 72)

if (DEFINED GPU_ARCHS)
  message(STATUS "GPU_ARCHS defined as ${GPU_ARCHS}. Generating CUDA code for SM ${GPU_ARCHS}")
  separate_arguments(GPU_ARCHS)
else()
  list(APPEND GPU_ARCHS
      70
      75
    )

  string(REGEX MATCH "aarch64" IS_ARM "${TRT_PLATFORM_ID}")

  if (CUDA_VERSION VERSION_GREATER_EQUAL 11.0)
    # Ampere GPU (SM80) support is only available in CUDA versions > 11.0
    list(APPEND GPU_ARCHS 80)
  endif()

  if (CUDA_VERSION VERSION_GREATER_EQUAL 11.1)
    list(APPEND GPU_ARCHS 86)
  endif()

  message(STATUS "GPU_ARCHS is not defined. Generating CUDA code for default SMs: ${GPU_ARCHS}")
endif()

set(BERT_GENCODES)
# Generate SASS for each architecture
foreach(arch ${GPU_ARCHS})
  if (${arch} GREATER_EQUAL 70)
    set(BERT_GENCODES "${BERT_GENCODES} -gencode arch=compute_${arch},code=sm_${arch}")
  endif()
  set(GENCODES "${GENCODES} -gencode arch=compute_${arch},code=sm_${arch}")
endforeach()

# Generate PTX for the last architecture in the list.
list(GET GPU_ARCHS -1 LATEST_SM)
set(GENCODES "${GENCODES} -gencode arch=compute_${LATEST_SM},code=compute_${LATEST_SM}")
if (${LATEST_SM} GREATER_EQUAL 70)
  set(BERT_GENCODES "${BERT_GENCODES} -gencode arch=compute_${LATEST_SM},code=compute_${LATEST_SM}")
endif()

if(NOT MSVC)
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr -Xcompiler -Wno-deprecated-declarations")
else()
  set(CMAKE_CUDA_SEPARABLE_COMPILATION ON)
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr -Xcompiler")
endif()

############################################################################################
# TensorRT

if(BUILD_PLUGINS)
  add_subdirectory(plugin)
else()
  find_library_create_target(nvinfer_plugin nvinfer_plugin SHARED ${TRT_OUT_DIR} ${TRT_LIB_DIR})
endif()

if(BUILD_PARSERS)
  add_subdirectory(parsers)
else()
  find_library_create_target(nvonnxparser nvonnxparser SHARED ${TRT_OUT_DIR} ${TRT_LIB_DIR})
endif()

if(BUILD_SAMPLES)
  add_subdirectory(samples)
endif()

如下是原版(未修改)的CMakeLists.txt,供对照:

#
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Must be the first command: it sets policy defaults for everything below.
cmake_minimum_required(VERSION 3.13 FATAL_ERROR)

include(cmake/modules/set_ifndef.cmake)
include(cmake/modules/find_library_create_target.cmake)

set_ifndef(TRT_LIB_DIR ${CMAKE_BINARY_DIR})
set_ifndef(TRT_OUT_DIR ${CMAKE_BINARY_DIR})

# Converts Windows paths
if(CMAKE_VERSION VERSION_LESS 3.20)
  file(TO_CMAKE_PATH "${TRT_LIB_DIR}" TRT_LIB_DIR)
  file(TO_CMAKE_PATH "${TRT_OUT_DIR}" TRT_OUT_DIR)
else()
  cmake_path(SET TRT_LIB_DIR ${TRT_LIB_DIR})
  cmake_path(SET TRT_OUT_DIR ${TRT_OUT_DIR})
endif()

# Required to export symbols to build *.libs
if(WIN32)
  add_compile_definitions(TENSORRT_BUILD_LIB 1)
endif()

# Set output paths
set(RUNTIME_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for runtime target files")
set(LIBRARY_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for library target files")
set(ARCHIVE_OUTPUT_DIRECTORY ${TRT_OUT_DIR} CACHE PATH "Output directory for archive target files")

if(WIN32)
  set(STATIC_LIB_EXT "lib")
else()
  set(STATIC_LIB_EXT "a")
endif()

# Parse the TensorRT version (MAJOR.MINOR.PATCH.BUILD) out of NvInferVersion.h.
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/include/NvInferVersion.h" VERSION_STRINGS REGEX "#define NV_TENSORRT_.*")

foreach(TYPE MAJOR MINOR PATCH BUILD)
  string(REGEX MATCH "NV_TENSORRT_${TYPE} [0-9]+" TRT_TYPE_STRING ${VERSION_STRINGS})
  string(REGEX MATCH "[0-9]+" TRT_${TYPE} ${TRT_TYPE_STRING})
endforeach(TYPE)

set(TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "TensorRT project version")
set(ONNX2TRT_VERSION "${TRT_MAJOR}.${TRT_MINOR}.${TRT_PATCH}" CACHE STRING "ONNX2TRT project version")
set(TRT_SOVERSION "${TRT_MAJOR}" CACHE STRING "TensorRT library so version")
message("Building for TensorRT version: ${TRT_VERSION}, library version: ${TRT_SOVERSION}")

if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
  find_program(CMAKE_CXX_COMPILER NAMES $ENV{CXX} g++)
endif()

set(CMAKE_SKIP_BUILD_RPATH True)

project(TensorRT
        LANGUAGES CXX CUDA
        VERSION ${TRT_VERSION}
        DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators."
        HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT")

if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
  set(CMAKE_INSTALL_PREFIX ${TRT_LIB_DIR}/../ CACHE PATH "TensorRT installation" FORCE)
endif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)

option(BUILD_PLUGINS "Build TensorRT plugin" ON)
option(BUILD_PARSERS "Build TensorRT parsers" ON)
option(BUILD_SAMPLES "Build TensorRT samples" ON)

# C++14
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

if(NOT MSVC)
  set(CMAKE_CXX_FLAGS "-Wno-deprecated-declarations ${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
else()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_SYSTEM=cmake_oss")
endif()

############################################################################################
# Cross-compilation settings

set_ifndef(TRT_PLATFORM_ID "x86_64")
message(STATUS "Targeting TRT Platform: ${TRT_PLATFORM_ID}")

############################################################################################
# Debug settings

set(TRT_DEBUG_POSTFIX _debug CACHE STRING "suffix for debug builds")

if (CMAKE_BUILD_TYPE STREQUAL "Debug")
  message("Building in debug mode ${DEBUG_POSTFIX}")
endif()

############################################################################################
# Dependencies

set(DEFAULT_CUDA_VERSION 12.2.0)
set(DEFAULT_CUDNN_VERSION 8.9)
set(DEFAULT_PROTOBUF_VERSION 3.20.1)

# Dependency Version Resolution
set_ifndef(CUDA_VERSION ${DEFAULT_CUDA_VERSION})
message(STATUS "CUDA version set to ${CUDA_VERSION}")
set_ifndef(CUDNN_VERSION ${DEFAULT_CUDNN_VERSION})
message(STATUS "cuDNN version set to ${CUDNN_VERSION}")
set_ifndef(PROTOBUF_VERSION ${DEFAULT_PROTOBUF_VERSION})
message(STATUS "Protobuf version set to ${PROTOBUF_VERSION}")

set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

if (BUILD_PLUGINS OR BUILD_PARSERS)
  include(third_party/protobuf.cmake)
endif()

if(NOT CUB_ROOT_DIR)
  if (CUDA_VERSION VERSION_LESS 11.0)
    set(CUB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/cub CACHE STRING "directory of CUB installation")
  endif()
endif()

## find_package(CUDA) is broken for cross-compilation. Enable CUDA language instead.
if(NOT DEFINED CMAKE_TOOLCHAIN_FILE)
  find_package(CUDA ${CUDA_VERSION} REQUIRED)
endif()

include_directories(
  ${CUDA_INCLUDE_DIRS}
)

if(BUILD_PARSERS)
  configure_protobuf(${PROTOBUF_VERSION})
endif()

find_library_create_target(nvinfer nvinfer SHARED ${TRT_LIB_DIR})

find_library(CUDART_LIB cudart_static HINTS ${CUDA_TOOLKIT_ROOT_DIR} PATH_SUFFIXES lib lib/x64 lib64)

if (NOT MSVC)
  find_library(RT_LIB rt)
endif()

set(CUDA_LIBRARIES ${CUDART_LIB})

############################################################################################
# CUDA targets

if (DEFINED GPU_ARCHS)
  message(STATUS "GPU_ARCHS defined as ${GPU_ARCHS}. Generating CUDA code for SM ${GPU_ARCHS}")
  separate_arguments(GPU_ARCHS)
else()
  list(APPEND GPU_ARCHS
      70
      75
    )

  string(REGEX MATCH "aarch64" IS_ARM "${TRT_PLATFORM_ID}")

  if (CUDA_VERSION VERSION_GREATER_EQUAL 11.0)
    # Ampere GPU (SM80) support is only available in CUDA versions > 11.0
    list(APPEND GPU_ARCHS 80)
  endif()

  if (CUDA_VERSION VERSION_GREATER_EQUAL 11.1)
    list(APPEND GPU_ARCHS 86)
  endif()

  message(STATUS "GPU_ARCHS is not defined. Generating CUDA code for default SMs: ${GPU_ARCHS}")
endif()

set(BERT_GENCODES)
# Generate SASS for each architecture
foreach(arch ${GPU_ARCHS})
  if (${arch} GREATER_EQUAL 70)
    set(BERT_GENCODES "${BERT_GENCODES} -gencode arch=compute_${arch},code=sm_${arch}")
  endif()
  set(GENCODES "${GENCODES} -gencode arch=compute_${arch},code=sm_${arch}")
endforeach()

# Generate PTX for the last architecture in the list.
list(GET GPU_ARCHS -1 LATEST_SM)
set(GENCODES "${GENCODES} -gencode arch=compute_${LATEST_SM},code=compute_${LATEST_SM}")
if (${LATEST_SM} GREATER_EQUAL 70)
  set(BERT_GENCODES "${BERT_GENCODES} -gencode arch=compute_${LATEST_SM},code=compute_${LATEST_SM}")
endif()

if(NOT MSVC)
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr -Xcompiler -Wno-deprecated-declarations")
else()
  set(CMAKE_CUDA_SEPARABLE_COMPILATION ON)
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr -Xcompiler")
endif()

############################################################################################
# TensorRT

if(BUILD_PLUGINS)
  add_subdirectory(plugin)
else()
  find_library_create_target(nvinfer_plugin nvinfer_plugin SHARED ${TRT_OUT_DIR} ${TRT_LIB_DIR})
endif()

if(BUILD_PARSERS)
  add_subdirectory(parsers)
else()
  find_library_create_target(nvonnxparser nvonnxparser SHARED ${TRT_OUT_DIR} ${TRT_LIB_DIR})
endif()

if(BUILD_SAMPLES)
  add_subdirectory(samples)
endif()

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/pingmian/2635.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

科技改变视听4K 120HZ高刷新率的投影、电视、电影终有用武之地

早在1888年,法国生理学家埃蒂安-朱尔·马莱就发明了一套盒式摄像机,能以120帧/s的速度在一条纸膜上曝光照片,但是当时没有相匹配的放映设备。而马莱的另一套拍摄设备是60帧/s的规格,并且图像质量非常好。 受此启发,雷诺的…

【软件测试基础】黑盒测试(知识点 + 习题 + 答案)

《 软件测试基础持续更新中》 对于黑盒测试这一章,等价类划分、边界值测试、决策表、场景法,这四种是最容易出大题的,其他几种考察频率很低。下述的一些例题只是经典例题,掌握方法后,还要多加练习! 目录 3…

极快!宝藏EI,2-4周录用,接受范围广!

本周投稿推荐 SSCI • 2/4区经管类,2.5-3.0(录用率99%) SCIE(CCF推荐) • 计算机类,2.0-3.0(最快18天录用) SCIE(CCF-C类) • IEEE旗下,1/2…

短信视频提取批量工具,免COOKIE,博主视频下载抓取,爬虫

痛点:关于看了好多市面的软件,必须要先登录自己的Dy号才能 然后找到自己的COOKIE 放入软件才可以继续搜索,并且无法避免长时间使用 会导致无法正常显示页面的问题。 有没有一种方法 直接可以使用软件,不用设置的COOKIE的方法呢 …

文献速递:肺癌早期诊断---利用低剂量CT扫描的三维概率深度学习系统用于肺癌的检测与诊

Title 题目 A 3D Probabilistic Deep Learning System forDetection and Diagnosis of Lung Cancer Using Low-Dose CT Scans 利用低剂量CT扫描的三维概率深度学习系统用于肺癌的检测与诊 01文献速递介绍 肺癌既是最常见的癌症之一,也是导致癌症死亡的主要原因之…

【笔记】应对Chrome更新导致Chromedriver失效的解决方案:Chrome For Test

随着网络应用和网站的不断发展,自动化测试变得越来越重要,而Selenium成为了许多开发者和测试人员的首选工具之一。然而,对于使用Selenium来进行网站测试的人来说,Chrome浏览器的频繁更新可能会成为一个头疼的问题。每当Chrome更新…

Docker容器:镜像与容器管理命令

目录 一、镜像管理命令 1、搜索镜像 2、获取镜像 3、镜像加速下载 4、查看下载的镜像文件信息 5、查看下载到本地的所有镜像 6、获取指定镜像的详细信息 7、为本地的镜像添加新的标签 8、删除镜像 8.1 删除指定的镜像 8.2 批量删除多个镜像 9、导出镜像与导入镜像 …

Day 32 122.买卖股票的最佳时机II 55. 跳跃游戏 45.跳跃游戏II

买卖股票的最佳时期Ⅱ 给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。 设计一个算法来计算你所能获取的最大利润。你可以尽可能地完成更多的交易(多次买卖一支股票)。 注意:你不能同时参与多笔交易(你…

云上如何实现 Autoscaling: AutoMQ 的实战经验与教训

01 背景 弹性是云原生、Serverless 的基础。AutoMQ 从软件设计之初即考虑将弹性作为产品的核心特质。对于 Apache Kafka 而言,由于其存储架构诞生于 IDC 时代,针对物理硬件设计,存储层强依赖本地存储,已不能很好地适应现在云的时…

Linux:Centos7.x系统,无效的密码问题处理

一、情景说明 我新创建了Centos7系统,在使用的过程中,我需要创建一个test账号 那么,同时我就要给这个账号设置一个密码 为了方便,我设置成123456 就报错了 二、解决办法 其实这个问题很容易处理,不需要像其他帖子说…

项目报错com.mall.common.domain.request那么就说明你的项目里面是找不到导入类的包名或者路径

当你的项目里面一直报错是找不到导入类的包名或者路径的时候:com.mall.common.domain.request 这个问题我们阔以分为几个角度来想 1、包路径错误:确保com.mall.common.domain.request这个包路径在项目中是正确的。可能的情况是包名写错了,或…

识别有效的IP地址和掩码并进行分类统计

问题概要 请解析IP地址和对应的掩码,进行分类识别。要求按照A/B/C/D/E类地址归类,不合法的地址和掩码单独归类。 所有的IP地址划分为 A,B,C,D,E五类 A类地址从1.0.0.0到126.255.255.255; B类地址从128.0.0.0到191.255.255.255; C类地址从192.0.0.0到223.…

大模型检索召回系统:RAG技术的全面调查与未来展望

随着人工智能技术的飞速发展,大型语言模型(LLMs)在自然语言处理(NLP)领域取得了显著成就。然而,这些模型在处理特定领域或知识密集型任务时仍面临挑战,如产生错误信息或“幻觉”。为了克服这些难…

MC33665 + MC33774 控制流程及 TPL3 帧结构介绍

一. 概述: MC33665A:通用电池管理通信网关和变压器物理层 (TPL) 收发器。该设备通过标准通信协议转发来自不同 TPL(NXP 的隔离菊花链协议)端口的消息,标准通信协议可确保与市场上可用的微控制器兼容。 MC33774…

Fork for Mac v2.42 激活版 Git客户端

Fork for Mac是一款运行在Mac平台上的Git客户端,Fork Mac版具备基本的取、推、提交、修改、创建和删除分支和标签、创建和删除远程备份等功能,还有实用的差异查看器,你可以通过清晰的视图快速发现源代码中的更改。 Fork for Mac v2.42 激活版…

Golang | Leetcode Golang题解之第42题接雨水

题目: 题解: func trap(height []int) (ans int) {n : len(height)if n 0 {return}leftMax : make([]int, n)leftMax[0] height[0]for i : 1; i < n; i {leftMax[i] max(leftMax[i-1], height[i])}rightMax : make([]int, n)rightMax[n-1] height[n-1]for i…

git提交注释规范插件

1、前言 为什么要注重代码提交规范? 在团队协作开发时,每个人提交代码时都会写 commit message。 每个人都有自己的书写风格,翻看我们组的git log, 可以说是五花八门,十分不利于阅读和维护。 一般项目开发都是多分支共存…

Seal^_^【送书活动第2期】——《Flink入门与实战》

Seal^_^【送书活动第2期】——《Flink入门与实战》 一、参与方式 二、本期推荐图书 2.1 作者简介 2.2 编辑推荐 2.3 前言 2.4 本书特点 2.5 内容简介 2.6 本书适用读者 2.7 书籍目录 三、正版购买 一、参与方式 评论:"掌握Flink,驭大数据,实战…

Ubuntu下部署gerrit+报错分析(超详细)

Ubuntu下部署gerrit代码平台 之前安装过几次 最后都在Apache代理这里失败了,如下图,总是gerrit.config与Apache2.config配置有问题,后面换了使用nginx代理,简单多了 安装Mysql、gerrit、jdk、git 这一步也是非必须的…

【c++】list类接口函数介绍与深度剖析模拟实现

🔥个人主页:Quitecoder 🔥专栏:c++笔记仓 朋友们大家好,本篇文章来到list有关部分,这一部分函数与前面的类似,我们简单讲解,重难点在模拟实现时的迭代器有关实现 目录 1.List介绍 2.接…