Yahboom microROS car - native Ubuntu support series: 26. Gesture control of basic car motion

Background

Hand detection: Yahboom microROS car - native Ubuntu support series: 4 - Hand detection (CSDN blog)

Program description

Once the function is running, the camera captures images and the recognized hand gesture drives the car:

Gesture "5" (open palm): car moves forward
Fist (no fingers extended): car moves backward
Gesture "1" (index finger only): car turns left
Gesture "2" (index and middle finger): car turns right
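
The node below implements this mapping in hand_threading(); conceptually it is just a lookup from the detected finger vector to a velocity command. A minimal sketch of the idea (the speed values 0.3 and 0.5 are taken from HandCtrl.py below; gesture_to_twist itself is illustrative and not part of the Yahboom sources):

# Illustrative only; the real logic lives in hand_threading() in HandCtrl.py.
# fingers = [thumb, index, middle, ring, pinky], 1 = extended.
def gesture_to_twist(fingers):
    if sum(fingers) == 5:                                    # "5": forward
        return 0.3, 0.0
    if sum(fingers) == 0:                                    # fist: backward
        return -0.3, 0.0
    if sum(fingers) == 1 and fingers[1] == 1:                # "1": turn left
        return 0.0, 0.5
    if sum(fingers) == 2 and fingers[1] == fingers[2] == 1:  # "2": turn right
        return 0.0, -0.5
    return 0.0, 0.0                                          # anything else: stop

The returned pair is (linear.x, angular.z) for the Twist message published on /cmd_vel.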

Running it:

Start the car agent and the image agent.
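
(Both agents were set up in earlier parts of this series. As a reminder, a micro-ROS agent is typically started as below; the udp4 transport and the port numbers are placeholders here, substitute whatever your car and ESP32 camera firmware are configured with:)

ros2 run micro_ros_agent micro_ros_agent udp4 --port <car_port>
ros2 run micro_ros_agent micro_ros_agent udp4 --port <camera_port>

Then start the gesture-control node: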

ros2 run yahboom_esp32ai_car HandCtrl

Log output (each vector is [thumb, index, middle, ring, pinky] from fingersUp(); 1 means the finger is extended):

fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 0, 0, 0]   (×9)
fingers:  [1, 0, 0, 0, 1]
fingers:  [0, 1, 1, 0, 0]   (×10)
fingers:  [0, 0, 0, 0, 0]   (×2)
fingers:  [1, 0, 0, 0, 1]
fingers:  [1, 0, 0, 0, 0]   (×3)
fingers:  [1, 1, 0, 0, 0]   (×2)
fingers:  [1, 0, 0, 0, 0]   (×2)
fingers:  [1, 1, 0, 0, 0]
fingers:  [1, 0, 0, 0, 0]
(consecutive duplicate lines collapsed; ×N = the line appeared N times in a row)

Create media_library.py in the src/yahboom_esp32ai_car/yahboom_esp32ai_car/ directory:

#!/usr/bin/env python3
# encoding: utf-8
import base64
import math
import rclpy
from rclpy.node import Node
import cv2 as cv
import mediapipe as mp
from time import sleep
from std_msgs.msg import Int32, Bool, UInt16
from yahboomcar_msgs.msg import *
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image


def get_dist(point1, point2):
    '''Distance between two points: (x1-x2)**2 + (y1-y2)**2 = dist**2'''
    x1, y1 = point1
    x2, y2 = point2
    return abs(math.sqrt(math.pow(abs(y1 - y2), 2) + math.pow(abs(x1 - x2), 2)))


def calc_angle(pt1, pt2, pt3):
    '''Angle at the middle point pt2, by the law of cosines: cos(B) = (a^2 + b^2 - c^2) / (2ab)'''
    a = get_dist(pt1, pt2)
    b = get_dist(pt2, pt3)
    c = get_dist(pt1, pt3)
    try:
        radian = math.acos((math.pow(a, 2) + math.pow(b, 2) - math.pow(c, 2)) / (2 * a * b))
        angle = radian / math.pi * 180
    except:
        angle = 0
    return angle


class HandDetector:
    def __init__(self, mode=False, maxHands=1, detectorCon=0.5, trackCon=0.5):
        self.mpHand = mp.solutions.hands
        self.draw = False
        self.mpDraw = mp.solutions.drawing_utils
        self.hands = self.mpHand.Hands(
            static_image_mode=mode,
            max_num_hands=maxHands,
            min_detection_confidence=detectorCon,
            min_tracking_confidence=trackCon)

    def findHands(self, frame):
        lmList = []
        self.cxList = []
        self.cyList = []
        bbox = 0, 0, 0, 0
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.hands.process(img_RGB)
        if self.results.multi_hand_landmarks:
            for i in range(len(self.results.multi_hand_landmarks)):
                if not self.draw:
                    self.mpDraw.draw_landmarks(
                        frame, self.results.multi_hand_landmarks[i], self.mpHand.HAND_CONNECTIONS)
                for id, lm in enumerate(self.results.multi_hand_landmarks[i].landmark):
                    h, w, c = frame.shape
                    cx, cy = int(lm.x * w), int(lm.y * h)
                    lmList.append([id, cx, cy])
                    self.cxList.append(cx)
                    self.cyList.append(cy)
        if len(self.cxList) != 0 and len(self.cyList) != 0:
            xmin, xmax = min(self.cxList), max(self.cxList)
            ymin, ymax = min(self.cyList), max(self.cyList)
            bbox = xmin, ymin, xmax, ymax
            if self.draw:
                cv.rectangle(frame, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20), (0, 255, 0), 2)
        return frame, lmList, bbox

    def fingersUp(self, lmList):
        fingers = []
        point1 = lmList[4][1:3]
        point2 = lmList[3][1:3]
        point3 = lmList[2][1:3]
        point4 = lmList[1][1:3]
        # Thumb: extended if both joint angles along the thumb are nearly straight
        if (abs(calc_angle(point1, point2, point3)) > 150.0) and \
           (abs(calc_angle(point2, point3, point4)) > 150.0):
            fingers.append(1)
        else:
            fingers.append(0)
        # Other four fingers: the fingertip must be above (smaller y than) the joint two landmarks below it
        tipIds = [4, 8, 12, 16, 20]
        for id in range(1, 5):
            if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        return fingers

    def ThumbTOforefinger(self, lmList):
        '''Angle between the thumb tip and the index fingertip, measured at the wrist.'''
        point1 = lmList[4][1:3]
        point2 = lmList[0][1:3]
        point3 = lmList[8][1:3]
        return abs(calc_angle(point1, point2, point3))

    def get_gesture(self, lmList):
        gesture = ""
        fingers = self.fingersUp(lmList)
        if fingers[2] == fingers[3] == fingers[4] == 1:
            if self.ThumbTOforefinger(lmList) < 10:
                gesture = "OK"
        if fingers[1] == fingers[2] == 1 and sum(fingers) == 2:
            gesture = "Yes"
        try:
            if self.cyList[4] == max(self.cyList):
                gesture = "Thumb_down"
        except Exception as e:
            print("e: ", e)
        return gesture


class PoseDetector:
    def __init__(self, mode=False, smooth=True, detectionCon=0.5, trackCon=0.5):
        self.mpPose = mp.solutions.pose
        self.mpDraw = mp.solutions.drawing_utils
        self.pose = self.mpPose.Pose(
            static_image_mode=mode,
            smooth_landmarks=smooth,
            min_detection_confidence=detectionCon,
            min_tracking_confidence=trackCon)
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=6)
        self.drawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2)

    def pubPosePoint(self, frame, draw=True):
        pointArray = []
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.pose.process(img_RGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(
                    frame, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS,
                    self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = frame.shape
                pointArray.append([id, lm.x * w, lm.y * h, lm.z])
        return frame, pointArray


class Holistic:
    def __init__(self, staticMode=False, landmarks=True, detectionCon=0.5, trackingCon=0.5):
        self.mpHolistic = mp.solutions.holistic
        self.mpFaceMesh = mp.solutions.face_mesh
        self.mpHands = mp.solutions.hands
        self.mpPose = mp.solutions.pose
        self.mpDraw = mp.solutions.drawing_utils
        self.mpholistic = self.mpHolistic.Holistic(
            static_image_mode=staticMode,
            smooth_landmarks=landmarks,
            min_detection_confidence=detectionCon,
            min_tracking_confidence=trackingCon)
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=3)
        self.drawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2)

    def findHolistic(self, frame, draw=True):
        poseptArray = []
        lhandptArray = []
        rhandptArray = []
        h, w, c = frame.shape
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.mpholistic.process(img_RGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(
                    frame, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS,
                    self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                poseptArray.append([id, lm.x * w, lm.y * h, lm.z])
        if self.results.left_hand_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(
                    frame, self.results.left_hand_landmarks, self.mpHands.HAND_CONNECTIONS,
                    self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.left_hand_landmarks.landmark):
                lhandptArray.append([id, lm.x * w, lm.y * h, lm.z])
        if self.results.right_hand_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(
                    frame, self.results.right_hand_landmarks, self.mpHands.HAND_CONNECTIONS,
                    self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.right_hand_landmarks.landmark):
                rhandptArray.append([id, lm.x * w, lm.y * h, lm.z])
        return frame, poseptArray, lhandptArray, rhandptArray


class FaceMesh:
    def __init__(self, staticMode=False, maxFaces=1, minDetectionCon=0.5, minTrackingCon=0.5):
        self.mpDraw = mp.solutions.drawing_utils
        self.mpFaceMesh = mp.solutions.face_mesh
        self.faceMesh = self.mpFaceMesh.FaceMesh(
            static_image_mode=staticMode,
            max_num_faces=maxFaces,
            min_detection_confidence=minDetectionCon,
            min_tracking_confidence=minTrackingCon)
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=3)
        self.drawSpec = self.mpDraw.DrawingSpec(color=(0, 255, 0), thickness=1, circle_radius=1)

    def pubFaceMeshPoint(self, frame, draw=True):
        pointArray = []
        h, w, c = frame.shape
        imgRGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.faceMesh.process(imgRGB)
        if self.results.multi_face_landmarks:
            for i in range(len(self.results.multi_face_landmarks)):
                if draw:
                    # note: newer mediapipe releases renamed FACE_CONNECTIONS
                    # (e.g. to FACEMESH_CONTOURS); adjust if your version complains
                    self.mpDraw.draw_landmarks(
                        frame, self.results.multi_face_landmarks[i], self.mpFaceMesh.FACE_CONNECTIONS,
                        self.lmDrawSpec, self.drawSpec)
                for id, lm in enumerate(self.results.multi_face_landmarks[i].landmark):
                    pointArray.append([id, lm.x * w, lm.y * h, lm.z])
        return frame, pointArray


class Media_ROS(Node):
    def __init__(self):
        super().__init__("mediapipe")
        # rclpy.on_shutdown(self.cancel)
        self.Joy_active = True
        self.pub_cmdVel = self.create_publisher(Twist, "/cmd_vel", 1)
        self.pub_Buzzer = self.create_publisher(UInt16, "/beep", 1)
        self.pub_img = self.create_publisher(Image, '/image', 500)

    def JoyStateCallback(self, msg):
        if not isinstance(msg, Bool): return
        self.Joy_active = not self.Joy_active
        self.pub_vel(0.0, 0.0, 0.0)  # original called pub_vel(0, 0) and was missing the z argument

    def pub_vel(self, x, y, z):
        twist = Twist()
        twist.linear.x = x
        twist.linear.y = y
        twist.angular.z = z
        self.pub_cmdVel.publish(twist)

    def pub_buzzer(self, status):
        beep = UInt16()              # original published a bare bool on a UInt16 topic
        beep.data = 1 if status else 0
        self.pub_Buzzer.publish(beep)

    def RobotBuzzer(self):
        self.pub_buzzer(True)
        sleep(1)
        self.pub_buzzer(False)
        sleep(1)
        self.pub_buzzer(False)
        for i in range(2):
            self.pub_vel(0.0, 0.0, 0.0)
            sleep(0.1)

    # def pub_arm(self, joints, id=6, angle=180, runtime=500):
    #     arm_joint = ArmJoint()
    #     arm_joint.id = id
    #     arm_joint.angle = angle
    #     arm_joint.run_time = runtime
    #     if len(joints) != 0: arm_joint.joints = joints
    #     else: arm_joint.joints = []
    #     self.pubPoint.publish(arm_joint)
    #     # rospy.loginfo(arm_joint)

    def pub_imgMsg(self, frame):
        # packing base64 bytes into sensor_msgs/Image is non-standard; kept as in the original source
        pic_base64 = base64.b64encode(frame)
        image = Image()
        size = frame.shape
        image.height = size[0]
        image.width = size[1]
        # image.channels = size[2]
        image.data = pic_base64
        self.pub_img.publish(image)

    def cancel(self):
        self.pub_cmdVel.publish(Twist())
        # rclpy publishers have no unregister(); the original used rospy-style calls here
        self.destroy_publisher(self.pub_cmdVel)
        self.destroy_publisher(self.pub_img)
        self.destroy_publisher(self.pub_Buzzer)


class SinglePID:
    def __init__(self, P=0.1, I=0.0, D=0.1):
        self.Kp = P
        self.Ki = I
        self.Kd = D
        print("init_pid: ", P, I, D)
        self.pid_reset()

    def Set_pid(self, P, I, D):
        self.Kp = P
        self.Ki = I
        self.Kd = D
        print("set_pid: ", P, I, D)
        self.pid_reset()

    def pid_compute(self, target, current):
        self.error = target - current
        self.intergral += self.error
        self.derivative = self.error - self.prevError
        result = self.Kp * self.error + self.Ki * self.intergral + self.Kd * self.derivative
        self.prevError = self.error
        return result

    def pid_reset(self):
        self.error = 0
        self.intergral = 0
        self.derivative = 0
        self.prevError = 0
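
HandDetector can be smoke-tested on its own, without the robot, before wiring it into ROS. A minimal sketch, assuming a local webcam at index 0 and mediapipe/opencv-python installed (this test script is not part of the Yahboom sources):

#!/usr/bin/env python3
# Standalone HandDetector smoke test (illustrative, not from the Yahboom sources).
import cv2 as cv
from yahboom_esp32ai_car.media_library import HandDetector

detector = HandDetector(maxHands=1)
cap = cv.VideoCapture(0)                  # assumed: local webcam at index 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame, lmList, bbox = detector.findHands(frame)
    if len(lmList) != 0:                  # 21 hand landmarks were found
        print("fingers: ", detector.fingersUp(lmList))
    cv.imshow("hand", frame)
    if cv.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break
cap.release()
cv.destroyAllWindows()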

Next, create HandCtrl.py. This is the original Yahboom source file and it has bugs; if you are going to run it on the car, debug it yourself.

#!/usr/bin/env python3
# encoding: utf-8
import threading
import datetime
import time
import cv2 as cv
import numpy as np
import rclpy
from rclpy.node import Node
from rclpy.time import Time
from std_msgs.msg import Int32, Bool, UInt16
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CompressedImage
from yahboom_esp32ai_car.media_library import *


class PoseCtrlArm(Node):
    def __init__(self, name):
        super().__init__(name)
        self.pub_Servo1 = self.create_publisher(Int32, "servo_s1", 10)
        self.pub_Servo2 = self.create_publisher(Int32, "servo_s2", 10)
        self.x = 0
        self.y = 45
        servo1_angle = Int32()
        servo2_angle = Int32()
        servo1_angle.data = self.x
        servo2_angle.data = self.y
        self.car_status = True
        self.stop_status = 0
        self.locking = False
        self.pose_detector = Holistic()
        self.hand_detector = HandDetector()
        self.pTime = self.index = 0
        self.media_ros = Media_ROS()
        self.event = threading.Event()
        self.event.set()
        # make sure the camera gimbal starts centered
        for i in range(10):
            self.pub_Servo1.publish(servo1_angle)
            self.pub_Servo2.publish(servo2_angle)
            time.sleep(0.1)

    def process(self, frame):
        # frame = cv.flip(frame, 1)
        if self.media_ros.Joy_active:
            frame, lmList, _ = self.hand_detector.findHands(frame)  # detect the hand
            if len(lmList) != 0:
                threading.Thread(target=self.hand_threading, args=(lmList,)).start()
            else:
                self.media_ros.pub_vel(0.0, 0.0, 0.0)
            self.media_ros.pub_imgMsg(frame)
        return frame

    def hand_threading(self, lmList):
        if self.event.is_set():
            self.event.clear()
            self.stop_status = 0
            self.index = 0
            fingers = self.hand_detector.fingersUp(lmList)  # which fingers are extended
            print("fingers: ", fingers)
            if sum(fingers) == 5:    # "5": forward
                self.media_ros.pub_vel(0.3, 0.0, 0.0)
                time.sleep(0.5)
            elif sum(fingers) == 0:  # fist: backward
                self.media_ros.pub_vel(-0.3, 0.0, 0.0)
                time.sleep(0.5)
            elif sum(fingers) == 1 and fingers[1] == 1:  # "1": turn left
                self.media_ros.pub_vel(0.0, 0.0, 0.5)
                time.sleep(0.5)
            elif sum(fingers) == 2 and fingers[1] == 1 and fingers[2] == 1:  # "2": turn right
                self.media_ros.pub_vel(0.0, 0.0, -0.5)
                time.sleep(0.5)
            else:
                self.media_ros.pub_vel(0.0, 0.0, 0.0)
            self.event.set()


class MY_Picture(Node):
    def __init__(self, name):
        super().__init__(name)
        self.bridge = CvBridge()
        # image stream published by the ESP32 camera
        self.sub_img = self.create_subscription(
            CompressedImage, '/espRos/esp32camera', self.handleTopic, 1)
        self.pose_ctrl_arm = PoseCtrlArm('posectrlarm')
        self.last_stamp = None
        self.new_seconds = 0
        self.fps_seconds = 1

    def handleTopic(self, msg):
        self.last_stamp = msg.header.stamp
        if self.last_stamp:
            total_secs = Time(nanoseconds=self.last_stamp.nanosec,
                              seconds=self.last_stamp.sec).nanoseconds
            delta = datetime.timedelta(seconds=total_secs * 1e-9)
            seconds = delta.total_seconds() * 100
            if self.new_seconds != 0:
                self.fps_seconds = seconds - self.new_seconds
            self.new_seconds = seconds  # keep this frame's value for the next callback
        start = time.time()
        frame = self.bridge.compressed_imgmsg_to_cv2(msg)
        frame = cv.resize(frame, (640, 480))
        cv.waitKey(1)
        frame = self.pose_ctrl_arm.process(frame)
        end = time.time()
        fps = 1 / ((end - start) + self.fps_seconds)
        text = "FPS : " + str(int(fps))
        cv.putText(frame, text, (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
        # The original code branched on an undefined `binary` and a missing ManyImgs()
        # helper here; trimmed to a plain imshow (one of the bugs mentioned above).
        cv.imshow('frame', frame)


def main():
    rclpy.init()
    esp_img = MY_Picture("My_Picture")
    print("start it")
    try:
        rclpy.spin(esp_img)
    except KeyboardInterrupt:
        pass
    finally:
        esp_img.destroy_node()
        rclpy.shutdown()
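
For ros2 run yahboom_esp32ai_car HandCtrl to find the new node, HandCtrl must be registered as a console script and the workspace rebuilt. A sketch of the relevant setup.py fragment, assuming the package uses the usual ament_python layout (the real file will already contain other entries; merge rather than replace):

# setup.py (excerpt, assumed layout)
entry_points={
    'console_scripts': [
        'HandCtrl = yahboom_esp32ai_car.HandCtrl:main',
    ],
},

Then rebuild and source the workspace:

colcon build --packages-select yahboom_esp32ai_car
source install/setup.bash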

As you can see, the hand is detected first, producing lmList, which is then passed to fingersUp. fingersUp determines which fingers are extended: an extended finger gets the value 1 (see media_library.py for the details; essentially it compares the x/y coordinates of the finger joints to decide whether each finger is straight). sum(fingers) counts how many fingers are extended, and fingers[] indexes individual fingers; the index finger, for example, is fingers[1].
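
Concretely, for the "2" gesture seen in the log above:

# fingers = [thumb, index, middle, ring, pinky]
fingers = [0, 1, 1, 0, 0]
sum(fingers)                   # 2 fingers extended in total
fingers[1] == fingers[2] == 1  # True: they are the index and middle fingers
# -> hand_threading() takes the "2" branch and publishes angular.z = -0.5 (turn right)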

Flowchart (diagram in the original post): camera frame -> findHands -> fingersUp -> gesture branch in hand_threading -> /cmd_vel
