python姿态识别+Tensflow1.12+pyqt5+UI

python姿态识别+Tensflow1.12+pyqt5+UI

# -*- coding: utf-8 -*-
import datetime

from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog

from vedio import vedio
from HumanPoseRec import Ui_MainWindow

'''
Example of usage:

(1) Test on video file:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type video \
    --data_path data_test/exercise.avi \
    --output_folder output

(2) Test on a folder of images:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type folder \
    --data_path data_test/apple/ \
    --output_folder output

(3) Test on web camera:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type webcam \
    --data_path 0 \
    --output_folder output
'''

# -- Input source configuration (webcam by default; the GUI can switch modes).
SRC_DATA_TYPE = "webcam"
SRC_DATA_PATH = "0"
SRC_MODEL_PATH = r'D:\pysave2023\Action-Recognition\model\trained_classifier.pickle'
output_folder = "output/"

ith_img = -1        # global frame counter shared by camera/video/photo modes
predict_label = {}  # NOTE(review): appears unused in this file — confirm before removing
if True:  # Include project path
    import sys
    import os
    ROOT = os.path.dirname(os.path.abspath(__file__)) + "/../"
    CURR_PATH = os.path.dirname(os.path.abspath(__file__)) + "/"
    sys.path.append(ROOT)

    import utils.lib_images_io as lib_images_io
    import utils.lib_plot as lib_plot
    import utils.lib_commons as lib_commons
    from utils.lib_openpose import SkeletonDetector
    from utils.lib_tracker import Tracker  # (was imported twice in the original)
    from utils.lib_classifier import ClassifierOnlineTest
    from utils.lib_classifier import *  # Import all sklearn related libraries


# -- Command-line input
def get_dst_folder_name(src_data_type, src_data_path):
    """Derive the output sub-folder name from the input source.

    video  -> base name of the file (/root/data/video.avi --> video)
    folder -> last path component   (/root/data/video/    --> video)
    webcam -> timestamp string, e.g. 02-26-15-51-12
    """
    global folder_name
    try:
        if src_data_type == "video":
            folder_name = os.path.basename(src_data_path).split(".")[-2]
        elif src_data_type == "folder":
            folder_name = src_data_path.rstrip("/").split("/")[-1]
        elif src_data_type == "webcam":
            folder_name = lib_commons.get_time_string()
    except Exception:
        # NOTE(review): swallowing the error means `folder_name` may be unbound
        # (NameError on return) for a malformed path — confirm this is intended.
        pass
    return folder_name


class MultiPersonClassifier(object):
    """Wrapper around ClassifierOnlineTest for recognizing actions of
    multiple people, keyed by tracker-assigned human id."""

    def __init__(self, model_path, classes):
        self.dict_id2clf = {}  # human id -> classifier of this person
        # Factory for creating a classifier for a newly seen person.
        self._create_classifier = lambda human_id: ClassifierOnlineTest(
            model_path, classes, WINDOW_SIZE, human_id)

    def classify(self, dict_id2skeleton):
        """Classify the action type of each skeleton in dict_id2skeleton."""
        # Drop classifiers of people no longer in view.
        old_ids = set(self.dict_id2clf)
        cur_ids = set(dict_id2skeleton)
        for human in old_ids - cur_ids:
            del self.dict_id2clf[human]

        # Predict each person's action.
        id2label = {}
        for id, skeleton in dict_id2skeleton.items():
            if id not in self.dict_id2clf:  # first time we see this person
                self.dict_id2clf[id] = self._create_classifier(id)
            id2label[id] = self.dict_id2clf[id].predict(skeleton)
        return id2label

    def get_classifier(self, id):
        """Get the classifier for a person id.

        Arguments:
            id {int or "min"}: "min" selects the smallest tracked id.
        """
        if len(self.dict_id2clf) == 0:
            return None
        if id == 'min':
            id = min(self.dict_id2clf.keys())
        return self.dict_id2clf[id]


def remove_skeletons_with_few_joints(skeletons):
    """Remove bad skeletons before sending them to the tracker."""
    good_skeletons = []
    for skeleton in skeletons:
        px = skeleton[2:2 + 13 * 2:2]
        py = skeleton[3:2 + 13 * 2:2]
        num_valid_joints = len([x for x in px if x != 0])
        num_leg_joints = len([x for x in px[-6:] if x != 0])
        total_size = max(py) - min(py)
        # !!! IF JOINTS ARE MISSING, TRY CHANGING THESE VALUES !!!
        if num_valid_joints >= 5 and total_size >= 0.1 and num_leg_joints >= 0:
            # Keep this skeleton only when all requirements are satisfied.
            good_skeletons.append(skeleton)
    return good_skeletons


cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml")
cfg = cfg_all["s5_test.py"]
CLASSES = np.array(cfg_all["classes"])
SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"]

# Action recognition: number of frames used to extract features.
WINDOW_SIZE = int(cfg_all["features"]["window_size"])

dict_id2label = {}  # latest per-person labels, refreshed every processed frame
scale_h = 0         # skeleton y-scale returned by the detector

# -- Output folders
DST_FOLDER_NAME = get_dst_folder_name(SRC_DATA_TYPE, SRC_DATA_PATH)
DST_FOLDER = output_folder + "/" + DST_FOLDER_NAME + "/"
DST_SKELETON_FOLDER_NAME = cfg["output"]["skeleton_folder_name"]
DST_VIDEO_NAME = cfg["output"]["video_name"]
DST_VIDEO_FPS = float(cfg["output"]["video_fps"])  # framerate of output video.avi

# -- Video settings
# If data_type is webcam, set the max frame rate.
SRC_WEBCAM_MAX_FPS = float(cfg["settings"]["source"]["webcam_max_framerate"])
# If data_type is video, set the sampling interval.
# For example, if it's 3, then the video will be read 3 times faster.
SRC_VIDEO_SAMPLE_INTERVAL = int(cfg["settings"]["source"]["video_sample_interval"])

# -- Openpose settings
OPENPOSE_MODEL = cfg["settings"]["openpose"]["model"]
OPENPOSE_IMG_SIZE = cfg["settings"]["openpose"]["img_size"]

# -- Display settings
img_disp_desired_rows = int(cfg["settings"]["display"]["desired_rows"])

# -- Detector, tracker, classifier
skeleton_detector = SkeletonDetector(OPENPOSE_MODEL, OPENPOSE_IMG_SIZE)
multiperson_tracker = Tracker()
multiperson_classifier = MultiPersonClassifier(SRC_MODEL_PATH, CLASSES)

os.makedirs(DST_FOLDER, exist_ok=True)
os.makedirs(DST_FOLDER + DST_SKELETON_FOLDER_NAME, exist_ok=True)

# -- Video writers (camera mode and video mode write through separate writers).
# NOTE(review): both writers target the SAME file (DST_FOLDER + DST_VIDEO_NAME),
# so one mode's output overwrites the other's — consider distinct file names.
video_writer = lib_images_io.VideoWriter(DST_FOLDER + DST_VIDEO_NAME, DST_VIDEO_FPS)
video_writer2 = lib_images_io.VideoWriter(DST_FOLDER + DST_VIDEO_NAME, DST_VIDEO_FPS)
sssr = []  # latest per-class prediction scores, filled by draw_result_img


def draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                    skeleton_detector, multiperson_classifier):
    """Draw skeletons, labels, and prediction scores onto image for display."""
    global sssr
    # Resize to a proper size for display.
    r, c = img_disp.shape[0:2]
    desired_cols = int(1.0 * c * (img_disp_desired_rows / r))
    img_disp = cv2.resize(img_disp, dsize=(desired_cols, img_disp_desired_rows))

    # Draw all people's skeletons.
    skeleton_detector.draw(img_disp, humans)

    # Draw bounding box and label of each person.
    if len(dict_id2skeleton):
        for id, label in dict_id2label.items():
            if id not in dict_id2skeleton:
                # dict_id2label is a module-level cache and may hold ids that
                # left the frame; the original would KeyError here.
                continue
            skeleton = dict_id2skeleton[id]
            # Scale the y data back to the original image coordinates.
            skeleton[1::2] = skeleton[1::2] / scale_h
            lib_plot.draw_action_result(img_disp, id, skeleton, label)

    cv2.putText(img_disp, "Frame:" + str(ith_img),
                (20, 20), fontScale=1.5, fontFace=cv2.FONT_HERSHEY_PLAIN,
                color=(0, 0, 0), thickness=2)

    # Draw prediction scores for only one person (the smallest tracked id).
    if len(dict_id2skeleton):
        classifier_of_a_person = multiperson_classifier.get_classifier(id='min')
        sssr = classifier_of_a_person.draw_scores_onto_image(img_disp)
        print('-------------------------------------------------')
        print(sssr)
    return img_disp


def get_the_skeleton_data_to_save_to_disk(dict_id2skeleton):
    """For each skeleton in the image, collect:
    human_id, label, and the skeleton positions of length 18*2.
    So the total length per row is 2+36=38.
    """
    skels_to_save = []
    for human_id in dict_id2skeleton.keys():
        label = dict_id2label[human_id]
        skeleton = dict_id2skeleton[human_id]
        # NOTE(review): each row is wrapped in an extra list ([[...]]) —
        # kept as-is for lib_commons.save_listlist compatibility; confirm.
        skels_to_save.append([[human_id, label] + skeleton.tolist()])
    return skels_to_save


class Main(Ui_MainWindow, QMainWindow):
    """Main window: webcam / video-file / single-photo action recognition."""

    # Classifier label -> Chinese exercise name shown in the UI and data.txt.
    ENGLISH_TO_CHINESE = {
        'stand': '站姿推举',
        'walk': '摆手',
        'run': '平板支撑',
        'jump': '高抬腿',
        'sit': '扎马步',
        'squat': '深蹲',
        'kick': '俯身飞鸟',
        'punch': '招财猫',
        'wave': '侧平举',
    }

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.vedio = vedio()
        self.timer_camera = QtCore.QTimer()
        self.timer_video = QtCore.QTimer()
        # 定时器函数 — timer callbacks drive frame processing.
        self.timer_camera.timeout.connect(self.show_camera)
        self.timer_video.timeout.connect(self.show_video)
        self.button()
        self.label.setPixmap(QtGui.QPixmap('img.png').scaled(
            self.label.width(), self.label.height()))
        self.pushButton.clicked.connect(self.playVedio)

    def playVedio(self):
        self.vedio.show()
        self.vedio.slotStart()

    def setscore(self):
        """Show the nine per-class scores (as percentages) in the UI labels."""
        try:
            for i in range(9):
                widget = getattr(self, 'actionname{}'.format(i + 1))
                widget.setText(str(float(sssr[i]) * 100))
        except Exception:
            pass  # scores may not be available yet (no person detected)

    def camera_init(self):
        # 打开设置摄像头对象 — open/configure the camera device.
        videoSourceIndex = 0
        # BUG FIX: the original assigned `self.cap1`, but camera() and
        # show_camera() read `self.cap`, which was never set (AttributeError).
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW + videoSourceIndex)
        self.CAM_NUM = 0

    def camera(self):
        """Toggle the camera preview timer on/off."""
        if not self.timer_camera.isActive():
            flag = self.cap.open(self.CAM_NUM)
            if not flag:
                QtWidgets.QMessageBox.warning(
                    self, u"Warning", u"请检测相机与电脑是否连接正确",
                    buttons=QtWidgets.QMessageBox.Ok,
                    defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(50)  # ~20 fps polling
        else:
            self.timer_camera.stop()
            self.cap.release()

    # ----- shared per-frame pipeline (was duplicated verbatim in three modes) -

    def _recognize(self, img):
        """Detect skeletons, track people, classify actions on one frame.

        Returns (annotated display image, dict id -> skeleton).
        Updates module globals dict_id2label and scale_h as a side effect.
        """
        global dict_id2label, scale_h
        img_disp = img.copy()
        humans = skeleton_detector.detect(img)
        skeletons, scale_h = skeleton_detector.humans_to_skels_list(humans)
        skeletons = remove_skeletons_with_few_joints(skeletons)
        # int id -> np.array() skeleton
        dict_id2skeleton = multiperson_tracker.track(skeletons)
        if len(dict_id2skeleton):
            dict_id2label = multiperson_classifier.classify(dict_id2skeleton)
        img_disp = draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                                   skeleton_detector, multiperson_classifier)
        return img_disp, dict_id2skeleton

    def _log_action(self, dict_id2skeleton):
        """Append '<time>, <action>, <score>' to data.txt for the min-id
        person, writing at most one record per second."""
        if not len(dict_id2skeleton):
            return
        print(dict_id2skeleton.keys())
        min_id = min(dict_id2skeleton.keys())
        label = dict_id2label[min_id]
        if label == 'LABEL_UNKNOWN' or label not in self.ENGLISH_TO_CHINESE:
            return
        label_index = list(self.ENGLISH_TO_CHINESE.keys()).index(label)
        print(label_index)
        s = self.ENGLISH_TO_CHINESE[label]
        formatted_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        data_point = f"{formatted_time}, {s}, {sssr[label_index]}\n"
        with open("data.txt", 'a+') as file:
            if file.tell() == 0:
                # Empty file: write the first record unconditionally.
                file.write(data_point)
            else:
                file.seek(0)
                lines = file.readlines()
                # First 19 chars are the timestamp — skip same-second repeats.
                if not lines or lines[-1][:19] != data_point[:19]:
                    file.write(data_point)
        print("当前动作 :", s)
        print('动作分数:', sssr[label_index])

    def _display_and_save(self, img_disp, dict_id2skeleton, writer):
        """Show the annotated frame in the UI, write it to video, and save
        skeleton data to disk."""
        show = cv2.cvtColor(img_disp, cv2.COLOR_BGR2RGB)
        showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        self.setscore()
        writer.write(img_disp)
        skels_to_save = get_the_skeleton_data_to_save_to_disk(dict_id2skeleton)
        lib_commons.save_listlist(
            DST_FOLDER + DST_SKELETON_FOLDER_NAME +
            SKELETON_FILENAME_FORMAT.format(ith_img),
            skels_to_save)

    # ----- frame sources ------------------------------------------------------

    def show_camera(self):
        """Timer callback: process one webcam frame."""
        global ith_img
        # (The original wrapped this in `try/finally: pass`, which is a no-op.)
        flag, img = self.cap.read()
        ith_img += 1
        print(f"\nProcessing {ith_img}th image ...")
        img_disp, dict_id2skeleton = self._recognize(img)
        self._log_action(dict_id2skeleton)
        self._display_and_save(img_disp, dict_id2skeleton, video_writer)

    def show_video(self):
        """Timer callback: process one frame from the loaded video file."""
        global ith_img
        if images_loader.has_image():
            ith_img += 1
            img = images_loader.read_image()
            print(f"\nProcessing {ith_img}th image ...")
            img_disp, dict_id2skeleton = self._recognize(img)
            self._log_action(dict_id2skeleton)
            self._display_and_save(img_disp, dict_id2skeleton, video_writer2)

    # ----- menu actions -------------------------------------------------------

    def button(self):
        """Wire up the menu actions."""
        self.action_3.triggered.connect(self.videoMode)
        self.action_4.triggered.connect(self.cameraMode)
        self.action_2.triggered.connect(self.reset)
        self.action_5.triggered.connect(self.photoMode)
        self.action.triggered.connect(self.save)
        self.actionexit.triggered.connect(QCoreApplication.instance().quit)

    def save(self):
        """Finalize both output videos."""
        video_writer.stop()
        video_writer2.stop()
        print("Program ends")

    def _stop_camera(self):
        """Best-effort stop of the camera timer and capture device."""
        try:
            self.timer_camera.stop()
            self.cap.release()
        except Exception:
            pass  # camera may never have been started

    def videoMode(self):
        """Let the user pick a video file and start video playback processing."""
        global ith_img, images_loader
        ith_img = -1
        self._stop_camera()
        src_paths = QFileDialog.getOpenFileNames(self, '选择动作视频', '', '')[0]
        try:
            dst_folder_name = get_dst_folder_name("video", str(src_paths[0]))
            dst_folder = output_folder + "/" + dst_folder_name + "/"
            skeleton_folder = cfg["output"]["skeleton_folder_name"]
            os.makedirs(dst_folder, exist_ok=True)
            os.makedirs(dst_folder + skeleton_folder, exist_ok=True)
            images_loader = lib_images_io.ReadFromVideo(
                src_paths[0], sample_interval=SRC_VIDEO_SAMPLE_INTERVAL)
            self.timer_video.start(30)
        except Exception:
            pass  # user cancelled the dialog or the video failed to open

    def cameraMode(self):
        """Switch to live webcam processing."""
        global ith_img
        ith_img = -1
        try:
            self.timer_video.stop()
        except Exception:
            pass
        self.camera_init()
        self.camera()

    def photoMode(self):
        """Run recognition on a single user-selected image."""
        global ith_img, dict_id2label, scale_h
        ith_img = 0
        dict_id2label = {}
        scale_h = 0
        self._stop_camera()
        try:
            self.timer_video.stop()
        except Exception:
            pass
        try:
            self.filename = QFileDialog.getOpenFileNames(
                self, "打开图片", "./", "*.jpg;;*.png;;All Files(*)")[0][0]
            print(self.filename)
            img = cv2.imread(self.filename)
            img_disp, dict_id2skeleton = self._recognize(img)
            self._log_action(dict_id2skeleton)
            cv2.imwrite('photoMode.png', img_disp)
            photo = QtGui.QPixmap('photoMode.png').scaled(
                self.label.width(), self.label.height())
            self.label.setPixmap(photo)
            self.setscore()
        except Exception:
            pass  # dialog cancelled or image unreadable

    def reset(self):
        """Stop all sources and restore the placeholder image."""
        global ith_img, dict_id2label, scale_h
        ith_img = 0
        dict_id2label = {}
        scale_h = 0
        self._stop_camera()
        try:
            self.timer_video.stop()
        except Exception:
            pass
        self.label.setPixmap(QtGui.QPixmap('img.png'))


if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = Main()
    win.show()
    sys.exit(app.exec_())

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/848636.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

如何使用Python提取Excel中固定单元格的内容

正文共:1234 字 31 图,预估阅读时间:1 分钟 之前转载了颜总的Python环境搭建文章(Python办公自动化—环境搭建),很久过去了,颜总也没有再更新。现在我有一个需求,就是我手上有大量格…

Android Studio 下载 | 安装 | 下载 SDK | 中文插件 | 卸载

文章目录 下载安装包安装下载 SDK中文插件卸载 下载安装包 下载 Android Studio 和应用工具 - Android 开发者 | Android Developers 点击下载 Android Studio。 在协议最下方勾选同意(第一次用最好还是看一眼),再点击 Dowload Android Stu…

JS 转化中文首字母拼音

引入 chinapy.js文件下载地址 链接:百度云连接 提取码:zrjk import { makePy } from /utils/chinapy; let chinaPyName makePy(黄飞鸿) console.log(chinaPyName) // hfh

css文字超出元素省略,单行、多行省略

通用CSS .box {width: 500px;border: 1px solid red;padding: 10px;line-height: 24px;} 1.单行省略 .singe-line {text-overflow: ellipsis;overflow: hidden;word-break: break-all;white-space: nowrap;}<p>单行省略</p><div class"singe-line box&qu…

选择核密度估计带宽LSCV与DPI方法对比

最小二乘交叉验证（Least Squares Cross-Validation, LSCV）和直接插入法（Direct Plug-in, DPI）是两种常用于选择核密度估计带宽的方法。 原理与定义 最小二乘交叉验证（LSCV） 原理：通过交叉验证…

二叉树的中序遍历-力扣

二叉树的中序遍历，指首先遍历左节点，然后遍历中间节点，最后遍历右节点，按照这个顺序进行递归即可。 /*** Definition for a binary tree node.* struct TreeNode {* int val;* TreeNode *left;* TreeNode *right;* …

【前端】JS示例:实现在一个数组(arrSstd)中查找基于uapid的值,并根据该值是否存在来决定是更新roleid还是添加新数据

要实现在一个数组&#xff08;arrSstd&#xff09;中查找基于uapid的值&#xff0c;并根据该值是否存在来决定是更新roleid还是添加新数据&#xff0c;你可以使用Array.prototype.find或Array.prototype.findIndex方法来检查是否存在具有特定uapid的对象。如果找到&#xff0c;…

GraphQL(5):使用Mutations修改数据

GraphQL中的使用Mutation查询&#xff0c;下面介绍详细操作&#xff0c;新建一个mutation.js文件。 1 定义schema 1.1 定义模型对象 &#xff08;1&#xff09;定义输入模型 input AccountInput {name: Stringage: Intsex: Stringdepartment: String} &#xff08;2&#xf…

攻防世界---misc---Ditf

1、题目描述&#xff0c;下载附件&#xff0c;是一张图片 2、打开图片&#xff0c;发现宽高不一样&#xff0c;猜测可能是需要修改宽高&#xff0c;用winhex分析&#xff0c;没有发现奇怪的地方 3、用kali中的binwalk命令&#xff0c;分析图片&#xff0c;发现有个压缩包&#…

基于SpringBoot+Vue二手家电管理平台设计和实现(源码+LW+调试文档+讲解等)

&#x1f497;博主介绍&#xff1a;✌全网粉丝1W,CSDN作者、博客专家、全栈领域优质创作者&#xff0c;博客之星、平台优质作者、专注于Java、小程序技术领域和毕业项目实战✌&#x1f497; &#x1f31f;文末获取源码数据库&#x1f31f; 感兴趣的可以先收藏起来&#xff0c;还…

【Python基础知识】安装第三方库的两种方式——在线与离线

在线 电脑有网络的情况下&#xff0c;直接换镜像&#xff0c;然后pip install就行。 离线 我遇到了一种情况是&#xff0c;学校里的服务器连不上网&#xff0c;导致没法在线pip install&#xff0c;所以只能通过离线的方式下载。步骤如下&#xff1a; 找一台有网络的电脑&a…

va_start和va_end使用介绍

一 概述 采用C语言编程的时候&#xff0c;函数中形式参数的数目通常是确定的&#xff0c;在调用时要依次给出与形式参数对应的所有实际参数。但在某些情况下希望函数的参数个数可以根据需要确定。典型的例子有大家熟悉的函数printf()、scanf()和系统调用execl()等。那么它们是怎…

Python语言分为:深度解析Python的四大维度、五大特色、六大应用场景及七大发展趋势

Python语言分为&#xff1a;深度解析Python的四大维度、五大特色、六大应用场景及七大发展趋势 Python&#xff0c;作为当今最受欢迎的编程语言之一&#xff0c;其广泛的应用领域和独特的语言特性使其成为编程领域的明星。本文将从四个方面、五个方面、六个方面和七个方面&…

基于jeecgboot-vue3的Flowable流程-待办任务(一)

因为这个项目license问题无法开源&#xff0c;更多技术支持与服务请加入我的知识星球。 1、ToDo.data.ts的数据信息如下 import {BasicColumn} from //components/Table; import {FormSchema} from //components/Table; import { rules} from //utils/helper/validator; impor…

Hyper-V 安装 CentOS Stream 9【图文教程】

文章目录 准备工作:选择 CentOS 版本第 1 步:下载官方下载阿里云镜像下载第 2 步:创建虚拟机第 3 步:安装虚拟机修改安全配置安装虚拟机配置网路第 4 步:使用 SSH 连接查看 IP 地址使用 SSH 连接参考🚀 目标:通过 Hyper-V 在本地搭建一个 CentOS 虚拟机。 准备工作:选…

c# iText使用

引入包 用nuget安装itext和itext.bouncy-castle-adapter包&#xff1a; 创建pdf string path "a.pdf"; PdfWriter writer new PdfWriter(path); PdfDocument pdfDoc new PdfDocument(writer); var docnew Document(pdfDoc); Paragraph p new Paragraph(&quo…

Python03:python代码初体验2

1、变量命名规范 1&#xff09;字母&#xff08;Unicode字符&#xff09;、数字、下划线&#xff0c;不能使用特殊字符&#xff0c;数字不能开头 2&#xff09;变量名是区分大小写的&#xff08;大小写敏感&#xff0c;x和X是两个不同的变量名&#xff09; 3&#xff09;不能使…

【打印功能】js简单实现表格样式的数据打印,按样式打印出来

效果图 代码部分&#xff0c;简单三步 1&#xff0c;html部分&#xff1a;写一个表格&#xff0c;然后数据填进去 <div id"printable-area" v-show"false"><div><div style"text-align: center;height: 40px;line-height: 40px;font…

语音深度鉴伪识别项目实战:基于深度学习的语音深度鉴伪识别算法模型(三)音频去噪算法大全+Python源码应用

前言 深度学习技术在当今技术市场上面尚有余力和开发空间的&#xff0c;主流落地领域主要有&#xff1a;视觉&#xff0c;听觉&#xff0c;AIGC这三大板块。 目前视觉板块的框架和主流技术在我上一篇基于Yolov7-LPRNet的动态车牌目标识别算法模型已有较为详细的解说。与AIGC相…

IDEA快捷生成左边变量,取消final关键字

一&#xff1a;问题 如图&#xff1a;每次使用CtrlALtV或.var自动生成左边变量都会有final关键字&#xff0c;然后需要删除很麻烦 二&#xff1a;解决 以下步骤设置取消生成final关键字 1.先 生成变量&#xff1a;我是使用.var自动生成左边变量 2.使用快捷键ALtshiftO或者点…