[AI Smart Camera] Deploying YOLOv5 on the RV1126 with Acceleration

Exporting the ONNX model

Official YOLOv5 repository: git clone https://github.com/ultralytics/yolov5

Export with the official command: python export.py --weights yolov5n.pt --include onnx

Or export it from code:

import os
import sys
os.chdir(sys.path[0])
import onnx
import torch
sys.path.append('..')
from models.common import DetectMultiBackend
from models.experimental import attempt_load
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

def main():
    """Create the model and export it to ONNX."""
    input = torch.randn(1, 3, 640, 640, requires_grad=False).float().to(torch.device(DEVICE))
    model = attempt_load('./model/yolov5n.pt', device=DEVICE, inplace=True, fuse=True)  # load FP32 model
    # model = DetectMultiBackend('./model/yolov5n.pt', data=input)
    model.to(DEVICE)
    torch.onnx.export(model,
                      input,
                      'yolov5n_self.onnx',  # name of the exported onnx model
                      export_params=True,
                      opset_version=12,
                      do_constant_folding=False,
                      input_names=["images"])

if __name__ == "__main__":
    main()
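
The script imports onnx but never uses it; a quick sanity check of the exported file is worth adding before moving on. A minimal sketch, assuming the file name from the export above:

import onnx

model = onnx.load('yolov5n_self.onnx')
onnx.checker.check_model(model)  # raises if the graph is malformed
# Knowing the output node names helps later when configuring the RKNN conversion
print([o.name for o in model.graph.output])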

Testing the ONNX model

import os
import sys
os.chdir(sys.path[0])
import onnxruntime
import torch
import torchvision
import numpy as np
import time
import cv2
sys.path.append('..')
from ultralytics.utils.plotting import Annotator, colors

ONNX_MODEL = "./yolov5n.onnx"
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

def xywh2xyxy(x):
    """Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right."""
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y
    y[..., 2] = x[..., 0] + x[..., 2] / 2  # bottom right x
    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom right y
    return y

def box_iou(box1, box2, eps=1e-7):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.

    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
        IoU values for every element in boxes1 and boxes2
    """
    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
    # IoU = inter / (area1 + area2 - inter)
    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)

def non_max_suppression(
    prediction,
    conf_thres=0.25,
    iou_thres=0.45,
    classes=None,
    agnostic=False,
    multi_label=False,
    labels=(),
    max_det=300,
    nm=0,  # number of masks
):
    """
    Non-Maximum Suppression (NMS) on inference results to reject overlapping detections.

    Returns:
        list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """
    # Checks
    assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
    assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
    device = prediction.device
    mps = "mps" in device.type  # Apple MPS
    if mps:  # MPS not fully supported yet, convert tensors to CPU before NMS
        prediction = prediction.cpu()
    bs = prediction.shape[0]  # batch size
    nc = prediction.shape[2] - nm - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    # min_wh = 2  # (pixels) minimum box width and height
    max_wh = 7680  # (pixels) maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 0.5 + 0.05 * bs  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    mi = 5 + nc  # mask start index
    output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            lb = labels[xi]
            v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
            v[:, :4] = lb[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box/Mask
        box = xywh2xyxy(x[:, :4])  # (center_x, center_y, width, height) to (x1, y1, x2, y2)
        mask = x[:, mi:]  # zero columns if no masks

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
        else:  # best class only
            conf, j = x[:, 5:mi].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence and remove excess boxes

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        i = i[:max_det]  # limit detections
        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if mps:
            output[xi] = output[xi].to(device)
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output

def draw_bbox(image, result, color=(0, 0, 255), thickness=2):
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = image.copy()
    for point in result:
        x1, y1, x2, y2 = point
        cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness)
    return image

def main():
    input = torch.load("input.pt").to('cpu')
    input_array = np.array(input)
    onnx_model = onnxruntime.InferenceSession(ONNX_MODEL)
    input_name = onnx_model.get_inputs()[0].name
    out = onnx_model.run(None, {input_name: input_array})
    out_tensor = torch.tensor(out).to(DEVICE)
    pred = non_max_suppression(out_tensor, 0.25, 0.45, classes=None, agnostic=False, max_det=1000)
    # Process predictions
    for i, det in enumerate(pred):  # per image
        im0_ = cv2.imread('../data/images/bus.jpg')  # assumes the image is already 640x640x3
        im0 = im0_.reshape(1, 3, 640, 640)
        names = torch.load('name.pt')
        annotator = Annotator(im0, line_width=3, example=str(names))
        coord = []
        image = im0.reshape(640, 640, 3)
        if len(det):
            # Rescale boxes from img_size to im0 size
            # det[:, :4] = scale_boxes(im0.shape[2:], det[:, :4], im0.shape).round()
            # Write results
            for *xyxy, conf, cls in reversed(det):
                # Add bbox to image
                c = int(cls)  # integer class
                label = f"{names[c]} {conf:.2f}"
                # Collect the box corner coordinates into one list
                coord.append([int(xyxy[0].item()), int(xyxy[1].item()),
                              int(xyxy[2].item()), int(xyxy[3].item())])
        image = draw_bbox(image, coord)
        # Stream results
        save_success = cv2.imwrite('result.jpg', image)
        print(f"save image end {save_success}")

if __name__ == "__main__":
    main()
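
Note that the script reads a pre-saved tensor input.pt (and a class-name list name.pt) instead of preprocessing the image itself. A minimal sketch of how such a tensor could be produced from bus.jpg, assuming a plain 640×640 resize rather than YOLOv5's letterboxing:

import cv2
import numpy as np
import torch

img = cv2.imread('../data/images/bus.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # the model expects RGB
img = cv2.resize(img, (640, 640))  # assumption: plain resize, no letterbox
img = img.transpose(2, 0, 1)[None].astype(np.float32) / 255.0  # HWC -> NCHW, scale to [0,1]
torch.save(torch.from_numpy(img), 'input.pt')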

Test results

On-board deployment

Environment preparation

Set up the rknn-toolkit and RKNPU environments first.
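
A quick way to verify the toolkit side is to import the API and list the attached boards (a minimal sketch; it assumes the board is visible over adb):

from rknn.api import RKNN

rknn = RKNN()
rknn.list_devices()  # should print the device id of the connected rv1126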

General workflow

Model conversion

Create export_rknn.py to convert the ONNX model into an RKNN model:

import os
import sys
os.chdir(sys.path[0])
import numpy as np
import cv2
from rknn.api import RKNN
import torchvision
import torch
import time

ONNX_MODEL = './model/yolov5n.onnx'
RKNN_MODEL = './model/yolov5n.rknn'

def main():
    """Create RKNN object"""
    rknn = RKNN()
    if not os.path.exists(ONNX_MODEL):
        print('model not exist')
        exit(-1)

    """pre-process config"""
    print('--> Config model')
    rknn.config(reorder_channel='0 1 2',
                mean_values=[[0, 0, 0]],
                std_values=[[255, 255, 255]],
                optimization_level=0,
                target_platform=['rv1126'],
                output_optimize=1,
                quantize_input_node=True)
    print('done')

    """Load ONNX model"""
    print('--> Loading model')
    ret = rknn.load_onnx(model=ONNX_MODEL,
                         inputs=['images'],
                         input_size_list=[[3, 640, 640]],
                         outputs=['output0'])
    if ret != 0:
        print('Load yolov5 failed!')
        exit(ret)
    print('done')

    """Build model"""
    print('--> Building model')
    # ret = rknn.build(do_quantization=True, dataset='./data/data.txt')
    ret = rknn.build(do_quantization=False, pre_compile=True)
    if ret != 0:
        print('Build yolov5 failed!')
        exit(ret)
    print('done')

    """Export RKNN model"""
    print('--> Export RKNN model')
    ret = rknn.export_rknn(RKNN_MODEL)
    if ret != 0:
        print('Export yolov5 rknn failed!')
        exit(ret)
    print('done')

if __name__ == "__main__":
    main()
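
The build above keeps floating-point precision (do_quantization=False) and only enables pre-compilation. The commented-out line hints at the quantized variant; a sketch, assuming ./data/data.txt lists calibration image paths one per line and with an output file name of my choosing:

from rknn.api import RKNN

rknn = RKNN()
rknn.config(reorder_channel='0 1 2', mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]],
            target_platform=['rv1126'], quantize_input_node=True)
rknn.load_onnx(model='./model/yolov5n.onnx', inputs=['images'],
               input_size_list=[[3, 640, 640]], outputs=['output0'])
# INT8 quantization needs a calibration dataset: a text file of image paths
rknn.build(do_quantization=True, dataset='./data/data.txt', pre_compile=True)
rknn.export_rknn('./model/yolov5n_int8.rknn')  # hypothetical output name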

Create test_rknn.py to test the RKNN model:

import os
import sys
os.chdir(sys.path[0])
import numpy as np
import cv2
from rknn.api import RKNN
import torchvision
import torch
import time

RKNN_MODEL = './model/yolov5n.rknn'
DATA = './data/bus.jpg'

def xywh2xyxy(x):
    coord = []
    for x_ in x:
        xl = x_[0] - x_[2] / 2
        yl = x_[1] - x_[3] / 2
        xr = x_[0] + x_[2] / 2
        yr = x_[1] + x_[3] / 2
        coord.append([xl, yl, xr, yr])
    coord = torch.tensor(coord).to(x.device)
    return coord

def box_iou(box1, box2, eps=1e-7):
    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
    # IoU = inter / (area1 + area2 - inter)
    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)

def non_max_suppression(
    prediction,
    conf_thres=0.25,
    iou_thres=0.45,
    classes=None,
    agnostic=False,
    multi_label=False,
    labels=(),
    max_det=300,
    nm=0,  # number of masks
):
    # Checks
    assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
    assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
    device = prediction.device
    mps = "mps" in device.type  # Apple MPS
    if mps:  # MPS not fully supported yet, convert tensors to CPU before NMS
        prediction = prediction.cpu()
    bs = prediction.shape[0]  # batch size
    nc = prediction.shape[2] - nm - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates
    count_true = torch.sum(xc.type(torch.int))

    # Settings
    # min_wh = 2  # (pixels) minimum box width and height
    max_wh = 7680  # (pixels) maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 0.5 + 0.05 * bs  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    mi = 5 + nc  # mask start index
    output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            lb = labels[xi]
            v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
            v[:, :4] = lb[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box/Mask
        box = xywh2xyxy(x[:, :4])  # (center_x, center_y, width, height) to (x1, y1, x2, y2)
        mask = x[:, mi:]  # zero columns if no masks

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
        else:  # best class only
            conf, j = x[:, 5:mi].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence and remove excess boxes

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        i = i[:max_det]  # limit detections
        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if mps:
            output[xi] = output[xi].to(device)
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output

def draw_bbox(image, result, color=(0, 0, 255), thickness=2):
    image = image.copy()
    for point in result:
        x1, y1, x2, y2 = point
        cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness)
    return image

def main():
    # Create RKNN object
    rknn = RKNN()
    rknn.list_devices()

    # Load RKNN model
    ret = rknn.load_rknn(path=RKNN_MODEL)
    if ret != 0:
        print('load rknn failed')
        exit(ret)

    # Init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime(target='rv1126', device_id='86d4fdeb7f3af5b1',
                            perf_debug=True, eval_mem=True)
    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Set inputs
    image = cv2.imread('./data/bus.jpg')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[image])

    # Post-process
    out_tensor = torch.tensor(outputs)
    pred = non_max_suppression(out_tensor, 0.25, 0.45, classes=None, agnostic=False, max_det=1000)

    # Process predictions
    for i, det in enumerate(pred):  # per image
        im0_ = cv2.imread(DATA)  # assumes the image is already 640x640x3
        im0 = im0_.reshape(1, 3, 640, 640)
        coord = []
        image = im0.reshape(640, 640, 3)
        if len(det):
            """Write results"""
            for *xyxy, conf, cls in reversed(det):
                c = int(cls)  # integer class
                coord.append([int(xyxy[0].item()), int(xyxy[1].item()),
                              int(xyxy[2].item()), int(xyxy[3].item())])
                print(f"[{coord[-1][0]},{coord[-1][1]},{coord[-1][2]},{coord[-1][3]}]: ID is {c}")
        image = draw_bbox(image, coord)
        # Stream results
        save_success = cv2.imwrite('result.jpg', image)
        print(f"save image end {save_success}")
    rknn.release()

if __name__ == "__main__":
    main()

Writing the on-board C++ inference code

Copy the template project and rename it to yolov5.

Pre-processing code:

void PreProcess(cv::Mat *image)
{
    cv::cvtColor(*image, *image, cv::COLOR_BGR2RGB);
}
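
The only explicit pre-processing on the board is the BGR-to-RGB swap, because the mean/std normalization (mean 0, std 255) was folded into the RKNN config during conversion. For reference, the equivalent host-side steps in Python would look like this (a sketch; the resize is an assumption for frames that are not already 640×640):

import cv2

img = cv2.imread('./data/bus.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if img.shape[:2] != (640, 640):
    img = cv2.resize(img, (640, 640))  # assumed: the model input is fixed at 640x640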

Post-processing code:

1: The output shape is [1, 25200, 85]. Of the 85 values, the first four are the box center x, y and the box width and height, the fifth is the objectness confidence, and the remaining 80 are the per-class confidences (there are 80 classes); see the decoding sketch after this list.

2: 25200 = (80×80 + 40×40 + 20×20) × 3. The strides are 8, 16 and 32, and 640/8 = 80, 640/16 = 40, 640/32 = 20.

3: NMS removes redundant candidate boxes:

1: IoU (intersection over union) measures how much two boxes overlap: intersection area / union area.

2: Main steps:

  1. First keep all candidate boxes whose confidence exceeds the threshold (> 0.4)
  2. Group the candidate boxes by class
  3. Take the n-th class and loop over it
  4. Move the highest-confidence box into the keep set
  5. Compute the IoU between it and the remaining candidates; delete any whose IoU exceeds the iou threshold
  6. Move the next-highest-confidence candidate into the keep set
  7. Repeat step 4 until no candidates remain
  8. Repeat step 3 until all classes are done
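
To make the output layout concrete, here is a minimal NumPy sketch that decodes a [1, 25200, 85] prediction (pred below is a random stand-in for the real model output):

import numpy as np

# 25200 = (80*80 + 40*40 + 20*20) * 3 anchors across strides 8/16/32
assert (80 * 80 + 40 * 40 + 20 * 20) * 3 == 25200

pred = np.random.rand(1, 25200, 85).astype(np.float32)  # stand-in for the model output
boxes_xywh = pred[0, :, :4]                # cx, cy, w, h
objectness = pred[0, :, 4]                 # box confidence
cls_scores = pred[0, :, 5:]                # 80 per-class confidences
scores = objectness[:, None] * cls_scores  # final score = obj_conf * cls_conf
best_cls = scores.argmax(axis=1)           # best class id per candidate box

The C++ post-processing used on the board follows.
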
// Bbox, Detection and CLSNUM are defined elsewhere in the project template
float iou(Bbox box1, Bbox box2) {
    /* IoU = intersection area / union area */
    int x1 = max(box1.x, box2.x);
    int y1 = max(box1.y, box2.y);
    int x2 = min(box1.x + box1.w, box2.x + box2.w);
    int y2 = min(box1.y + box1.h, box2.y + box2.h);
    int w = max(0, x2 - x1);
    int h = max(0, y2 - y1);
    float over_area = w * h;
    return over_area / (box1.w * box1.h + box2.w * box2.h - over_area);
}

bool judge_in_lst(int index, vector<int> index_lst) {
    // Return true if index is in index_lst, false otherwise
    if (index_lst.size() > 0) {
        for (int i = 0; i < int(index_lst.size()); i++) {
            if (index == index_lst.at(i)) {
                return true;
            }
        }
    }
    return false;
}

int get_max_index(vector<Detection> pre_detection) {
    // Return the index of the detection with the highest confidence
    int index;
    float conf;
    if (pre_detection.size() > 0) {
        index = 0;
        conf = pre_detection.at(0).conf;
        for (int i = 0; i < int(pre_detection.size()); i++) {
            if (conf < pre_detection.at(i).conf) {
                index = i;
                conf = pre_detection.at(i).conf;
            }
        }
        return index;
    }
    else {
        return -1;
    }
}

vector<int> nms(vector<Detection> pre_detection, float iou_thr)
{
    /* Return the indices (into pre_detection) of the boxes to keep */
    int index;
    vector<Detection> pre_detection_new;
    // Detection det_best;
    Bbox box_best, box;
    float iou_value;
    vector<int> keep_index;
    vector<int> del_index;
    bool keep_bool;
    bool del_bool;

    if (pre_detection.size() > 0) {
        pre_detection_new.clear();
        // Index the predictions
        for (int i = 0; i < int(pre_detection.size()); i++) {
            pre_detection.at(i).index = i;
            pre_detection_new.push_back(pre_detection.at(i));
        }
        // Iterate to collect the kept box indices, relative to the input pre_detection
        while (pre_detection_new.size() > 0) {
            index = get_max_index(pre_detection_new);
            if (index >= 0) {
                keep_index.push_back(pre_detection_new.at(index).index); // keep this index
                // Update the current best box
                box_best.x = pre_detection_new.at(index).bbox[0];
                box_best.y = pre_detection_new.at(index).bbox[1];
                box_best.w = pre_detection_new.at(index).bbox[2];
                box_best.h = pre_detection_new.at(index).bbox[3];
                for (int j = 0; j < int(pre_detection.size()); j++) {
                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
                    if ((!keep_bool) && (!del_bool)) { // only compute IoU for boxes in neither keep_index nor del_index
                        box.x = pre_detection.at(j).bbox[0];
                        box.y = pre_detection.at(j).bbox[1];
                        box.w = pre_detection.at(j).bbox[2];
                        box.h = pre_detection.at(j).bbox[3];
                        iou_value = iou(box_best, box);
                        if (iou_value > iou_thr) {
                            del_index.push_back(j); // record boxes above the IoU threshold for deletion
                        }
                    }
                }
                // Rebuild pre_detection_new
                pre_detection_new.clear();
                for (int j = 0; j < int(pre_detection.size()); j++) {
                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
                    if ((!keep_bool) && (!del_bool)) {
                        pre_detection_new.push_back(pre_detection.at(j));
                    }
                }
            }
        }
    }
    del_index.clear();
    del_index.shrink_to_fit();
    pre_detection_new.clear();
    pre_detection_new.shrink_to_fit();
    return keep_index;
}

vector<Detection> PostProcess(float* prob, float conf_thr = 0.3, float nms_thr = 0.5)
{
    vector<Detection> pre_results;
    vector<int> nms_keep_index;
    vector<Detection> results;
    bool keep_bool;
    Detection pre_res;
    float conf;
    int tmp_idx;
    float tmp_cls_score;
    for (int i = 0; i < 25200; i++) {
        tmp_idx = i * (CLSNUM + 5);
        pre_res.bbox[0] = prob[tmp_idx + 0];  // cx
        pre_res.bbox[1] = prob[tmp_idx + 1];  // cy
        pre_res.bbox[2] = prob[tmp_idx + 2];  // w
        pre_res.bbox[3] = prob[tmp_idx + 3];  // h
        conf = prob[tmp_idx + 4];  // objectness confidence
        tmp_cls_score = prob[tmp_idx + 5] * conf;  // class 0 score * objectness
        pre_res.class_id = 0;
        pre_res.conf = tmp_cls_score;  // initialize with class 0 so a class-0 maximum is not lost
        // Scan the CLSNUM class scores after the first 5 columns; the largest
        // score becomes pre_res.conf and its column index becomes the class id
        for (int j = 1; j < CLSNUM; j++) {
            tmp_idx = i * (CLSNUM + 5) + 5 + j;  // index of class j
            if (tmp_cls_score < prob[tmp_idx] * conf) {
                tmp_cls_score = prob[tmp_idx] * conf;
                pre_res.class_id = j;
                pre_res.conf = tmp_cls_score;
            }
        }
        if (conf >= conf_thr) {
            pre_results.push_back(pre_res);
        }
    }
    // Run NMS; it returns the indices of the results to keep
    nms_keep_index = nms(pre_results, nms_thr);
    // Copy out the detections whose indices NMS kept
    for (int i = 0; i < int(pre_results.size()); i++) {
        keep_bool = judge_in_lst(i, nms_keep_index);
        if (keep_bool) {
            results.push_back(pre_results.at(i));
        }
    }
    pre_results.clear();
    pre_results.shrink_to_fit();
    nms_keep_index.clear();
    nms_keep_index.shrink_to_fit();
    return results;
}

Results

This completes the on-board deployment; next comes optimization.

Optimization and acceleration

As shown above, model inference takes nearly 2 s, which is far from real-time, so the model needs to be accelerated.
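
A sensible first step is to measure where the time goes. Since the test script initialized the runtime with perf_debug=True, rknn-toolkit can report per-layer timings; a sketch, assuming the eval_perf call of rknn-toolkit 1.x:

# after rknn.init_runtime(target='rv1126', ..., perf_debug=True)
perf_detail = rknn.eval_perf(inputs=[image], is_print=True)  # prints per-layer execution times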
