YOLOv5 ONNX / TensorRT Inference

一、Converting the YOLOv5 .pt model to ONNX

code:
https://github.com/ultralytics/yolov5
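If you do not have the repository locally yet, the usual setup (as described in the repository's README) is:

git clone https://github.com/ultralytics/yolov5
cd yolov5
pip install -r requirements.txt

With the environment in place, export the model: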

python export.py --weights yolov5s.pt --include onnx
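The export writes yolov5s.onnx next to the weights file. Before wiring it into onnxruntime it is worth a quick sanity check. A minimal sketch, assuming the onnx package is installed alongside onnxruntime (the printed names and shapes may differ slightly by export version):

import onnx
import onnxruntime

model = onnx.load('yolov5s.onnx')
onnx.checker.check_model(model)  # raises if the exported graph is malformed

session = onnxruntime.InferenceSession('yolov5s.onnx')
for node in session.get_inputs():
    print('input :', node.name, node.shape)   # e.g. images (1, 3, 640, 640)
for node in session.get_outputs():
    print('output:', node.name, node.shape)   # e.g. output0 (1, 25200, 85)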

二、ONNX Inference

import os
import cv2
import numpy as np
import onnxruntime
import time

CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
           'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
           'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
           'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
           'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
           'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
           'hair drier', 'toothbrush']  # the 80 COCO classes


class YOLOV5():
    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath)
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()

    # -------------------------------------------------------
    #   Get the input and output names
    # -------------------------------------------------------
    def get_input_name(self):
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name

    def get_output_name(self):
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name

    # -------------------------------------------------------
    #   Build the input feed
    # -------------------------------------------------------
    def get_input_feed(self, img_tensor):
        input_feed = {}
        for name in self.input_name:
            input_feed[name] = img_tensor
        return input_feed

    # -------------------------------------------------------
    #   1. Read the image with cv2 and resize it
    #   2. Convert BGR -> RGB and HWC -> CHW
    #   3. Normalize the image
    #   4. Add a batch dimension
    #   5. Run onnx_session inference
    # -------------------------------------------------------
    def inference(self, img_path):
        img = cv2.imread(img_path)
        or_img = cv2.resize(img, (640, 640))
        img = or_img[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB and HWC -> CHW
        img = img.astype(dtype=np.float32)
        img /= 255.0
        img = np.expand_dims(img, axis=0)
        input_feed = self.get_input_feed(img)
        pred = self.onnx_session.run(None, input_feed)[0]
        return pred, or_img


# dets:   array [x, 6], the 6 values are x1, y1, x2, y2, score, class
# thresh: IoU threshold
def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    # -------------------------------------------------------
    #   Compute the box areas
    #   Sort by confidence in descending order
    # -------------------------------------------------------
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1]

    while index.size > 0:
        i = index[0]
        keep.append(i)
        # -------------------------------------------------------
        #   Compute the intersection area
        #   1. boxes intersect
        #   2. boxes do not intersect
        # -------------------------------------------------------
        x11 = np.maximum(x1[i], x1[index[1:]])
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])

        w = np.maximum(0, x22 - x11 + 1)
        h = np.maximum(0, y22 - y11 + 1)

        overlaps = w * h
        # -------------------------------------------------------
        #   Compute the IoU of this box against the others and drop
        #   duplicates, i.e. boxes with a large IoU
        #   Boxes with IoU below thresh are kept
        # -------------------------------------------------------
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]
    return keep


def xywh2xyxy(x):
    # [x, y, w, h] to [x1, y1, x2, y2]
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y


def filter_box(org_box, conf_thres, iou_thres):  # filter out useless boxes
    # -------------------------------------------------------
    #   Drop the singleton batch dimension
    #   Drop boxes whose objectness is below conf_thres
    # -------------------------------------------------------
    org_box = np.squeeze(org_box)
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf]
    # -------------------------------------------------------
    #   Get the most confident class via argmax
    # -------------------------------------------------------
    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))
    # -------------------------------------------------------
    #   Filter each class separately
    #   1. Replace the 6th column with the class index
    #   2. Convert coordinates with xywh2xyxy
    #   3. Get the box indices that survive NMS
    #   4. Use those indices to pick the boxes kept after NMS
    # -------------------------------------------------------
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        # curr_cls_box_old = np.copy(curr_cls_box)
        curr_cls_box = xywh2xyxy(curr_cls_box)
        curr_out_box = nms(curr_cls_box, iou_thres)
        for k in curr_out_box:
            output.append(curr_cls_box[k])
    output = np.array(output)
    return output


def draw(image, box_data):
    # -------------------------------------------------------
    #   Round to integers to make drawing easier
    # -------------------------------------------------------
    boxes = box_data[..., :4].astype(np.int32)
    scores = box_data[..., 4]
    classes = box_data[..., 5].astype(np.int32)

    for box, score, cl in zip(boxes, scores, classes):
        top, left, right, bottom = box
        print('class: {}, score: {}'.format(CLASSES[cl], score))
        print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))

        cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
                    (top, left),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 0, 255), 2)


if __name__ == "__main__":
    onnx_path = 'yolov5s.onnx'
    model = YOLOV5(onnx_path)
    output, or_img = model.inference('bus.jpg')
    print(output.shape)  # (1, 25200, 85)
    print(or_img.shape)  # (640, 640, 3)
    outbox = filter_box(output, 0.5, 0.5)
    draw(or_img, outbox)
    cv2.imwrite('res.jpg', or_img)
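A note on the output shape: each of the 25200 rows in the (1, 25200, 85) tensor is [cx, cy, w, h, objectness, 80 class scores], and 25200 = 3 anchors × (80² + 40² + 20²) grid cells at a 640×640 input. Also, inference() resizes the image directly to 640×640 with no letterbox padding, so the filtered boxes live in the resized frame; that is why draw() runs on or_img rather than the original image. If you want the boxes back on the original image, a hypothetical helper like this (not part of the script above) would rescale them:

def scale_boxes(boxes, orig_w, orig_h, input_size=640):
    # boxes: [N, 6] rows of x1, y1, x2, y2, score, class in the 640x640 frame
    boxes = boxes.copy()
    boxes[:, [0, 2]] *= orig_w / input_size  # stretch x back to the original width
    boxes[:, [1, 3]] *= orig_h / input_size  # stretch y back to the original height
    return boxes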

[Image: detection result for bus.jpg with drawn boxes and labels, saved as res.jpg]

三、YOLOv5-7.0 TensorRT Inference

Reference: https://zhuanlan.zhihu.com/p/655640909
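The paths used below (build/yolov5s.engine, build/libmyplugins.so) match the layout produced by the wang-xinyu/tensorrtx project rather than an engine built from the ONNX export above. Assuming that source, the typical build sequence is roughly the following; exact file and binary names vary between tensorrtx releases, so treat this as a sketch:

git clone https://github.com/wang-xinyu/tensorrtx
# copy tensorrtx/yolov5/gen_wts.py into the ultralytics yolov5 repo, then:
python gen_wts.py -w yolov5s.pt -o yolov5s.wts
cd tensorrtx/yolov5 && mkdir build && cd build   # put yolov5s.wts here
cmake .. && make
./yolov5_det -s yolov5s.wts yolov5s.engine s     # serialize the engine for the "s" model

Note also that the script relies on the implicit-batch TensorRT 8.x Python API (engine.max_batch_size, get_binding_shape, execute_async); those calls were deprecated and later removed in TensorRT 10, so pin an 8.x release if you want to run it as-is.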

"""
An example that uses TensorRT's Python api to make inferences video.
"""
import ctypes
import os
import shutil
import random
import sys
import threading
import time
import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trtCONF_THRESH = 0.5
IOU_THRESHOLD = 0.4
LEN_ALL_RESULT = 38001
LEN_ONE_RESULT = 38def get_img_path_batches(batch_size, img_dir):ret = []batch = []for root, dirs, files in os.walk(img_dir):for name in files:if len(batch) == batch_size:ret.append(batch)batch = []batch.append(os.path.join(root, name))if len(batch) > 0:ret.append(batch)return retdef plot_one_box(x, img, color=None, label=None, line_thickness=None):"""description: Plots one bounding box on image img,this function comes from YoLov5 project.param: x:      a box likes [x1,y1,x2,y2]img:    a opencv image objectcolor:  color to draw rectangle, such as (0,255,0)label:  strline_thickness: intreturn:no return"""tl = (line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1)  # line/font thicknesscolor = color or [random.randint(0, 255) for _ in range(3)]c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)if label:tf = max(tl - 1, 1)  # font thicknesst_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filledcv2.putText(img,label,(c1[0], c1[1] - 2),0,tl / 3,[225, 255, 255],thickness=tf,lineType=cv2.LINE_AA,)class YoLov5TRT(object):"""description: A YOLOv5 class that warps TensorRT ops, preprocess and postprocess ops."""def __init__(self, engine_file_path):# Create a Context on this device,self.ctx = cuda.Device(0).make_context()stream = cuda.Stream()TRT_LOGGER = trt.Logger(trt.Logger.INFO)runtime = trt.Runtime(TRT_LOGGER)# Deserialize the engine from filewith open(engine_file_path, "rb") as f:engine = runtime.deserialize_cuda_engine(f.read())context = engine.create_execution_context()host_inputs = []cuda_inputs = []host_outputs = []cuda_outputs = []bindings = []for binding in engine:print('bingding:', binding, engine.get_binding_shape(binding))size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_sizedtype = trt.nptype(engine.get_binding_dtype(binding))# Allocate host and device buffershost_mem = cuda.pagelocked_empty(size, dtype)cuda_mem = cuda.mem_alloc(host_mem.nbytes)# Append the device buffer to device bindings.bindings.append(int(cuda_mem))# Append to the appropriate list.if engine.binding_is_input(binding):self.input_w = engine.get_binding_shape(binding)[-1]self.input_h = engine.get_binding_shape(binding)[-2]host_inputs.append(host_mem)cuda_inputs.append(cuda_mem)else:host_outputs.append(host_mem)cuda_outputs.append(cuda_mem)# Storeself.stream = streamself.context = contextself.engine = engineself.host_inputs = host_inputsself.cuda_inputs = cuda_inputsself.host_outputs = host_outputsself.cuda_outputs = cuda_outputsself.bindings = bindingsself.batch_size = engine.max_batch_sizedef infer(self, raw_image_generator):threading.Thread.__init__(self)# Make self the active context, pushing it on top of the context stack.self.ctx.push()# Restorestream = self.streamcontext = self.contextengine = self.enginehost_inputs = self.host_inputscuda_inputs = self.cuda_inputshost_outputs = self.host_outputscuda_outputs = self.cuda_outputsbindings = self.bindings# Do image preprocessbatch_image_raw = []batch_origin_h = []batch_origin_w = []batch_input_image = np.empty(shape=[self.batch_size, 3, self.input_h, self.input_w])for i, image_raw in enumerate(raw_image_generator):input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)batch_image_raw.append(image_raw)batch_origin_h.append(origin_h)batch_origin_w.append(origin_w)np.copyto(batch_input_image[i], 
input_image)batch_input_image = np.ascontiguousarray(batch_input_image)# Copy input image to host buffernp.copyto(host_inputs[0], batch_input_image.ravel())start = time.time()# Transfer input data  to the GPU.cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)# Run inference.context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)# Transfer predictions back from the GPU.cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)# Synchronize the streamstream.synchronize()end = time.time()# Remove any context from the top of the context stack, deactivating it.self.ctx.pop()# Here we use the first row of output in that batch_size = 1output = host_outputs[0]# Do postprocessfor i in range(self.batch_size):result_boxes, result_scores, result_classid = self.post_process(output[i * LEN_ALL_RESULT: (i + 1) * LEN_ALL_RESULT], batch_origin_h[i], batch_origin_w[i])# Draw rectangles and labels on the original imagefor j in range(len(result_boxes)):box = result_boxes[j]plot_one_box(box,batch_image_raw[i],label="{}:{:.2f}".format(categories[int(result_classid[j])], result_scores[j],),)return batch_image_raw, end - startdef destroy(self):# Remove any context from the top of the context stack, deactivating it.self.ctx.pop()def get_raw_image(self, image):"""description: Read an image from image path"""# for img_path in image_path_batch:#     yield cv2.imread(img_path)yield imagedef get_raw_image_zeros(self, image_path_batch=None):"""description: Ready data for warmup"""for _ in range(self.batch_size):yield np.zeros([self.input_h, self.input_w, 3], dtype=np.uint8)def preprocess_image(self, raw_bgr_image):"""description: Convert BGR image to RGB,resize and pad it to target size, normalize to [0,1],transform to NCHW format.param:input_image_path: str, image pathreturn:image:  the processed imageimage_raw: the original imageh: original heightw: original width"""image_raw = raw_bgr_imageh, w, c = image_raw.shapeimage = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)# Calculate widht and height and paddingsr_w = self.input_w / wr_h = self.input_h / hif r_h > r_w:tw = self.input_wth = int(r_w * h)tx1 = tx2 = 0ty1 = int((self.input_h - th) / 2)ty2 = self.input_h - th - ty1else:tw = int(r_h * w)th = self.input_htx1 = int((self.input_w - tw) / 2)tx2 = self.input_w - tw - tx1ty1 = ty2 = 0# Resize the image with long side while maintaining ratioimage = cv2.resize(image, (tw, th))# Pad the short side with (128,128,128)image = cv2.copyMakeBorder(image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128))image = image.astype(np.float32)# Normalize to [0,1]image /= 255.0# HWC to CHW format:image = np.transpose(image, [2, 0, 1])# CHW to NCHW formatimage = np.expand_dims(image, axis=0)# Convert the image to row-major order, also known as "C order":image = np.ascontiguousarray(image)return image, image_raw, h, wdef xywh2xyxy(self, origin_h, origin_w, x):"""description:    Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-rightparam:origin_h:   height of original imageorigin_w:   width of original imagex:          A boxes numpy, each row is a box [center_x, center_y, w, h]return:y:          A boxes numpy, each row is a box [x1, y1, x2, y2]"""y = np.zeros_like(x)r_w = self.input_w / origin_wr_h = self.input_h / origin_hif r_h > r_w:y[:, 0] = x[:, 0] - x[:, 2] / 2y[:, 2] = x[:, 0] + x[:, 2] / 2y[:, 1] = x[:, 1] - x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2y[:, 3] = x[:, 1] + x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 
2y /= r_welse:y[:, 0] = x[:, 0] - x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2y[:, 2] = x[:, 0] + x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2y[:, 1] = x[:, 1] - x[:, 3] / 2y[:, 3] = x[:, 1] + x[:, 3] / 2y /= r_hreturn ydef post_process(self, output, origin_h, origin_w):"""description: postprocess the predictionparam:output:     A numpy likes [num_boxes,cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...] origin_h:   height of original imageorigin_w:   width of original imagereturn:result_boxes: finally boxes, a boxes numpy, each row is a box [x1, y1, x2, y2]result_scores: finally scores, a numpy, each element is the score correspoing to boxresult_classid: finally classid, a numpy, each element is the classid correspoing to box"""# Get the num of boxes detectednum = int(output[0])# Reshape to a two dimentional ndarraypred = np.reshape(output[1:], (-1, LEN_ONE_RESULT))[:num, :]pred = pred[:, :6]# Do nmsboxes = self.non_max_suppression(pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)result_boxes = boxes[:, :4] if len(boxes) else np.array([])result_scores = boxes[:, 4] if len(boxes) else np.array([])result_classid = boxes[:, 5] if len(boxes) else np.array([])return result_boxes, result_scores, result_classiddef bbox_iou(self, box1, box2, x1y1x2y2=True):"""description: compute the IoU of two bounding boxesparam:box1: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))box2: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))            x1y1x2y2: select the coordinate formatreturn:iou: computed iou"""if not x1y1x2y2:# Transform from center and width to exact coordinatesb1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2else:# Get the coordinates of bounding boxesb1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]# Get the coordinates of the intersection rectangleinter_rect_x1 = np.maximum(b1_x1, b2_x1)inter_rect_y1 = np.maximum(b1_y1, b2_y1)inter_rect_x2 = np.minimum(b1_x2, b2_x2)inter_rect_y2 = np.minimum(b1_y2, b2_y2)# Intersection areainter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)# Union Areab1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)return ioudef non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nms_thres=0.4):"""description: Removes detections with lower object confidence score than 'conf_thres' and performsNon-Maximum Suppression to further filter detections.param:prediction: detections, (x1, y1, x2, y2, conf, cls_id)origin_h: original image heightorigin_w: original image widthconf_thres: a confidence threshold to filter detectionsnms_thres: a iou threshold to filter detectionsreturn:boxes: output after nms with the shape (x1, y1, x2, y2, conf, cls_id)"""# Get the boxes that score > CONF_THRESHboxes = prediction[prediction[:, 4] >= conf_thres]# Trandform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2]boxes[:, :4] = self.xywh2xyxy(origin_h, origin_w, boxes[:, :4])# clip the coordinatesboxes[:, 0] = np.clip(boxes[:, 0], 0, origin_w - 1)boxes[:, 2] = np.clip(boxes[:, 2], 0, origin_w - 1)boxes[:, 1] = 
np.clip(boxes[:, 1], 0, origin_h - 1)boxes[:, 3] = np.clip(boxes[:, 3], 0, origin_h - 1)# Object confidenceconfs = boxes[:, 4]# Sort by the confsboxes = boxes[np.argsort(-confs)]# Perform non-maximum suppressionkeep_boxes = []while boxes.shape[0]:large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_threslabel_match = boxes[0, -1] == boxes[:, -1]# Indices of boxes with lower confidence scores, large IOUs and matching labelsinvalid = large_overlap & label_matchkeep_boxes += [boxes[0]]boxes = boxes[~invalid]boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])return boxesclass inferThread(threading.Thread):def __init__(self, yolov5_wrapper, image):threading.Thread.__init__(self)self.yolov5_wrapper = yolov5_wrapperself.image = imagedef run(self):batch_image_raw, use_time = self.yolov5_wrapper.infer(self.yolov5_wrapper.get_raw_image(self.image))# print(batch_image_raw[0].shape)# print((counter / (time.time() - start_time)))# cv2.putText(batch_image_raw[0], "FPS {0}".format(float('%.1f' % (counter / (time.time() - start_time)))),#             cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),#             2)cv2.imshow('img', batch_image_raw[0])# for i, img_path in enumerate(self.image):#     parent, filename = os.path.split(img_path)#     save_name = os.path.join('output', filename)#     # Save image#     cv2.imwrite(save_name, batch_image_raw[i])cv2.waitKey(0)class warmUpThread(threading.Thread):def __init__(self, yolov5_wrapper):threading.Thread.__init__(self)self.yolov5_wrapper = yolov5_wrapperdef run(self):batch_image_raw, use_time = self.yolov5_wrapper.infer(self.yolov5_wrapper.get_raw_image_zeros())print('warm_up->{}, time->{:.2f}ms'.format(batch_image_raw[0].shape, use_time * 1000))if __name__ == "__main__":# load custom plugin and enginePLUGIN_LIBRARY = "build/libmyplugins.so"engine_file_path = "build/yolov5s.engine"if len(sys.argv) > 1:engine_file_path = sys.argv[1]if len(sys.argv) > 2:PLUGIN_LIBRARY = sys.argv[2]ctypes.CDLL(PLUGIN_LIBRARY)# load coco labelscategories = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat","traffic light","fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow","elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase","frisbee","skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard","surfboard","tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple","sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch","potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard","cell phone","microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors","teddy bear","hair drier", "toothbrush"]if os.path.exists('output/'):shutil.rmtree('output/')os.makedirs('output/')# a YoLov5TRT instanceyolov5_wrapper = YoLov5TRT(engine_file_path)try:print('batch size is', yolov5_wrapper.batch_size)# image_dir = "images/"# image_path_batches = get_img_path_batches(yolov5_wrapper.batch_size, image_dir)# print(yolov5_wrapper.batch_size)  # 1# print(image_path_batches)for i in range(10):# create a new thread to do warm_upthread1 = warmUpThread(yolov5_wrapper)thread1.start()thread1.join()# for batch in image_path_batches:#     print('........', batch)  # ........ 
['images/zidane.jpg']# create a new thread to do inferencecap = cv2.VideoCapture('1.mp4')# start_time = time.time()# counter = 0while True:ret, frame = cap.read()# counter += 1if not ret:breakelse:# thread1 = inferThread(yolov5_wrapper, frame)# thread1.start()# thread1.join()batch_image_raw, use_time = yolov5_wrapper.infer(yolov5_wrapper.get_raw_image(frame))# print(batch_image_raw[0].shape)# print(use_time)print('input one frame cost time->{:.2f}ms, saving into output/'.format(use_time * 1000))cv2.imshow('img', batch_image_raw[0])# counter = 0# start_time = time.time()cv2.waitKey(1)finally:# destroy the instanceyolov5_wrapper.destroy()
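Two closing notes. First, the output layout the constants encode: LEN_ALL_RESULT = 38001 = 1 + 1000 × LEN_ONE_RESULT, i.e. one float holding the number of valid detections followed by up to 1000 fixed 38-float slots, of which post_process reads only the first 6 values (cx, cy, w, h, conf, cls_id). Second, the loop prints "saving into output/" but never actually writes a file; a minimal sketch for persisting the annotated video with cv2.VideoWriter (the codec, fps fallback and output path are my assumptions):

cap = cv2.VideoCapture('1.mp4')
fps = cap.get(cv2.CAP_PROP_FPS) or 25.0  # some containers report 0; fall back to 25
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
writer = cv2.VideoWriter('output/result.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
while True:
    ret, frame = cap.read()
    if not ret:
        break
    batch_image_raw, use_time = yolov5_wrapper.infer(yolov5_wrapper.get_raw_image(frame))
    writer.write(batch_image_raw[0])  # infer() returns frames at their original size
writer.release()
cap.release()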
