5. Grounded Segment Anything

GitHub: https://github.com/IDEA-Research/Grounded-Segment-Anything

SAM installation and basic usage

Stable Diffusion installation and basic usage

Install GroundingDINO

git clone https://github.com/IDEA-Research/GroundingDINO.git
cd GroundingDINO
pip install -e .
pip install diffusers transformers accelerate scipy safetensors

Install RAM & Tag2Text

git clone https://github.com/xinyu1205/recognize-anything.git
pip install -r ./recognize-anything/requirements.txt
pip install -e ./recognize-anything/

Install OSX

git submodule update --init --recursive
cd grounded-sam-osx && bash install.sh

Import dependencies

import os, sys
import argparse
import copy

from IPython.display import display
from PIL import Image, ImageDraw, ImageFont
from torchvision.ops import box_convert

# Grounding DINO
import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util import box_ops
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
from groundingdino.util.inference import annotate, load_image, predict

import supervision as sv

# segment anything
from segment_anything import build_sam, SamPredictor
import cv2
import numpy as np
import matplotlib.pyplot as plt

# diffusers
import PIL
import requests
import torch
from io import BytesIO
from diffusers import StableDiffusionInpaintPipeline

from huggingface_hub import hf_hub_download

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

Model initialization

Load Grounding DINO model
Weights: https://huggingface.co/ShilongLiu/GroundingDINO/tree/main

"""
Load Grounding DINO model
ckpt_repo_id = "ShilongLiu/GroundingDINO"
ckpt_filename = "groundingdino_swinb_cogcoor.pth"
ckpt_config_filename = "GroundingDINO_SwinB.cfg.py"
could be downloaded from: https://huggingface.co/ShilongLiu/GroundingDINO/tree/main
"""
def load_grounding(repo_id, filename, ckpt_config_filename, is_path=False, device='cpu'):
    # with is_path=True, filename/ckpt_config_filename are local paths;
    # otherwise they are fetched from the Hugging Face Hub
    if is_path:
        cache_file = filename
        cache_config_file = ckpt_config_filename
    else:
        cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
        cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)
    args = SLConfig.fromfile(cache_config_file)
    model = build_model(args)
    args.device = device
    checkpoint = torch.load(cache_file, map_location='cpu')
    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
    print("Model loaded from {} \n => {}".format(cache_file, log))
    _ = model.eval()
    return model

if __name__ == '__main__':
    ckpt_repo_id = "ShilongLiu/GroundingDINO"
    ckpt_filename = "groundingdino_swinb_cogcoor.pth"
    ckpt_config_filename = "GroundingDINO_SwinB.cfg.py"
    groundingdino_model = load_grounding(ckpt_repo_id, ckpt_filename, ckpt_config_filename)

Load SAM model
Weights: https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth

def load_sam(model_type="vit_h", sam_checkpoint="/devdata/chengan/SAM_checkpoint/sam_vit_h_4b8939.pth", device="cuda"):
    # sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
    # sam.to(device=device)
    sam = build_sam(checkpoint=sam_checkpoint)
    sam.to(device=device)
    sam_predictor = SamPredictor(sam)
    return sam_predictor
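The commented-out lines show the registry-based alternative. As a minimal sketch (same checkpoint path as above), sam_model_registry makes the backbone choice explicit and also covers the smaller vit_l / vit_b variants:

from segment_anything import sam_model_registry, SamPredictor

# build_sam is the ViT-H constructor; the registry form selects the
# backbone by key ("default", "vit_h", "vit_l", "vit_b")
sam = sam_model_registry["vit_h"](checkpoint="/devdata/chengan/SAM_checkpoint/sam_vit_h_4b8939.pth")
sam.to(device="cuda")
sam_predictor = SamPredictor(sam)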

Load Stable Diffusion inpainting model

def load_diffusion_inpaint(device="cuda"):
    sd_pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting",
        torch_dtype=torch.float16,
    ).to(device)
    return sd_pipe
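For reference, a minimal call pattern for the pipeline — a sketch, where init_image and mask_image stand for PIL images you supply (the 512×512 resize matches the model's native resolution, as also done in generate_image below):

pipe = load_diffusion_inpaint()
# white (255) mask pixels are repainted; black (0) pixels are preserved
result = pipe(prompt="A lovely cat",
              image=init_image.resize((512, 512)),
              mask_image=mask_image.resize((512, 512)))
inpainted = result.images[0]  # a PIL.Image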

Basic usage

GroundingDINO sample usage

def grounding_sample(text_prompt, image_path, box_threshold=0.3, text_threshold=0.25):
    ckpt_repo_id = "ShilongLiu/GroundingDINO"
    ckpt_filename = "/devdata/chengan/GSAM_checkpoint/groundingino/groundingdino_swinb_cogcoor.pth"
    ckpt_config_filename = "/devdata/chengan/GSAM_checkpoint/groundingino/GroundingDINO_SwinB.cfg.py"
    groundingdino_model = load_grounding(ckpt_repo_id, ckpt_filename, ckpt_config_filename, is_path=True)
    image_source, image = load_image(image_path)
    boxes, logits, phrases = predict(
        model=groundingdino_model,
        image=image,
        caption=text_prompt,
        box_threshold=box_threshold,
        text_threshold=text_threshold
    )
    annotated_frame = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
    annotated_frame = annotated_frame[..., ::-1]  # BGR to RGB
    plt.imshow(image_source)
    plt.show()
    plt.imshow(annotated_frame)
    plt.show()

grounding_sample(text_prompt="huskie", image_path="../data/headImage.png")
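predict returns boxes in normalized (cx, cy, w, h) form relative to the image. The box_convert imported at the top can turn them into absolute (x1, y1, x2, y2) pixel coordinates — a small sketch, assuming boxes and image_source come from the calls above:

import torch
from torchvision.ops import box_convert

h, w, _ = image_source.shape
# scale normalized cxcywh boxes to pixels, then convert to xyxy corners
xyxy = box_convert(boxes * torch.tensor([w, h, w, h]),
                   in_fmt="cxcywh", out_fmt="xyxy").numpy()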

[Figure: GroundingDINO detection result for the "huskie" prompt]

Detection, segmentation, and replacement


# Grounding DINO for detection
# detect objects using Grounding DINO
def detect(image, image_source, text_prompt, model, box_threshold=0.3, text_threshold=0.25):
    boxes, logits, phrases = predict(
        model=model,
        image=image,
        caption=text_prompt,
        box_threshold=box_threshold,
        text_threshold=text_threshold
    )
    annotated_frame = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
    annotated_frame = annotated_frame[..., ::-1]  # BGR to RGB
    return annotated_frame, boxes

# SAM for segmentation
def segment(image, sam_model, boxes):
    sam_model.set_image(image)
    H, W, _ = image.shape
    # scale normalized cxcywh boxes to absolute xyxy pixel coordinates
    boxes_xyxy = box_ops.box_cxcywh_to_xyxy(boxes) * torch.Tensor([W, H, W, H])
    transformed_boxes = sam_model.transform.apply_boxes_torch(boxes_xyxy.to(device), image.shape[:2])
    masks, _, _ = sam_model.predict_torch(
        point_coords=None,
        point_labels=None,
        boxes=transformed_boxes,
        multimask_output=False,
    )
    return masks.cpu()

def draw_mask(mask, image, random_color=True):
    if random_color:
        color = np.concatenate([np.random.random(3), np.array([0.8])], axis=0)
    else:
        color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6])
    h, w = mask.shape[-2:]
    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
    annotated_frame_pil = Image.fromarray(image).convert("RGBA")
    mask_image_pil = Image.fromarray((mask_image.cpu().numpy() * 255).astype(np.uint8)).convert("RGBA")
    return np.array(Image.alpha_composite(annotated_frame_pil, mask_image_pil))

def generate_image(image, mask, prompt, negative_prompt, pipe, seed):
    # resize for inpainting
    w, h = image.size
    in_image = image.resize((512, 512))
    in_mask = mask.resize((512, 512))
    generator = torch.Generator(device).manual_seed(seed)
    result = pipe(image=in_image, mask_image=in_mask, prompt=prompt, negative_prompt=negative_prompt,
                  generator=generator)
    result = result.images[0]
    return result.resize((w, h))

if __name__ == '__main__':
    # image
    image_path = "../data/headImage.png"
    image_source, image = load_image(image_path)
    plt.imshow(image_source)
    plt.show()
    # sam
    sam_checkpoint = '/devdata/chengan/SAM_checkpoint/sam_vit_h_4b8939.pth'
    sam_predictor = load_sam(sam_checkpoint=sam_checkpoint, device=device)
    # grounding
    ckpt_repo_id = "ShilongLiu/GroundingDINO"
    ckpt_filename = "/devdata/chengan/GSAM_checkpoint/groundingino/groundingdino_swinb_cogcoor.pth"
    ckpt_config_filename = "/devdata/chengan/GSAM_checkpoint/groundingino/GroundingDINO_SwinB.cfg.py"
    groundingdino_model = load_grounding(ckpt_repo_id, ckpt_filename, ckpt_config_filename, is_path=True, device=device)
    # diffusion inpaint
    sd_pipe = load_diffusion_inpaint(device=device)
    # get detection boxes
    text_prompt = "huskie"
    annotated_frame, detected_boxes = detect(image, image_source, text_prompt=text_prompt, model=groundingdino_model)
    plt.imshow(annotated_frame)
    plt.show()
    # sam
    segmented_frame_masks = segment(image_source, sam_predictor, boxes=detected_boxes)
    annotated_frame_with_mask = draw_mask(segmented_frame_masks[0][0], annotated_frame)
    plt.imshow(annotated_frame_with_mask)
    plt.show()
    # build the object mask and its inverse
    mask = segmented_frame_masks[0][0].cpu().numpy()
    inverted_mask = ((1 - mask) * 255).astype(np.uint8)
    image_source_pil = Image.fromarray(image_source)
    image_mask_pil = Image.fromarray(mask)
    inverted_image_mask_pil = Image.fromarray(inverted_mask)
    plt.imshow(inverted_image_mask_pil)
    plt.show()
    # inpaint the masked region
    prompt = "A lovely cat"
    negative_prompt = "low resolution, ugly"
    seed = 32  # for reproducibility
    generated_image = generate_image(image=image_source_pil, mask=image_mask_pil, prompt=prompt,
                                     negative_prompt=negative_prompt, pipe=sd_pipe, seed=seed)
    plt.imshow(generated_image)
    plt.show()
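Note that inverted_mask is computed but never used above. Passing the inverted mask to generate_image instead would repaint the background and keep the detected object — a sketch reusing the variables from the main block (the prompt is illustrative):

# inpaint the background instead of the object
background_swap = generate_image(image=image_source_pil, mask=inverted_image_mask_pil,
                                 prompt="a snowy forest",  # illustrative prompt
                                 negative_prompt=negative_prompt, pipe=sd_pipe, seed=seed)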

[Figure: segmentation mask overlay and inpainted replacement result]

Semantic segmentation (automatic labeling with RAM)

sam_hq_checkpoint (optional SAM-HQ weights)

import argparse
import os

import numpy as np
import json
import torch
import torchvision
from PIL import Image
# import litellm  # required when openai_key is set (see check_tags_chinese)

# Grounding DINO
import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap

# segment anything
from segment_anything import (
    build_sam,
    # build_sam_hq,
    SamPredictor
)
import cv2
import matplotlib.pyplot as plt

# Recognize Anything Model & Tag2Text
from ram.models import ram
from ram import inference_ram
import torchvision.transforms as TS

from huggingface_hub import hf_hub_download

# ChatGPT or nltk is required when using tags_chinese
# import openai
# import nltk

def load_image(image_path):
    # load image
    image_pil = Image.open(image_path).convert("RGB")
    transform = T.Compose([
        T.RandomResize([800], max_size=1333),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    image, _ = transform(image_pil, None)  # 3, h, w
    return image_pil, image

def check_tags_chinese(tags_chinese, pred_phrases, max_tokens=100, model="gpt-3.5-turbo"):
    object_list = [obj.split('(')[0] for obj in pred_phrases]
    object_num = []
    for obj in set(object_list):
        object_num.append(f'{object_list.count(obj)} {obj}')
    object_num = ', '.join(object_num)
    print(f"Correct object number: {object_num}")
    if openai_key:
        prompt = [{
            'role': 'system',
            'content': 'Revise the number in the tags_chinese if it is wrong. ' + \
                       f'tags_chinese: {tags_chinese}. ' + \
                       f'True object number: {object_num}. ' + \
                       'Only give the revised tags_chinese: '
        }]
        response = litellm.completion(model=model, messages=prompt, temperature=0.6, max_tokens=max_tokens)
        reply = response['choices'][0]['message']['content']
        # sometimes returns with "tags_chinese: xxx, xxx, xxx"
        tags_chinese = reply.split(':')[-1].strip()
    return tags_chinese

def load_grounding(filename, ckpt_config_filename, repo_id="ShilongLiu/GroundingDINO", is_path=False, device='cpu'):
    if is_path:
        cache_file = filename
        cache_config_file = ckpt_config_filename
    else:
        cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
        cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)
    args = SLConfig.fromfile(cache_config_file)
    model = build_model(args)
    args.device = device
    checkpoint = torch.load(cache_file, map_location='cpu')
    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
    print("Model loaded from {} \n => {}".format(cache_file, log))
    _ = model.eval()
    return model

def get_grounding_output(model, image, caption, box_threshold, text_threshold, device="cpu"):
    caption = caption.lower()
    caption = caption.strip()
    if not caption.endswith("."):
        caption = caption + "."
    model = model.to(device)
    image = image.to(device)
    with torch.no_grad():
        outputs = model(image[None], captions=[caption])
    logits = outputs["pred_logits"].cpu().sigmoid()[0]  # (nq, 256)
    boxes = outputs["pred_boxes"].cpu()[0]  # (nq, 4)
    # filter output
    logits_filt = logits.clone()
    boxes_filt = boxes.clone()
    filt_mask = logits_filt.max(dim=1)[0] > box_threshold
    logits_filt = logits_filt[filt_mask]  # num_filt, 256
    boxes_filt = boxes_filt[filt_mask]  # num_filt, 4
    # get phrases
    tokenlizer = model.tokenizer
    tokenized = tokenlizer(caption)
    # build predictions
    pred_phrases = []
    scores = []
    for logit, box in zip(logits_filt, boxes_filt):
        pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
        pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
        scores.append(logit.max().item())
    return boxes_filt, torch.Tensor(scores), pred_phrases

def show_mask(mask, ax, random_color=False):
    if random_color:
        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
    else:
        color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6])
    h, w = mask.shape[-2:]
    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
    ax.imshow(mask_image)

def show_box(box, ax, label):
    x0, y0 = box[0], box[1]
    w, h = box[2] - box[0], box[3] - box[1]
    ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2))
    ax.text(x0, y0, label)

def save_mask_data(output_dir, tags_chinese, mask_list, box_list, label_list):
    value = 0  # 0 for background
    mask_img = torch.zeros(mask_list.shape[-2:])
    for idx, mask in enumerate(mask_list):
        mask_img[mask.cpu().numpy()[0] == True] = value + idx + 1
    plt.figure(figsize=(10, 10))
    plt.imshow(mask_img.numpy())
    plt.axis('off')
    plt.savefig(os.path.join(output_dir, 'mask.jpg'), bbox_inches="tight", dpi=300, pad_inches=0.0)
    json_data = {
        'tags_chinese': tags_chinese,
        'mask': [{
            'value': value,
            'label': 'background'
        }]
    }
    for label, box in zip(label_list, box_list):
        value += 1
        name, logit = label.split('(')
        logit = logit[:-1]  # the last character is ')'
        json_data['mask'].append({
            'value': value,
            'label': name,
            'logit': float(logit),
            'box': box.numpy().tolist(),
        })
    with open(os.path.join(output_dir, 'label.json'), 'w') as f:
        json.dump(json_data, f)

if __name__ == "__main__":
    parser = argparse.ArgumentParser("Grounded-Segment-Anything Demo", add_help=True)
    parser.add_argument("--config", type=str, required=True, help="path to config file")
    parser.add_argument("--ram_checkpoint", type=str, required=True, help="path to checkpoint file")
    parser.add_argument("--grounded_checkpoint", type=str, required=True, help="path to checkpoint file")
    parser.add_argument("--sam_checkpoint", type=str, required=True, help="path to checkpoint file")
    parser.add_argument("--sam_hq_checkpoint", type=str, default=None, help="path to sam-hq checkpoint file")
    parser.add_argument("--use_sam_hq", action="store_true", help="using sam-hq for prediction")
    parser.add_argument("--input_image", type=str, required=True, help="path to image file")
    parser.add_argument("--split", default=",", type=str, help="split for text prompt")
    parser.add_argument("--openai_key", type=str, help="key for chatgpt")
    parser.add_argument("--openai_proxy", default=None, type=str, help="proxy for chatgpt")
    parser.add_argument("--output_dir", "-o", type=str, default="outputs", required=True, help="output directory")
    parser.add_argument("--box_threshold", type=float, default=0.25, help="box threshold")
    parser.add_argument("--text_threshold", type=float, default=0.2, help="text threshold")
    parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold")
    parser.add_argument("--device", type=str, default="cpu", help="running on cpu only!, default=False")
    args = parser.parse_args()

    # cfg
    config_file = args.config  # change the path of the model config file
    ram_checkpoint = args.ram_checkpoint  # change the path of the model
    grounded_checkpoint = args.grounded_checkpoint  # change the path of the model
    sam_checkpoint = args.sam_checkpoint
    sam_hq_checkpoint = args.sam_hq_checkpoint
    use_sam_hq = args.use_sam_hq
    image_path = args.input_image
    split = args.split
    openai_key = args.openai_key
    openai_proxy = args.openai_proxy
    output_dir = args.output_dir
    box_threshold = args.box_threshold
    text_threshold = args.text_threshold
    iou_threshold = args.iou_threshold
    device = args.device

    # ChatGPT or nltk is required when using tags_chinese
    # openai.api_key = openai_key
    # if openai_proxy:
    #     openai.proxy = {"http": openai_proxy, "https": openai_proxy}

    # make dir
    os.makedirs(output_dir, exist_ok=True)
    # load image
    image_pil, image = load_image(image_path)
    plt.imshow(image_pil)
    plt.show()
    # load grounding model
    grounding_model = load_grounding(grounded_checkpoint, config_file, is_path=True)
    # visualize raw image
    image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
    # initialize Recognize Anything Model
    normalize = TS.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform = TS.Compose([
        TS.Resize((384, 384)),
        TS.ToTensor(),
        normalize
    ])
    # load model
    # https://recognize-anything.github.io
    ram_model = ram(pretrained=ram_checkpoint, image_size=384, vit='swin_l')
    # threshold for tagging:
    # we reduce the threshold to obtain more tags
    ram_model.eval()
    ram_model = ram_model.to(device)
    raw_image = image_pil.resize((384, 384))
    raw_image = transform(raw_image).unsqueeze(0).to(device)
    res = inference_ram(raw_image, ram_model)
    # Currently ", " is better for detecting single tags
    # while ". " is a little worse in some cases
    tags = res[0].replace(' |', ',')
    tags_chinese = res[1].replace(' |', ',')
    print("Image Tags: ", res[0])
    print("Image tags (Chinese): ", res[1])
    # run grounding dino model
    boxes_filt, scores, pred_phrases = get_grounding_output(
        grounding_model, image, tags, box_threshold, text_threshold, device=device
    )
    # initialize SAM
    if use_sam_hq:
        print("Initialize SAM-HQ Predictor")
        # uncomment the build_sam_hq import above to use SAM-HQ
        # predictor = SamPredictor(build_sam_hq(checkpoint=sam_hq_checkpoint).to(device))
    else:
        predictor = SamPredictor(build_sam(checkpoint=sam_checkpoint).to(device))
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    predictor.set_image(image)

    size = image_pil.size
    H, W = size[1], size[0]
    # convert normalized cxcywh boxes to absolute xyxy pixel coordinates
    for i in range(boxes_filt.size(0)):
        boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
        boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
        boxes_filt[i][2:] += boxes_filt[i][:2]
    boxes_filt = boxes_filt.cpu()

    # use NMS to handle overlapped boxes
    print(f"Before NMS: {boxes_filt.shape[0]} boxes")
    nms_idx = torchvision.ops.nms(boxes_filt, scores, iou_threshold).numpy().tolist()
    boxes_filt = boxes_filt[nms_idx]
    pred_phrases = [pred_phrases[idx] for idx in nms_idx]
    print(f"After NMS: {boxes_filt.shape[0]} boxes")
    tags_chinese = check_tags_chinese(tags_chinese, pred_phrases)
    print(f"Revise tags_chinese with number: {tags_chinese}")

    transformed_boxes = predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(device)
    masks, _, _ = predictor.predict_torch(
        point_coords=None,
        point_labels=None,
        boxes=transformed_boxes.to(device),
        multimask_output=False,
    )
    # draw output image
    plt.figure(figsize=(10, 10))
    plt.imshow(image)
    for mask in masks:
        show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
    for box, label in zip(boxes_filt, pred_phrases):
        show_box(box.numpy(), plt.gca(), label)
    # plt.title('RAM-tags' + tags + '\n' + 'RAM-tags_chinese: ' + tags_chinese + '\n')
    plt.axis('off')
    # save before plt.show(), which clears the current figure
    plt.savefig(
        os.path.join(output_dir, "automatic_label_output.jpg"),
        bbox_inches="tight", dpi=300, pad_inches=0.0
    )
    plt.show()
    save_mask_data(output_dir, tags_chinese, masks, boxes_filt, pred_phrases)
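save_mask_data writes mask.jpg plus a label.json describing each mask region. A small sketch of reading it back (assuming the default outputs directory):

import json

with open("outputs/label.json") as f:
    data = json.load(f)
for region in data["mask"]:
    if region["label"] == "background":
        continue  # the background entry has no box/logit fields
    # each region carries the phrase, its confidence, and the xyxy box
    print(region["value"], region["label"], region["logit"], region["box"])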
Run arguments:

--config /devdata/chengan/GSAM_checkpoint/groundingino/GroundingDINO_SwinB.cfg.py
--grounded_checkpoint /devdata/chengan/GSAM_checkpoint/groundingino/groundingdino_swinb_cogcoor.pth
--sam_checkpoint /devdata/chengan/SAM_checkpoint/sam_vit_h_4b8939.pth
--ram_checkpoint /devdata/chengan/GSAM_checkpoint/ram/ram_swin_large_14m.pth
--input_image ../data/headImage.png
--output_dir "outputs"
--box_threshold 0.3
--text_threshold 0.25
--device "cuda"

[Figure: automatic labeling output with RAM tags, boxes, and masks]
