Understanding the MMAction2 Framework Design in 20 Minutes

Step 3: Build a Recognizer

# Changed here: predictions[0].pred_score -> predictions[0].pred_scores.item
print('Scores of Sample[0]', predictions[0].pred_scores.item)
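Why this change works, for context: in this version of the code the classification head packs the averaged clip scores into a `LabelData` whose `item` field holds the raw score tensor, so the scores are read through `.pred_scores.item` rather than `.pred_score`. A minimal standalone sketch of that structure (illustrative only; it mirrors what `ClsHeadZelda.predict` does in the full code below):

import torch
from mmengine.structures import LabelData

# LabelData accepts arbitrary data fields; `item` is simply the field name
# this tutorial uses for the score tensor.
pred = LabelData(item=torch.tensor([0.8, 0.2]))
print(pred.item)  # tensor([0.8000, 0.2000]) -- hence `pred_scores.item`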

Step 4: Build an Evaluation Metric

# Changed here: data_sample['pred_score'].cpu().numpy() -> data_sample['pred_scores']['item']
scores = data_sample['pred_scores']['item']
# and additionally add scores = np.array(scores) right below
scores = np.array(scores)
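The extra `np.array(...)` call is needed because `top_k_accuracy` operates on NumPy arrays, while the value stored under `['pred_scores']['item']` is still a torch tensor after `to_dict()`. A quick sketch of the conversion, assuming the tensor is already on the CPU:

import numpy as np
import torch

scores = torch.tensor([0.8, 0.2])  # what data_sample['pred_scores']['item'] holds
scores = np.array(scores)          # now an ndarray, as top_k_accuracy expects
print(scores.argmax())             # -> 0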

Step 5: Train and Test with Native PyTorch

'''
Changed here:
    for data_batch in track_iter_progress(val_data_loader):
becomes:
    task_num = len(val_data_loader)
    for data_batch in track_iter_progress((val_data_loader, task_num)):
'''
task_num = len(val_data_loader)
for data_batch in track_iter_progress((val_data_loader, task_num)):
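For background: `track_iter_progress` expects either a plain list or an `(iterable, task_num)` tuple, and a dataloader is not a list, so the tuple form is used to tell the progress bar how many iterations to expect. A tiny self-contained sketch of the tuple form:

from mmengine import track_iter_progress

gen = (i * i for i in range(5))          # a generator has no usable len()
for x in track_iter_progress((gen, 5)):  # so pass (iterable, task_num) instead
    pass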

The Complete Modified Code

import mmaction
from mmaction.utils import register_all_modules

register_all_modules(init_default_scope=True)

print('************************** Step 0: Prepare the data *****************************')
print('************************** Step 1: Build a data pipeline *****************************')

import mmcv
import decord
import numpy as np
from mmcv.transforms import TRANSFORMS, BaseTransform, to_tensor
from mmaction.structures import ActionDataSample


@TRANSFORMS.register_module()
class VideoInit(BaseTransform):
    def transform(self, results):
        container = decord.VideoReader(results['filename'])
        results['total_frames'] = len(container)
        results['video_reader'] = container
        return results


@TRANSFORMS.register_module()
class VideoSample(BaseTransform):
    def __init__(self, clip_len, num_clips, test_mode=False):
        self.clip_len = clip_len
        self.num_clips = num_clips
        self.test_mode = test_mode

    def transform(self, results):
        total_frames = results['total_frames']
        interval = total_frames // self.clip_len

        if self.test_mode:
            # Make sampling deterministic during testing
            np.random.seed(42)

        inds_of_all_clips = []
        for i in range(self.num_clips):
            bids = np.arange(self.clip_len) * interval
            offset = np.random.randint(interval, size=bids.shape)
            inds = bids + offset
            inds_of_all_clips.append(inds)

        results['frame_inds'] = np.concatenate(inds_of_all_clips)
        results['clip_len'] = self.clip_len
        results['num_clips'] = self.num_clips
        return results


@TRANSFORMS.register_module()
class VideoDecode(BaseTransform):
    def transform(self, results):
        frame_inds = results['frame_inds']
        container = results['video_reader']

        imgs = container.get_batch(frame_inds).asnumpy()
        imgs = list(imgs)

        results['video_reader'] = None
        del container

        results['imgs'] = imgs
        results['img_shape'] = imgs[0].shape[:2]
        return results


@TRANSFORMS.register_module()
class VideoResize(BaseTransform):
    def __init__(self, r_size):
        self.r_size = (np.inf, r_size)

    def transform(self, results):
        img_h, img_w = results['img_shape']
        new_w, new_h = mmcv.rescale_size((img_w, img_h), self.r_size)

        imgs = [mmcv.imresize(img, (new_w, new_h))
                for img in results['imgs']]
        results['imgs'] = imgs
        results['img_shape'] = imgs[0].shape[:2]
        return results


@TRANSFORMS.register_module()
class VideoCrop(BaseTransform):
    def __init__(self, c_size):
        self.c_size = c_size

    def transform(self, results):
        img_h, img_w = results['img_shape']
        center_x, center_y = img_w // 2, img_h // 2
        x1, x2 = center_x - self.c_size // 2, center_x + self.c_size // 2
        y1, y2 = center_y - self.c_size // 2, center_y + self.c_size // 2
        imgs = [img[y1:y2, x1:x2] for img in results['imgs']]
        results['imgs'] = imgs
        results['img_shape'] = imgs[0].shape[:2]
        return results


@TRANSFORMS.register_module()
class VideoFormat(BaseTransform):
    def transform(self, results):
        num_clips = results['num_clips']
        clip_len = results['clip_len']
        imgs = results['imgs']

        # [num_clips * clip_len, H, W, C]
        imgs = np.array(imgs)
        # [num_clips, clip_len, H, W, C]
        imgs = imgs.reshape((num_clips, clip_len) + imgs.shape[1:])
        # [num_clips, C, clip_len, H, W]
        imgs = imgs.transpose(0, 4, 1, 2, 3)

        results['imgs'] = imgs
        return results


@TRANSFORMS.register_module()
class VideoPack(BaseTransform):
    def __init__(self, meta_keys=('img_shape', 'num_clips', 'clip_len')):
        self.meta_keys = meta_keys

    def transform(self, results):
        packed_results = dict()
        inputs = to_tensor(results['imgs'])
        data_sample = ActionDataSample().set_gt_label(results['label'])
        metainfo = {k: results[k] for k in self.meta_keys if k in results}
        data_sample.set_metainfo(metainfo)
        packed_results['inputs'] = inputs
        packed_results['data_samples'] = data_sample
        return packed_results


import os.path as osp
from mmengine.dataset import Compose

pipeline_cfg = [
    dict(type='VideoInit'),
    dict(type='VideoSample', clip_len=16, num_clips=1, test_mode=False),
    dict(type='VideoDecode'),
    dict(type='VideoResize', r_size=256),
    dict(type='VideoCrop', c_size=224),
    dict(type='VideoFormat'),
    dict(type='VideoPack')
]
pipeline = Compose(pipeline_cfg)

data_prefix = 'data/kinetics400_tiny/train'
results = dict(filename=osp.join(data_prefix, 'D32_1gwq35E.mp4'), label=0)

packed_results = pipeline(results)
inputs = packed_results['inputs']
data_sample = packed_results['data_samples']

print('shape of the inputs: ', inputs.shape)

# Get metainfo of the inputs
print('image_shape: ', data_sample.img_shape)
print('num_clips: ', data_sample.num_clips)
print('clip_len: ', data_sample.clip_len)

# Get the label of the inputs
print('label: ', data_sample.gt_label)

print('************************** Step 2: Build a dataset and a dataloader *****************************')

import os.path as osp
from mmengine.fileio import list_from_file
from mmengine.dataset import BaseDataset
from mmaction.registry import DATASETS


@DATASETS.register_module()
class DatasetZelda(BaseDataset):
    def __init__(self, ann_file, pipeline, data_root, data_prefix=dict(video=''),
                 test_mode=False, modality='RGB', **kwargs):
        self.modality = modality
        super(DatasetZelda, self).__init__(ann_file=ann_file, pipeline=pipeline,
                                           data_root=data_root,
                                           data_prefix=data_prefix,
                                           test_mode=test_mode,
                                           **kwargs)

    def load_data_list(self):
        data_list = []
        fin = list_from_file(self.ann_file)
        for line in fin:
            line_split = line.strip().split()
            filename, label = line_split
            label = int(label)
            filename = osp.join(self.data_prefix['video'], filename)
            data_list.append(dict(filename=filename, label=label))
        return data_list

    def get_data_info(self, idx: int) -> dict:
        data_info = super().get_data_info(idx)
        data_info['modality'] = self.modality
        return data_info


train_pipeline_cfg = [
    dict(type='VideoInit'),
    dict(type='VideoSample', clip_len=16, num_clips=1, test_mode=False),
    dict(type='VideoDecode'),
    dict(type='VideoResize', r_size=256),
    dict(type='VideoCrop', c_size=224),
    dict(type='VideoFormat'),
    dict(type='VideoPack')
]

val_pipeline_cfg = [
    dict(type='VideoInit'),
    dict(type='VideoSample', clip_len=16, num_clips=5, test_mode=True),
    dict(type='VideoDecode'),
    dict(type='VideoResize', r_size=256),
    dict(type='VideoCrop', c_size=224),
    dict(type='VideoFormat'),
    dict(type='VideoPack')
]

train_dataset_cfg = dict(
    type='DatasetZelda',
    ann_file='kinetics_tiny_train_video.txt',
    pipeline=train_pipeline_cfg,
    data_root='data/kinetics400_tiny/',
    data_prefix=dict(video='train'))

val_dataset_cfg = dict(
    type='DatasetZelda',
    ann_file='kinetics_tiny_val_video.txt',
    pipeline=val_pipeline_cfg,
    data_root='data/kinetics400_tiny/',
    data_prefix=dict(video='val'))

train_dataset = DATASETS.build(train_dataset_cfg)

packed_results = train_dataset[0]
inputs = packed_results['inputs']
data_sample = packed_results['data_samples']

print('shape of the inputs: ', inputs.shape)

# Get metainfo of the inputs
print('image_shape: ', data_sample.img_shape)
print('num_clips: ', data_sample.num_clips)
print('clip_len: ', data_sample.clip_len)

# Get the label of the inputs
print('label: ', data_sample.gt_label)

from mmengine.runner import Runner

BATCH_SIZE = 2

train_dataloader_cfg = dict(
    batch_size=BATCH_SIZE,
    num_workers=0,
    persistent_workers=False,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=train_dataset_cfg)

val_dataloader_cfg = dict(
    batch_size=BATCH_SIZE,
    num_workers=0,
    persistent_workers=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=val_dataset_cfg)

train_data_loader = Runner.build_dataloader(dataloader=train_dataloader_cfg)
val_data_loader = Runner.build_dataloader(dataloader=val_dataloader_cfg)

batched_packed_results = next(iter(train_data_loader))
batched_inputs = batched_packed_results['inputs']
batched_data_sample = batched_packed_results['data_samples']

assert len(batched_inputs) == BATCH_SIZE
assert len(batched_data_sample) == BATCH_SIZE

print('************************** Step 3: Build a recognizer *****************************')

import torch
from mmengine.model import BaseDataPreprocessor, stack_batch
from mmaction.registry import MODELS


@MODELS.register_module()
class DataPreprocessorZelda(BaseDataPreprocessor):
    def __init__(self, mean, std):
        super().__init__()

        self.register_buffer(
            'mean',
            torch.tensor(mean, dtype=torch.float32).view(-1, 1, 1, 1),
            False)
        self.register_buffer(
            'std',
            torch.tensor(std, dtype=torch.float32).view(-1, 1, 1, 1),
            False)

    def forward(self, data, training=False):
        data = self.cast_data(data)
        inputs = data['inputs']
        batch_inputs = stack_batch(inputs)  # batching
        batch_inputs = (batch_inputs - self.mean) / self.std  # normalization
        data['inputs'] = batch_inputs
        return data


data_preprocessor_cfg = dict(
    type='DataPreprocessorZelda',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375])

data_preprocessor = MODELS.build(data_preprocessor_cfg)

preprocessed_inputs = data_preprocessor(batched_packed_results)
print(preprocessed_inputs['inputs'].shape)

import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import BaseModel, BaseModule, Sequential
from mmengine.structures import LabelData


@MODELS.register_module()
class BackBoneZelda(BaseModule):
    def __init__(self, init_cfg=None):
        if init_cfg is None:
            init_cfg = [
                dict(type='Kaiming', layer='Conv3d', mode='fan_out',
                     nonlinearity='relu'),
                dict(type='Constant', layer='BatchNorm3d', val=1, bias=0)
            ]

        super(BackBoneZelda, self).__init__(init_cfg=init_cfg)

        self.conv1 = Sequential(
            nn.Conv3d(3, 64, kernel_size=(3, 7, 7),
                      stride=(1, 2, 2), padding=(1, 3, 3)),
            nn.BatchNorm3d(64), nn.ReLU())
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2),
                                    padding=(0, 1, 1))
        self.conv = Sequential(
            nn.Conv3d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm3d(128), nn.ReLU())

    def forward(self, imgs):
        # imgs: [batch_size*num_views, 3, T, H, W]
        # features: [batch_size*num_views, 128, T/2, H//8, W//8]
        features = self.conv(self.maxpool(self.conv1(imgs)))
        return features


@MODELS.register_module()
class ClsHeadZelda(BaseModule):
    def __init__(self, num_classes, in_channels, dropout=0.5,
                 average_clips='prob', init_cfg=None):
        if init_cfg is None:
            init_cfg = dict(type='Normal', layer='Linear', std=0.01)

        super(ClsHeadZelda, self).__init__(init_cfg=init_cfg)

        self.num_classes = num_classes
        self.in_channels = in_channels
        self.average_clips = average_clips

        if dropout != 0:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None

        self.fc = nn.Linear(self.in_channels, self.num_classes)
        self.pool = nn.AdaptiveAvgPool3d(1)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        N, C, T, H, W = x.shape
        x = self.pool(x)
        x = x.view(N, C)
        assert x.shape[1] == self.in_channels

        if self.dropout is not None:
            x = self.dropout(x)

        cls_scores = self.fc(x)
        return cls_scores

    def loss(self, feats, data_samples):
        cls_scores = self(feats)
        labels = torch.stack([x.gt_label for x in data_samples])
        labels = labels.squeeze()

        if labels.shape == torch.Size([]):
            labels = labels.unsqueeze(0)

        loss_cls = self.loss_fn(cls_scores, labels)
        return dict(loss_cls=loss_cls)

    def predict(self, feats, data_samples):
        cls_scores = self(feats)
        num_views = cls_scores.shape[0] // len(data_samples)
        # assert num_views == data_samples[0].num_clips
        cls_scores = self.average_clip(cls_scores, num_views)

        for ds, sc in zip(data_samples, cls_scores):
            pred = LabelData(item=sc)
            ds.pred_scores = pred
        return data_samples

    def average_clip(self, cls_scores, num_views):
        if self.average_clips not in ['score', 'prob', None]:
            raise ValueError(f'{self.average_clips} is not supported. '
                             f'Currently supported ones are '
                             f'["score", "prob", None]')

        total_views = cls_scores.shape[0]
        cls_scores = cls_scores.view(total_views // num_views, num_views, -1)

        if self.average_clips is None:
            return cls_scores
        elif self.average_clips == 'prob':
            cls_scores = F.softmax(cls_scores, dim=2).mean(dim=1)
        elif self.average_clips == 'score':
            cls_scores = cls_scores.mean(dim=1)

        return cls_scores


@MODELS.register_module()
class RecognizerZelda(BaseModel):
    def __init__(self, backbone, cls_head, data_preprocessor):
        super().__init__(data_preprocessor=data_preprocessor)

        self.backbone = MODELS.build(backbone)
        self.cls_head = MODELS.build(cls_head)

    def extract_feat(self, inputs):
        inputs = inputs.view((-1, ) + inputs.shape[2:])
        return self.backbone(inputs)

    def loss(self, inputs, data_samples):
        feats = self.extract_feat(inputs)
        loss = self.cls_head.loss(feats, data_samples)
        return loss

    def predict(self, inputs, data_samples):
        feats = self.extract_feat(inputs)
        predictions = self.cls_head.predict(feats, data_samples)
        return predictions

    def forward(self, inputs, data_samples=None, mode='tensor'):
        if mode == 'tensor':
            return self.extract_feat(inputs)
        elif mode == 'loss':
            return self.loss(inputs, data_samples)
        elif mode == 'predict':
            return self.predict(inputs, data_samples)
        else:
            raise RuntimeError(f'Invalid mode: {mode}')


import copy

model_cfg = dict(
    type='RecognizerZelda',
    backbone=dict(type='BackBoneZelda'),
    cls_head=dict(
        type='ClsHeadZelda',
        num_classes=2,
        in_channels=128,
        average_clips='prob'),
    data_preprocessor=dict(
        type='DataPreprocessorZelda',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375]))

model = MODELS.build(model_cfg)

# Training
model.train()
model.init_weights()
data_batch_train = copy.deepcopy(batched_packed_results)
data = model.data_preprocessor(data_batch_train, training=True)
loss = model(**data, mode='loss')
print('loss dict: ', loss)

# Validation
with torch.no_grad():
    model.eval()
    data_batch_test = copy.deepcopy(batched_packed_results)
    data = model.data_preprocessor(data_batch_test, training=False)
    predictions = model(**data, mode='predict')

print('Label of Sample[0]', predictions[0].gt_label)
print('Scores of Sample[0]', predictions[0].pred_scores.item)
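
# Optional sanity check (an addition, not in the original tutorial): the
# averaged clip scores live in `pred_scores.item`, so the predicted class
# is just the argmax of that tensor.
print('Predicted class of Sample[0]',
      torch.argmax(predictions[0].pred_scores.item).item())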

print('************************** Step 4: Build an evaluation metric *****************************')

import copy
from collections import OrderedDict
from mmengine.evaluator import BaseMetric
from mmaction.evaluation import top_k_accuracy
from mmaction.registry import METRICS


@METRICS.register_module()
class AccuracyMetric(BaseMetric):
    def __init__(self, topk=(1, 5), collect_device='cpu', prefix='acc'):
        super().__init__(collect_device=collect_device, prefix=prefix)
        self.topk = topk

    def process(self, data_batch, data_samples):
        data_samples = copy.deepcopy(data_samples)
        for data_sample in data_samples:
            result = dict()
            scores = data_sample['pred_scores']['item']
            scores = np.array(scores)
            label = data_sample['gt_label'].item()
            result['scores'] = scores
            result['label'] = label
            self.results.append(result)

    def compute_metrics(self, results: list) -> dict:
        eval_results = OrderedDict()
        labels = [res['label'] for res in results]
        scores = [res['scores'] for res in results]
        topk_acc = top_k_accuracy(scores, labels, self.topk)
        for k, acc in zip(self.topk, topk_acc):
            eval_results[f'topk{k}'] = acc
        return eval_results


metric_cfg = dict(type='AccuracyMetric', topk=(1, 5))

metric = METRICS.build(metric_cfg)

data_samples = [d.to_dict() for d in predictions]

metric.process(batched_packed_results, data_samples)
acc = metric.compute_metrics(metric.results)
print(acc)

print('************************** Step 5: Train and test with native PyTorch *****************************')

import torch.optim as optim
from mmengine import track_iter_progress

device = 'cuda'  # or 'cpu'
max_epochs = 10

# Place the model (and its data preprocessor) on the chosen device;
# without this, the `device` setting above would have no effect.
model.to(device)

optimizer = optim.Adam(model.parameters(), lr=0.01)

for epoch in range(max_epochs):
    model.train()
    losses = []
    task_num = len(train_data_loader)
    for data_batch in track_iter_progress((train_data_loader, task_num)):
        data = model.data_preprocessor(data_batch, training=True)
        loss_dict = model(**data, mode='loss')
        loss = loss_dict['loss_cls']

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())

    print(f'Epoch[{epoch}]: loss ', sum(losses) / len(train_data_loader))

    with torch.no_grad():
        model.eval()
        task_num = len(val_data_loader)
        for data_batch in track_iter_progress((val_data_loader, task_num)):
            data = model.data_preprocessor(data_batch, training=False)
            predictions = model(**data, mode='predict')
            data_samples = [d.to_dict() for d in predictions]
            metric.process(data_batch, data_samples)

        acc = metric.compute_metrics(metric.results)
        for name, topk in acc.items():
            print(f'{name}: ', topk)
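
# Caveat (an observation added here, not from the original post): calling
# metric.compute_metrics(metric.results) directly never clears the collected
# results, so each epoch's accuracy is computed over every epoch seen so far.
# For true per-epoch numbers, reset the list after reporting, e.g.:
#     metric.results.clear()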

print('************************** Step 6: Train and test with MMEngine (recommended) *****************************')
# from mmengine.runner import Runner
#
# train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=10, val_interval=1)
# val_cfg = dict(type='ValLoop')
#
# optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.01))
#
# runner = Runner(model=model_cfg, work_dir='./work_dirs/guide',
#                 train_dataloader=train_dataloader_cfg,
#                 train_cfg=train_cfg,
#                 val_dataloader=val_dataloader_cfg,
#                 val_cfg=val_cfg,
#                 optim_wrapper=optim_wrapper,
#                 val_evaluator=[metric_cfg],
#                 default_scope='mmaction')
# runner.train()
