Diffusion Demystified (4): Implementing a Simple Diffusion Model in PyTorch

1. Training and sampling pipeline
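
DDPM training and sampling mirror each other, and the code in the next two sections implements them one-to-one.

Training: for each clean image x_0, draw a random step t ~ Uniform({1, ..., T}) and noise \epsilon ~ N(0, I), form the noised image

    x_t = \sqrt{\bar{\alpha}_t} x_0 + \sqrt{1 - \bar{\alpha}_t} \epsilon

and minimize the MSE between \epsilon and the network prediction \epsilon_\theta(x_t, t).

Sampling: start from pure noise x_T ~ N(0, I) and denoise step by step for t = T, ..., 1:

    x_{t-1} = \frac{1}{\sqrt{\alpha_t}} \left( x_t - \frac{1 - \alpha_t}{\sqrt{1 - \bar{\alpha}_t}} \epsilon_\theta(x_t, t) \right) + \sqrt{\beta_t} z

with z ~ N(0, I) for t > 1 and z = 0 at the last step. Here \alpha_t = 1 - \beta_t and \bar{\alpha}_t = \prod_{s=1}^{t} \alpha_s. The ddpm_schedules function below precomputes every coefficient in these two formulas; DDPM.forward implements the training step and DDPM.sample implements the reverse loop.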

2. Unconditional implementation
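
The script below trains an unconditional DDPM on MNIST. ResidualConvBlock, UnetDown, UnetUp and EmbedFC are the building blocks; Unet is the noise predictor \epsilon_\theta(x_t, t); DDPM wraps it together with the precomputed schedules, the training loss and the sampling loop; and ImageGenerator handles data loading, the training loop, and saving a grid of samples after every epoch.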

import torch, time, os
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F


class ResidualConvBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, is_res: bool = False) -> None:
        '''standard ResNet style convolutional block'''
        super().__init__()
        self.same_channels = in_channels == out_channels
        self.is_res = is_res
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.is_res:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            # this adds on the correct residual in case channels have increased
            if self.same_channels:
                out = x + x2
            else:
                out = x1 + x2
            return out / 1.414  # divide by sqrt(2) to keep the variance of the residual sum stable
        else:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            return x2


class UnetDown(nn.Module):
    def __init__(self, in_channels, out_channels):
        '''process and downscale the image feature maps'''
        super(UnetDown, self).__init__()
        layers = [ResidualConvBlock(in_channels, out_channels), nn.MaxPool2d(2)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)


class UnetUp(nn.Module):
    def __init__(self, in_channels, out_channels):
        '''process and upscale the image feature maps'''
        super(UnetUp, self).__init__()
        layers = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, 2),
            ResidualConvBlock(out_channels, out_channels),
            ResidualConvBlock(out_channels, out_channels),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x, skip):
        x = torch.cat((x, skip), 1)
        x = self.model(x)
        return x


class EmbedFC(nn.Module):
    def __init__(self, input_dim, emb_dim):
        '''generic two-layer FC network for embedding things'''
        super(EmbedFC, self).__init__()
        self.input_dim = input_dim
        layers = [
            nn.Linear(input_dim, emb_dim),
            nn.GELU(),
            nn.Linear(emb_dim, emb_dim),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        x = x.view(-1, self.input_dim)
        return self.model(x)


class Unet(nn.Module):
    def __init__(self, in_channels, n_feat=256):
        super(Unet, self).__init__()
        self.in_channels = in_channels
        self.n_feat = n_feat
        self.init_conv = ResidualConvBlock(in_channels, n_feat, is_res=True)
        self.down1 = UnetDown(n_feat, n_feat)
        self.down2 = UnetDown(n_feat, 2 * n_feat)
        self.to_vec = nn.Sequential(nn.AvgPool2d(7), nn.GELU())
        self.timeembed1 = EmbedFC(1, 2 * n_feat)
        self.timeembed2 = EmbedFC(1, 1 * n_feat)
        self.up0 = nn.Sequential(
            # nn.ConvTranspose2d(6 * n_feat, 2 * n_feat, 7, 7), # when concat temb and cemb end up w 6*n_feat
            nn.ConvTranspose2d(2 * n_feat, 2 * n_feat, 7, 7),  # otherwise just have 2*n_feat
            nn.GroupNorm(8, 2 * n_feat),
            nn.ReLU(),
        )
        self.up1 = UnetUp(4 * n_feat, n_feat)
        self.up2 = UnetUp(2 * n_feat, n_feat)
        self.out = nn.Sequential(
            nn.Conv2d(2 * n_feat, n_feat, 3, 1, 1),
            nn.GroupNorm(8, n_feat),
            nn.ReLU(),
            nn.Conv2d(n_feat, self.in_channels, 3, 1, 1),
        )

    def forward(self, x, t):
        '''
        Given a noised image and its time step, predict the Gaussian noise that was added.
        :param x: noised image
        :param t: corresponding step (normalized to [0, 1])
        :return: predicted noise
        '''
        x = self.init_conv(x)
        down1 = self.down1(x)
        down2 = self.down2(down1)
        hiddenvec = self.to_vec(down2)

        # embed time step
        temb1 = self.timeembed1(t).view(-1, self.n_feat * 2, 1, 1)
        temb2 = self.timeembed2(t).view(-1, self.n_feat, 1, 1)

        # add the step embedding to each upsampling output before the next upsampling layer
        up1 = self.up0(hiddenvec)
        up2 = self.up1(up1 + temb1, down2)
        up3 = self.up2(up2 + temb2, down1)
        out = self.out(torch.cat((up3, x), 1))
        return out


class DDPM(nn.Module):
    def __init__(self, model, betas, n_T, device):
        super(DDPM, self).__init__()
        self.model = model.to(device)
        # register_buffer lets us precompute the alpha-related schedules once and reuse them
        for k, v in self.ddpm_schedules(betas[0], betas[1], n_T).items():
            self.register_buffer(k, v)
        self.n_T = n_T
        self.device = device
        self.loss_mse = nn.MSELoss()

    def ddpm_schedules(self, beta1, beta2, T):
        '''
        Precompute the per-step schedule quantities; beta grows linearly here.
        :param beta1: lower bound of beta
        :param beta2: upper bound of beta
        :param T: total number of steps
        '''
        assert beta1 < beta2 < 1.0, "beta1 and beta2 must be in (0, 1)"
        # linearly spaced array from beta1 to beta2
        beta_t = (beta2 - beta1) * torch.arange(0, T + 1, dtype=torch.float32) / T + beta1
        sqrt_beta_t = torch.sqrt(beta_t)
        alpha_t = 1 - beta_t
        log_alpha_t = torch.log(alpha_t)
        alphabar_t = torch.cumsum(log_alpha_t, dim=0).exp()  # cumulative product of alpha
        sqrtab = torch.sqrt(alphabar_t)
        oneover_sqrta = 1 / torch.sqrt(alpha_t)
        sqrtmab = torch.sqrt(1 - alphabar_t)
        mab_over_sqrtmab_inv = (1 - alpha_t) / sqrtmab
        return {
            "alpha_t": alpha_t,  # \alpha_t
            "oneover_sqrta": oneover_sqrta,  # 1/\sqrt{\alpha_t}
            "sqrt_beta_t": sqrt_beta_t,  # \sqrt{\beta_t}
            "alphabar_t": alphabar_t,  # \bar{\alpha}_t
            "sqrtab": sqrtab,  # \sqrt{\bar{\alpha}_t}, scales the clean image (the mean of x_t)
            "sqrtmab": sqrtmab,  # \sqrt{1-\bar{\alpha}_t}, scales the noise (the std of x_t)
            "mab_over_sqrtmab": mab_over_sqrtmab_inv,  # (1-\alpha_t)/\sqrt{1-\bar{\alpha}_t}
        }

    def forward(self, x):
        """During training, pick a random step and random noise for each image."""
        # pick a random step per image
        _ts = torch.randint(1, self.n_T + 1, (x.shape[0],)).to(self.device)  # t ~ Uniform({1, ..., n_T})
        # draw standard Gaussian noise
        noise = torch.randn_like(x)  # eps ~ N(0, 1)
        # the noised image x_t
        x_t = (
            self.sqrtab[_ts, None, None, None] * x
            + self.sqrtmab[_ts, None, None, None] * noise
        )
        # compare the noise the UNet predicts for this step against the true noise
        return self.loss_mse(noise, self.model(x_t, _ts / self.n_T))

    def sample(self, n_sample, size, device):
        # start from pure noise x_T ~ N(0, 1)
        x_i = torch.randn(n_sample, *size).to(device)
        for i in range(self.n_T, 0, -1):
            t_is = torch.tensor([i / self.n_T]).to(device)
            t_is = t_is.repeat(n_sample, 1, 1, 1)
            z = torch.randn(n_sample, *size).to(device) if i > 1 else 0
            eps = self.model(x_i, t_is)
            x_i = x_i[:n_sample]
            x_i = self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i]) + self.sqrt_beta_t[i] * z
        return x_i


class ImageGenerator(object):
    def __init__(self):
        '''Initialization: hyperparameters, dataset and network.'''
        self.epoch = 20
        self.sample_num = 100
        self.batch_size = 256
        self.lr = 0.0001
        self.n_T = 400
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.init_dataloader()
        self.sampler = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02), n_T=self.n_T, device=self.device).to(self.device)
        self.optimizer = optim.Adam(self.sampler.model.parameters(), lr=self.lr)

    def init_dataloader(self):
        '''Build the MNIST datasets and dataloaders.'''
        tf = transforms.Compose([
            transforms.ToTensor(),
        ])
        train_dataset = MNIST('./data/', train=True, download=True, transform=tf)
        self.train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True)
        val_dataset = MNIST('./data/', train=False, download=True, transform=tf)
        self.val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False)

    def train(self):
        self.sampler.train()
        print('Training started!')
        for epoch in range(self.epoch):
            self.sampler.model.train()
            loss_mean = 0
            for i, (images, labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.device), labels.to(self.device)
                loss = self.sampler(images)
                loss_mean += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            train_loss = loss_mean / len(self.train_dataloader)
            print('epoch:{}, loss:{:.4f}'.format(epoch, train_loss))
            self.visualize_results(epoch)

    @torch.no_grad()
    def visualize_results(self, epoch):
        self.sampler.eval()
        # where to save the sample grids
        output_path = 'results/Diffusion'
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        tot_num_samples = self.sample_num
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
        out = self.sampler.sample(tot_num_samples, (1, 28, 28), self.device)
        save_image(out, os.path.join(output_path, '{}.jpg'.format(epoch)), nrow=image_frame_dim)


if __name__ == '__main__':
    generator = ImageGenerator()
    generator.train()
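
Before launching a full training run, it helps to smoke-test the shapes on random data. Below is a minimal sketch, assuming the classes above are already in scope; the small n_T is only to keep the check fast:

import torch

device = 'cpu'
# a tiny schedule just for the check; real training uses n_T=400
ddpm = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02), n_T=50, device=device).to(device)

x = torch.randn(4, 1, 28, 28)  # a fake batch standing in for MNIST images
loss = ddpm(x)                 # one training-style forward pass
print(loss.item())             # a scalar MSE

ddpm.eval()
with torch.no_grad():
    out = ddpm.sample(n_sample=4, size=(1, 28, 28), device=device)
print(out.shape)               # torch.Size([4, 1, 28, 28])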

3. Conditional implementation
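
The conditional version reuses the same backbone and schedules; the differences are small. The class label is one-hot encoded and passed through two extra EmbedFC heads (conditionembed1 and conditionembed2), and rather than being concatenated with the features, the condition embedding multiplies each decoder stage while the time embedding is added to it: up2 = self.up1(cemb1 * up1 + temb1, down2). DDPM.forward and DDPM.sample simply thread the condition c through to the UNet.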

import torch, time, os
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F


class ResidualConvBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, is_res: bool = False) -> None:
        '''standard ResNet style convolutional block'''
        super().__init__()
        self.same_channels = in_channels == out_channels
        self.is_res = is_res
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.is_res:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            # this adds on the correct residual in case channels have increased
            if self.same_channels:
                out = x + x2
            else:
                out = x1 + x2
            return out / 1.414  # divide by sqrt(2) to keep the variance of the residual sum stable
        else:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            return x2


class UnetDown(nn.Module):
    def __init__(self, in_channels, out_channels):
        '''process and downscale the image feature maps'''
        super(UnetDown, self).__init__()
        layers = [ResidualConvBlock(in_channels, out_channels), nn.MaxPool2d(2)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)


class UnetUp(nn.Module):
    def __init__(self, in_channels, out_channels):
        '''process and upscale the image feature maps'''
        super(UnetUp, self).__init__()
        layers = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, 2),
            ResidualConvBlock(out_channels, out_channels),
            ResidualConvBlock(out_channels, out_channels),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x, skip):
        x = torch.cat((x, skip), 1)
        x = self.model(x)
        return x


class EmbedFC(nn.Module):
    def __init__(self, input_dim, emb_dim):
        '''generic two-layer FC network for embedding things'''
        super(EmbedFC, self).__init__()
        self.input_dim = input_dim
        layers = [
            nn.Linear(input_dim, emb_dim),
            nn.GELU(),
            nn.Linear(emb_dim, emb_dim),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        x = x.view(-1, self.input_dim)
        return self.model(x)


class Unet(nn.Module):
    def __init__(self, in_channels, n_feat=256, n_classes=10):
        super(Unet, self).__init__()
        self.in_channels = in_channels
        self.n_feat = n_feat
        self.init_conv = ResidualConvBlock(in_channels, n_feat, is_res=True)
        self.down1 = UnetDown(n_feat, n_feat)
        self.down2 = UnetDown(n_feat, 2 * n_feat)
        self.to_vec = nn.Sequential(nn.AvgPool2d(7), nn.GELU())
        self.timeembed1 = EmbedFC(1, 2 * n_feat)
        self.timeembed2 = EmbedFC(1, 1 * n_feat)
        self.conditionembed1 = EmbedFC(n_classes, 2 * n_feat)
        self.conditionembed2 = EmbedFC(n_classes, 1 * n_feat)
        self.up0 = nn.Sequential(
            # nn.ConvTranspose2d(6 * n_feat, 2 * n_feat, 7, 7), # when concat temb and cemb end up w 6*n_feat
            nn.ConvTranspose2d(2 * n_feat, 2 * n_feat, 7, 7),  # otherwise just have 2*n_feat
            nn.GroupNorm(8, 2 * n_feat),
            nn.ReLU(),
        )
        self.up1 = UnetUp(4 * n_feat, n_feat)
        self.up2 = UnetUp(2 * n_feat, n_feat)
        self.out = nn.Sequential(
            nn.Conv2d(2 * n_feat, n_feat, 3, 1, 1),
            nn.GroupNorm(8, n_feat),
            nn.ReLU(),
            nn.Conv2d(n_feat, self.in_channels, 3, 1, 1),
        )

    def forward(self, x, c, t):
        '''
        Given a noised image, its condition and its time step, predict the added noise.
        :param x: noised image
        :param c: condition vector (one-hot class label)
        :param t: corresponding step (normalized to [0, 1])
        :return: predicted noise
        '''
        x = self.init_conv(x)
        down1 = self.down1(x)
        down2 = self.down2(down1)
        hiddenvec = self.to_vec(down2)

        # embed time step and condition
        temb1 = self.timeembed1(t).view(-1, self.n_feat * 2, 1, 1)
        temb2 = self.timeembed2(t).view(-1, self.n_feat, 1, 1)
        cemb1 = self.conditionembed1(c).view(-1, self.n_feat * 2, 1, 1)
        cemb2 = self.conditionembed2(c).view(-1, self.n_feat, 1, 1)

        # the condition embedding scales each upsampling output and the step embedding shifts it
        up1 = self.up0(hiddenvec)
        up2 = self.up1(cemb1 * up1 + temb1, down2)
        up3 = self.up2(cemb2 * up2 + temb2, down1)
        out = self.out(torch.cat((up3, x), 1))
        return out


class DDPM(nn.Module):
    def __init__(self, model, betas, n_T, device):
        super(DDPM, self).__init__()
        self.model = model.to(device)
        # register_buffer lets us precompute the alpha-related schedules once and reuse them
        for k, v in self.ddpm_schedules(betas[0], betas[1], n_T).items():
            self.register_buffer(k, v)
        self.n_T = n_T
        self.device = device
        self.loss_mse = nn.MSELoss()

    def ddpm_schedules(self, beta1, beta2, T):
        '''
        Precompute the per-step schedule quantities; beta grows linearly here.
        :param beta1: lower bound of beta
        :param beta2: upper bound of beta
        :param T: total number of steps
        '''
        assert beta1 < beta2 < 1.0, "beta1 and beta2 must be in (0, 1)"
        # linearly spaced array from beta1 to beta2
        beta_t = (beta2 - beta1) * torch.arange(0, T + 1, dtype=torch.float32) / T + beta1
        sqrt_beta_t = torch.sqrt(beta_t)
        alpha_t = 1 - beta_t
        log_alpha_t = torch.log(alpha_t)
        alphabar_t = torch.cumsum(log_alpha_t, dim=0).exp()  # cumulative product of alpha
        sqrtab = torch.sqrt(alphabar_t)
        oneover_sqrta = 1 / torch.sqrt(alpha_t)
        sqrtmab = torch.sqrt(1 - alphabar_t)
        mab_over_sqrtmab_inv = (1 - alpha_t) / sqrtmab
        return {
            "alpha_t": alpha_t,  # \alpha_t
            "oneover_sqrta": oneover_sqrta,  # 1/\sqrt{\alpha_t}
            "sqrt_beta_t": sqrt_beta_t,  # \sqrt{\beta_t}
            "alphabar_t": alphabar_t,  # \bar{\alpha}_t
            "sqrtab": sqrtab,  # \sqrt{\bar{\alpha}_t}, scales the clean image (the mean of x_t)
            "sqrtmab": sqrtmab,  # \sqrt{1-\bar{\alpha}_t}, scales the noise (the std of x_t)
            "mab_over_sqrtmab": mab_over_sqrtmab_inv,  # (1-\alpha_t)/\sqrt{1-\bar{\alpha}_t}
        }

    def forward(self, x, c):
        """During training, pick a random step and random noise for each image."""
        # pick a random step per image
        _ts = torch.randint(1, self.n_T + 1, (x.shape[0],)).to(self.device)  # t ~ Uniform({1, ..., n_T})
        # draw standard Gaussian noise
        noise = torch.randn_like(x)  # eps ~ N(0, 1)
        # the noised image x_t
        x_t = (
            self.sqrtab[_ts, None, None, None] * x
            + self.sqrtmab[_ts, None, None, None] * noise
        )
        # compare the noise the UNet predicts for this step and condition against the true noise
        return self.loss_mse(noise, self.model(x_t, c, _ts / self.n_T))

    def sample(self, n_sample, c, size, device):
        # start from pure noise x_T ~ N(0, 1)
        x_i = torch.randn(n_sample, *size).to(device)
        for i in range(self.n_T, 0, -1):
            t_is = torch.tensor([i / self.n_T]).to(device)
            t_is = t_is.repeat(n_sample, 1, 1, 1)
            z = torch.randn(n_sample, *size).to(device) if i > 1 else 0
            eps = self.model(x_i, c, t_is)
            x_i = x_i[:n_sample]
            x_i = self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i]) + self.sqrt_beta_t[i] * z
        return x_i


class ImageGenerator(object):
    def __init__(self):
        '''Initialization: hyperparameters, dataset and network.'''
        self.epoch = 20
        self.sample_num = 100
        self.batch_size = 256
        self.lr = 0.0001
        self.n_T = 400
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.init_dataloader()
        self.sampler = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02), n_T=self.n_T, device=self.device).to(self.device)
        self.optimizer = optim.Adam(self.sampler.model.parameters(), lr=self.lr)

    def init_dataloader(self):
        '''Build the MNIST datasets and dataloaders.'''
        tf = transforms.Compose([
            transforms.ToTensor(),
        ])
        train_dataset = MNIST('./data/', train=True, download=True, transform=tf)
        self.train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True)
        val_dataset = MNIST('./data/', train=False, download=True, transform=tf)
        self.val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False)

    def train(self):
        self.sampler.train()
        print('Training started!')
        for epoch in range(self.epoch):
            self.sampler.model.train()
            loss_mean = 0
            for i, (images, labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.device), labels.to(self.device)
                labels = F.one_hot(labels, num_classes=10).float()
                # feed the images and their one-hot conditions into the network
                loss = self.sampler(images, labels)
                loss_mean += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            train_loss = loss_mean / len(self.train_dataloader)
            print('epoch:{}, loss:{:.4f}'.format(epoch, train_loss))
            self.visualize_results(epoch)

    @torch.no_grad()
    def visualize_results(self, epoch):
        self.sampler.eval()
        # where to save the sample grids
        output_path = 'results/Diffusion'
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        tot_num_samples = self.sample_num
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
        # each digit 0-9 repeated ten times, so every row of the grid shows one class
        labels = F.one_hot(torch.Tensor(np.repeat(np.arange(10), 10)).to(torch.int64), num_classes=10).to(self.device).float()
        out = self.sampler.sample(tot_num_samples, labels, (1, 28, 28), self.device)
        save_image(out, os.path.join(output_path, '{}.jpg'.format(epoch)), nrow=image_frame_dim)


if __name__ == '__main__':
    generator = ImageGenerator()
    generator.train()
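
After training, the one-hot condition controls which digit is generated. Here is a minimal sketch for sampling four images of the digit 7, assuming the conditional classes above are in scope; the checkpoint path is hypothetical:

import torch
import torch.nn.functional as F

device = 'cuda' if torch.cuda.is_available() else 'cpu'
ddpm = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02), n_T=400, device=device).to(device)
# ddpm.load_state_dict(torch.load('ddpm_mnist.pth'))  # hypothetical checkpoint path

c = F.one_hot(torch.full((4,), 7, dtype=torch.int64), num_classes=10).float().to(device)
ddpm.eval()
with torch.no_grad():
    imgs = ddpm.sample(n_sample=4, c=c, size=(1, 28, 28), device=device)
print(imgs.shape)  # torch.Size([4, 1, 28, 28])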
