瞬间将模型缩小为原来的 1/60 ~ 1/200

目录

  • 代码
  • 解释
  • 随机版本

代码

import paddle
import faiss
from new_model_13 import GPT as GPT13
import pandas as pd
from sklearn.preprocessing import normalize
import json
import math
from collections import Counter
from tqdm import tqdm
import numpy as np


def gen_small_voc():
    """Build the small vocabulary mapping id -> "position_char" token.

    Every character of the large vocabulary is represented as a composite
    of 16 positional tokens; each position can hold one of 62 characters
    (digits + lower/upper ASCII letters), so the small vocabulary has
    16 * 62 entries, keyed by a sequential integer id.
    """
    chars = "0123456789" + 'qwertyuiopasdfghjklzxcvbnm' + "QWERTYUIOPASDFGHJKLZXCVBNM"
    chars = list(chars)
    small_em_voc = dict()
    voc_id = 0
    for i in range(16):
        for n in chars:
            small_em_voc[voc_id] = "{}_{}".format(i, n)
            voc_id += 1
    return small_em_voc


def random_gen_voc():
    """Draw a random composite code such as "0_a#1_Q#...#15_7".

    Used to assign a fresh 16-position code to a character that is not yet
    registered in the large vocabulary.
    """
    chars = "0123456789" + 'qwertyuiopasdfghjklzxcvbnm' + "QWERTYUIOPASDFGHJKLZXCVBNM"
    chars = list(chars)
    p_list = ["{}_{}".format(i, np.random.choice(chars)) for i in range(16)]
    return "#".join(p_list)


def gen_text_voc_to_token_id(text, large_em_voc, small_voc_em):
    """Encode ``text`` into per-character lists of 16 small-vocab token ids.

    An unknown character gets a freshly drawn random composite code, which
    is registered in ``large_em_voc`` in both directions (code -> char and
    char -> code) so the mapping is reversible.

    Returns ``(token_id_lists, large_em_voc)``.
    """
    text = list(text)
    text_list = []
    for ch in text:
        code = large_em_voc.get(ch, None)
        if code is None:
            # Draw random codes until one is found that is still unused.
            while True:
                code = random_gen_voc()
                if large_em_voc.get(code, None) is None:
                    large_em_voc[code] = ch
                    large_em_voc[ch] = code
                    text_list.append([small_voc_em.get(p) for p in code.split("#")])
                    break
        else:
            text_list.append([small_voc_em.get(p) for p in code.split("#")])
    return text_list, large_em_voc


def _load_poems():
    """Load the poem corpus and keep only clean 24-character lines.

    A line is kept when its whitespace-stripped length is exactly 24 and it
    contains none of the characters that mark corrupted entries.
    """
    with open("唐诗.json", "r", encoding="utf-8") as f:
        data = json.loads(f.read())
    data = [i[4].split() for i in data if len(i[4].split()) > 3]
    data = np.hstack(data)
    bad_chars = ("a", "f", "e", "h", "X", "“", "□", "《", "》")
    return [i for i in data
            if len("".join(i.split())) == 24
            and not any(c in i for c in bad_chars)]


def train():
    """Train the small-vocabulary GPT on the poem corpus and pickle the
    model weights plus both vocabularies."""
    data = _load_poems()
    small_em_voc = gen_small_voc()
    small_voc_em = {k: v for v, k in small_em_voc.items()}
    large_em_voc = dict()
    model = GPT13(len(small_em_voc), 512, 32, 8)
    # To resume training: model.load_dict(paddle.load("gpt.pdparams"))
    print("参数量:",
          sum([i.shape[0] * i.shape[-1] if len(i.shape) > 1 else i.shape[-1]
               for i in model.parameters()]) / 1000000000, "B")
    loss_func = paddle.nn.CrossEntropyLoss()
    opt = paddle.optimizer.Adam(parameters=model.parameters(), learning_rate=0.0003)
    for epoch in range(190):
        bar = tqdm(range(0, len(data), 1000))
        for i in bar:
            j = i + 1000
            large_data = []
            for one in data[i:j]:
                two, large_em_voc = gen_text_voc_to_token_id(one, large_em_voc, small_voc_em)
                large_data.append(two)
            # Teacher forcing: input is all but the last step, target all but the first.
            out, _ = model(paddle.to_tensor(large_data)[:, :-1])
            loss = loss_func(out, paddle.to_tensor(large_data)[:, 1:].reshape([out.shape[0], -1]))
            bar.set_description("epoch___{}__loss__{}".format(epoch, loss.item()))
            opt.clear_grad()
            loss.backward()
            opt.step()
    paddle.save(model.state_dict(), "duo_yang_xing.pkl")
    pd.to_pickle(large_em_voc, "large_em_voc.pkl")
    pd.to_pickle(small_em_voc, "small_em_voc.pkl")


def val():
    """Greedy-ish generation: repeatedly predict the 16 positional tokens of
    the next character and look the character up in the large vocabulary."""
    data = _load_poems()
    small_em_voc = pd.read_pickle("small_em_voc.pkl")
    small_voc_em = {k: v for v, k in small_em_voc.items()}
    large_em_voc = pd.read_pickle("large_em_voc.pkl")
    model = GPT13(len(small_em_voc), 512, 32, 8)
    model.load_dict(paddle.load("duo_yang_xing.pkl"))
    model.eval()
    print("参数量:",
          sum([i.shape[0] * i.shape[-1] if len(i.shape) > 1 else i.shape[-1]
               for i in model.parameters()]) / 1000000000, "B")
    word = data[38][:10]
    # One row per known character; 16 columns, one per code position.
    df_large_voc = pd.DataFrame([i.split("#") for i in large_em_voc.keys() if len(i) > 10])
    top_k = 10
    for _ in range(17):
        two, large_em_voc = gen_text_voc_to_token_id(word, large_em_voc, small_voc_em)
        out, _ = model(paddle.to_tensor(two).unsqueeze(0))
        # top_k small-vocab candidates for each of the 16 positions.
        top = paddle.topk(out, top_k)
        top = top[1][0, -16:]
        candidates = [[small_em_voc.get(i.item()) for i in col] for col in top.T]
        df_out = pd.DataFrame(candidates)
        df_large_voc_copy = df_large_voc.copy()
        # Keep only known characters whose code matches the candidates
        # position by position; stop narrowing once fewer than 3 remain.
        for i in range(16):
            df_large_voc_copy = df_large_voc_copy[df_large_voc_copy[i].isin(df_out[i])]
            if len(df_large_voc_copy) < 3:
                break
        if df_large_voc_copy.empty:
            # No known character matches: widen the beam and retry this step.
            top_k += 1
            continue
        word += large_em_voc.get("#".join(df_large_voc_copy.values.tolist()[0]))
        print(word)
        top_k = 10


if __name__ == '__main__':
    # train()
    val()

解释

这段代码的目的是创建一个词到ID的映射,以便于将文本数据转换为机器学习模型可以理解的数字格式。具体来说,这个映射是通过以下步骤构建的:

  1. gen_small_voc 函数枚举 16 个位置与所有候选字符(数字、大小写字母)的组合,为每个"位置_字符"组合分配一个递增的整数 ID,构成小词表。
  2. random_gen_voc 函数为 16 个位置各随机抽取一个字符,并拼接成形如 0_a#1_Q#…#15_7 的复合编码,作为尚未登记的字符的候选 ID。
  3. gen_text_voc_to_token_id 函数接受一个文本字符串、一个大词表和一个小词表作为输入。对于文本中的每个字符,函数首先检查它是否已经在大词表中。如果不在,函数就会随机生成一个新的ID,并将其添加到大词表中。然后,函数将这个字符的ID(无论是已经存在的还是新创建的)转换为一个整数列表,并将其添加到输出列表中。
    这个构建词表的过程的主要优点是,它可以处理任何文本数据,即使数据中包含未知的字符。这是因为如果遇到一个未知的字符,函数会自动为它生成一个新的ID,并将其添加到大词表中。这使得这个方法非常灵活,可以处理各种不同的文本数据。
import math
import paddle
import paddle.nn as nn


class MaxState(paddle.nn.Layer):
    """Attention substitute: a causal windowed running-max over projected
    features, carrying a per-window state between chunks."""

    def __init__(self, hidden_dim, heads, win):
        super(MaxState, self).__init__()
        assert hidden_dim % heads == 0, "Hidden size must be divisible by the number of heads."
        self.head_size = hidden_dim // heads
        self.head = paddle.nn.Linear(hidden_dim, hidden_dim, bias_attr=False)
        self.head_num = heads
        self.win = win
        self.hidden = hidden_dim
        # Upper-triangular window mask: position t only sees positions >= t
        # inside the window (non-masked entries are forced to -inf below).
        self.mask = paddle.triu(paddle.ones([win, win]))

    def forward(self, input_data, state=None):
        b, s = input_data.shape[0], input_data.shape[1]
        k, h, w = self.head_num, self.head_size, self.win
        window = paddle.ones([1, w])
        out = self.head(input_data)
        # [b, s, hidden] -> [b, hidden, s, w]: replicate each feature across
        # the window so the max can be taken per window column.
        out = out.unsqueeze(-1) @ window
        out = out.transpose([0, 2, 1, 3])
        one_list = []
        if state is None:
            # -inf so the initial carried state never wins the running max.
            state = paddle.ones([out.shape[0], out.shape[1], 1, 1]) * float("-inf")
        for i in range(0, s, w):
            j = w + i
            one = out[:, :, i:j]
            _, _, r, c = one.shape
            # The last chunk may be shorter than the window (r != win).
            if r != self.win:
                one = paddle.where(self.mask[:r, :], one, paddle.to_tensor(-float('inf')))
            else:
                one = paddle.where(self.mask, one, paddle.to_tensor(-float('inf')))
            # Carry the previous chunk's state into this chunk's max.
            one = paddle.concat([one, state @ window], axis=2)
            state = paddle.max(one, axis=2, keepdim=True)
            one = state.reshape([b, k, h, w])
            state = state[..., -1:]
            if r != self.win:
                one = one[..., :r]
            one = one.transpose([0, 3, 1, 2])
            one_list.append(one)
        out = paddle.concat(one_list, 1)
        out = out.reshape([b, s, -1])
        return out, state


class FeedForward(nn.Layer):
    """Gated (SwiGLU-style) feed-forward block: ffn1(x) * silu(gate(x))."""

    def __init__(self, hidden_size):
        super(FeedForward, self).__init__()
        self.ffn1 = nn.Linear(hidden_size, hidden_size * 2)
        self.ffn2 = nn.Linear(hidden_size * 2, hidden_size)
        self.gate = nn.Linear(hidden_size, hidden_size * 2)
        self.relu = nn.Silu()

    def forward(self, x):
        x1 = self.ffn1(x)
        x2 = self.relu(self.gate(x))
        x = x1 * x2
        x = self.ffn2(x)
        return x


class RMSNorm(nn.Layer):
    """Root-mean-square layer normalization with a learned scale."""

    def __init__(self, dim, eps: float = 1e-6):
        super(RMSNorm, self).__init__()
        self.eps = eps
        # Learned per-feature scale, initialized to 1.
        self.fc = paddle.create_parameter(shape=[dim], dtype='float32',
                                          default_initializer=nn.initializer.Constant(value=1.0))

    def norm(self, x):
        return x * paddle.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self.norm(x)
        return output * self.fc


class GPTDecoderLayer(nn.Layer):
    """One decoder layer: MaxState mixer + gated FFN, each with a residual."""

    def __init__(self, hidden_size, num_heads):
        super(GPTDecoderLayer, self).__init__()
        self.self_attention = MaxState(hidden_size, num_heads, 8)
        self.ffn = FeedForward(hidden_size)
        self.norm = nn.LayerNorm(hidden_size)
        self.norm1 = RMSNorm(hidden_size)

    def forward(self, x, state=None, seq_len=None):
        x1, state = self.self_attention(x, state)
        x = x1 + x  # residual around the mixer
        x = self.norm(x)
        x = self.ffn(x) + x  # residual around the FFN
        x = self.norm1(x)
        return x, state


class PositionalEncoding(nn.Layer):
    """Classic sinusoidal absolute positional encoding (currently unused by
    GPT below, which applies RoPE instead)."""

    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        position = paddle.arange(max_len).unsqueeze(1).astype("float32")
        div_term = paddle.exp(paddle.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = paddle.zeros([max_len, d_model])
        pe[:, 0::2] = paddle.sin(position * div_term)
        pe[:, 1::2] = paddle.cos(position * div_term)
        self.pe = pe.unsqueeze(0)  # [1, max_len, d_model]

    def forward(self, x, seq_len=None):
        # x: [batch_size, seq_len, d_model]
        if seq_len is None:
            seq_len = x.shape[1]
            return x + self.pe[:, :seq_len, :]
        else:
            # Incremental decoding: add only the encoding of the current step.
            return x + self.pe[:, seq_len - 1:seq_len, :]


def sinusoidal_position_embedding(max_len, output_dim):
    """Return (sin, cos) tables of shape [max_len, output_dim // 2] for RoPE."""
    position = paddle.arange(0, max_len, dtype="float32").unsqueeze(-1)  # (max_len, 1)
    ids = paddle.arange(0, output_dim // 2, dtype="float32")  # i in [0, d/2)
    theta = 10000 ** (-2 * ids / output_dim)
    embeddings = position * theta  # pos / 10000^(2i/d)
    sin_embeddings = paddle.sin(embeddings)
    cos_embeddings = paddle.cos(embeddings)
    return sin_embeddings, cos_embeddings


def rope(q, sin_em, cos_em, seq_len=None):
    """Apply rotary position embedding to ``q`` ([b, heads, s, head_dim])."""
    if seq_len is None:
        sin_em = sin_em[:q.shape[2]]
        cos_em = cos_em[:q.shape[2]]
    else:
        # Incremental decoding: rotate only the current position.
        sin_em = sin_em[seq_len - 1:seq_len]
        cos_em = cos_em[seq_len - 1:seq_len]
    q1 = q.reshape([q.shape[0], q.shape[1], q.shape[2], -1, 2])[..., 1]  # odd features
    q2 = q.reshape([q.shape[0], q.shape[1], q.shape[2], -1, 2])[..., 0]  # even features
    # Pairwise rotation: (-odd*sin + even*cos, odd*cos + even*sin).
    q3 = paddle.stack([-q1 * sin_em + q2 * cos_em, q1 * cos_em + q2 * sin_em], -1)
    q = q3.reshape(q.shape)  # interleave back to the original layout
    return q


class GPT(nn.Layer):
    """Decoder-only model over the small vocabulary.

    The input has one extra trailing axis of 16 positional tokens per
    character; their embeddings are max-pooled into one vector per
    character, and the head expands each output step back to 16 logit rows
    via a Conv1D plus a residual from the raw embeddings.
    """

    def __init__(self, vocab_size, hidden_size, num_heads, num_layers):
        super(GPT, self).__init__()
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.label_embedding = nn.Embedding(vocab_size, hidden_size)
        self.decoder_layers = nn.LayerList([GPTDecoderLayer(hidden_size, num_heads) for _ in range(num_layers)])
        self.fc = nn.Linear(hidden_size, vocab_size, bias_attr=False)
        self.sin_em, self.cos_em = sinusoidal_position_embedding(50000, hidden_size // num_heads // 2)
        self.conv = paddle.nn.Conv1D(1, 16, kernel_size=3, padding=1, bias_attr=False)
        self.out = nn.Linear(16, 16, bias_attr=False)
        self.layer_nor = paddle.nn.LayerNorm(hidden_size)

    def forward(self, xx, state=None, seq_len=None):
        # xx: [b, s, 16] token ids -> [b, s, 16, hidden] embeddings.
        xx = self.embedding(xx)
        # Max-pool the 16 positional embeddings into one vector per step.
        x = paddle.max(xx, -2)
        if state is None:
            state = [None] * len(self.decoder_layers)
        i = 0
        # RoPE on a heads-first view, then residual add.
        x = rope(x.reshape([x.shape[0], x.shape[1], -1, self.sin_em.shape[1] * 2]).transpose([0, 2, 1, 3]),
                 self.sin_em,
                 self.cos_em, seq_len).transpose([0, 2, 1, 3]).reshape(x.shape) + x
        for decoder_layer in self.decoder_layers:
            x1, state[i] = decoder_layer(x, state[i])
            x = x1 + x
            i += 1
        # Expand each step back to 16 rows (one per code position), with a
        # residual from the raw positional embeddings.
        out = self.conv(x.reshape([-1, 1, x.shape[-1]])) + xx.reshape([-1, 16, x.shape[-1]])
        out = out.reshape([x.shape[0], -1, x.shape[-1]])
        out = self.fc(self.layer_nor(out))
        return out, state

随机版本

def val():
    """Random-order variant of ``val``: the 16 code positions are filtered
    in a random order instead of left-to-right, so different runs can pick
    different matching characters."""
    with open("唐诗.json", "r", encoding="utf-8") as f:
        data = f.read()
    data = json.loads(data)
    data = [i[4].split() for i in data if len(i[4].split()) > 3]
    data = np.hstack(data)
    # Keep only clean 24-character lines (no corruption-marker characters).
    bad_chars = ("a", "f", "e", "h", "X", "“", "□", "《", "》")
    data = [i for i in data
            if len("".join(i.split())) == 24
            and not any(c in i for c in bad_chars)]
    small_em_voc = pd.read_pickle("small_em_voc.pkl")
    small_voc_em = {k: v for v, k in small_em_voc.items()}
    large_em_voc = pd.read_pickle("large_em_voc.pkl")
    model = GPT13(len(small_em_voc), 512, 32, 8)
    model.load_dict(paddle.load("duo_yang_xing.pkl"))
    model.eval()
    print("参数量:",
          sum([i.shape[0] * i.shape[-1] if len(i.shape) > 1 else i.shape[-1]
               for i in model.parameters()]) / 1000000000, "B")
    word = data[38][:10]
    # One row per known character; 16 columns, one per code position.
    df_large_voc = pd.DataFrame([i.split("#") for i in large_em_voc.keys() if len(i) > 10])
    top_k = 10
    for _ in range(17):
        two, large_em_voc = gen_text_voc_to_token_id(word, large_em_voc, small_voc_em)
        out, _ = model(paddle.to_tensor(two).unsqueeze(0))
        # top_k small-vocab candidates for each of the 16 positions.
        top = paddle.topk(out, top_k)
        top = top[1][0, -16:]
        candidates = [[small_em_voc.get(i.item()) for i in col] for col in top.T]
        df_out = pd.DataFrame(candidates)
        df_large_voc_copy = df_large_voc.copy()
        # Narrow by the 16 positions in a RANDOM order; stop once fewer
        # than 3 candidate characters remain.
        for i in np.random.choice(list(range(16)), 16, replace=False):
            df_large_voc_copy = df_large_voc_copy[df_large_voc_copy[i].isin(df_out[i])]
            if len(df_large_voc_copy) < 3:
                break
        if df_large_voc_copy.empty:
            # No known character matches: widen the beam and retry this step.
            top_k += 1
            continue
        word += large_em_voc.get("#".join(df_large_voc_copy.values.tolist()[0]))
        print(word)
        top_k = 10

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/pingmian/30057.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

现网中交换机生成树优化思路

现象:交换机启动后等一段时间才可以接入网络,使用一段期间后访问其他主机速度才能加快,一段时间不用后,再使用速度又会慢下来 分析:这台交换机是一台网管交换机。所以,为了避免网络中的拓扑环引起网络瘫痪…

faiss 简单使用

Faiss(Facebook AI Similarity Search)是一个由Facebook AI Research开发的库,用于高效相似性搜索和稠密向量聚类。它提供了多种算法来处理大规模的向量搜索问题,支持多种距离度量(例如欧氏距离、内积等)…

【操作系统】操作系统实验04-文件系统扩展

题目要求: 对【程序5_9】进行扩展,要求参数为目录名,且其下至少有三层目录,分别用深度遍历及广度遍历两种方法对此目录进行遍历,输出此目录下所有文件的大小及修改时间。 1. 程序代码(注意程序格式…

解决 Visual C++ 17.5 __cplusplus 始终为 199711L 的问题

目录 软件环境 问题描述 查阅资料 解决问题 参考文献 软件环境 Visual Studio 2022, Visual C++, Version 17.5.4 问题描述 在应用 https://github.com/ToniLipponen/cpp-sqlite 的过程中,发现源代码文件 sqlite.hpp 中,有一处宏,和本项目的 C++ …

2024香港人才引进计划有哪些?申请条件、政策、利弊一次性说清楚

2024香港人才引进计划有哪些? 拥有香港身份,不仅可以享受到优质的教育资源、税收优惠、以及国际化的商业环境,还能在金融、商业、法律保障和生活品质等方面获得显著的好处。 而这,也是很多内地精英人群,通过申请香港…

threejs 头疼事情之一事件01

在做threejs相关开发的时候,或者封装成三维可视化引擎的时候,应用到项目中,总会和事件打交道,因为项目肯定有交互。但是 threejs 对事件又不友好,反正折腾来折腾去,疼痛。 在Three.js中,Object…

JS-页面截图下载为pdf

这个需要两个 js 库支持,html2canvas 和 jspdf。 下载: npm install html2canvas --save npm install jspdf --save 引用: import { jsPDF } from 'jspdf'; import html2canvas from 'html2canvas'; 直接上代码: const downlo…

哪个城市的Delphier最多?Delphier平均年龄多大了?

先来看看哪个城市的Delphier最多: 北上广深不是白叫的, 大家想换工作,就去这些大城市,机会多。 有人会觉得奇怪,怎么才这么几个人? 因为以上数据统计基数为2000人, 根据微信公众号和QQ群得出…

Linux1(介绍与基本命令1)

目录 一、初识Linux 1. Linux的起源 2. Linux是什么? 3. Linux内核版本 4. Linux的应用 5. 终端 6. Shell 7. Linux目录结构 二、基本命令 1. 基本的命令格式 2. shutdown 关机命令 3. pwd 当前工作目录 4. ls 查看目录内容 5. cd 改变工作目录 …

国际荐酒师携手各国际荐酒师专业委员会深化2024年度合作

国际荐酒师(香港)协会携手广东海上丝绸之路文化促进会及广东省城镇化发展研究会,深化2024年度合作,共同打造品荐与传播大师班,培养荐酒师专业人才 近日,国际荐酒师(香港)协会、广东海上丝绸之路…

学会python——制作一款天气查询工具(python实例七)

目录 1、认识Python 2、环境与工具 2.1 python环境 2.2 Visual Studio Code编译 3、天气查询工具 3.1 代码构思 3.2 代码示例 3.3 运行结果 4、总结 1、认识Python Python 是一个高层次的结合了解释性、编译性、互动性和面向对象的脚本语言。 Python 的设计具有很强的…

Redis在微服务架构中的角色:服务间通信与数据共享

I. 引言 A. 介绍微服务架构的概念和特点 微服务架构是一种设计模式,它将一个大型的单体应用分解成一组小的服务,每个服务都运行在其自身的进程中,独立地进行部署和扩展。这些服务之间通过轻量级的通信机制(如HTTP RESTful API)进行交互,每个服务都围绕一个特定的业务功…

打造精致UI界面:字体设计的妙招

字体设计是UI设计的关键模块之一。字体设计是否有效,可能直接成就或破坏整个UI界面。那么,界面设计的字体设计有哪些规范呢?如何设计细节字体?本文将解释字体设计规范的可读性、易读性和可用性,并介绍UI界面中的字体设计技巧。 如…

二级造价工程师建设工程造价管理试题

1、根据《建设工程质量管理条例》,在正常使用条件下,供热与供冷系统的最低保修期限是( )个采暖期、供冷期。 A.1 B.2 C.3 D.4 [答案]B 2、关于甲级工程造价咨询企业的资质标准,叙述错误的是( )。 A.企业与专职专业人员签订劳动合同…

【Python】JSON

json 一、JSON 1.1 概述 1.2 数据结构 1.3 值 1.4 字符串 1.5 数值 二、编程语言与JSON 2.1 JavaScript与JSON 2.2 Python与JSON 一、JSON 1.1 概述 JSON(JavaScript Object Notation) 是一种轻量级的数据交换格式,易于人阅读和编写。同时也易于机器解析和生成。JSON采…

如何用大模型+知识库打造微信群里的AI问答神器!

想象一下,你的微信群或公众号中,有一个AI问答专家随时待命,帮助你和你的朋友们解答各种问题,是不是很酷? 现在,让我们来看看这个项目的技术框架,一步步了解它是如何构建的: 基础起…

zookeeper学习、配置文件参数详解

zookeeper学习、配置文件参数详解 zookeeper 配置文件参数详解 tickTime、session 的过期时间、maxSessionTimeout 三者之间的关系 initLimit,syncLimit 什么区别 minSessionTimeout 默认值,**它的单位是ms** zookeeper 配置文件参数详解 ZooKeeper 是一个分布式协调服…

自动驾驶#芯片-1

概述 汽车是芯片应用场景之一,汽车芯片需要具备车规级。 • 车规级芯片对加工工艺要求不高,但对质量要求高。需要经过的认证过程,包括质量管理标准ISO/TS 16949、可靠性标准 AEC-Q100、功能安全标准ISO26262等。 • 汽车内不同用途的芯片要求…

Python中的深拷贝和浅拷贝

一、概述: Python中拷贝的方法有两种: 浅拷贝:copy() 深拷贝:copy.deepcopy() 二、理解深拷贝和浅拷贝 2.1 拷贝的应用 浅拷贝 # 原始数据 list1 = ["php", "java", "python"] # 对list进行复制…

如何卸载windows系统自带游戏

为了清晰地指导如何卸载Windows系统自带游戏,我们可以参考以下步骤进行: 方法一:通过控制面板卸载 打开控制面板,进入程序和功能。在控制面板中,找到并点击“程序和功能”。在程序列表中,找到你想要卸载的自带游戏。 方…