目录
设置参数:
训练时参数:
调用命令:
设置参数:
import argparse
import os

import torch

# ---------------------------------------------------------------------------
# Command-line arguments for single-node multi-GPU (DDP) training.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64, help='Batch size for training')
parser.add_argument('--local_rank', type=int, default=0, help='Local rank of the process')
# BUG FIX: the help string used to be a copy-paste of --local_rank's.
parser.add_argument('--device', type=str, default='0',
                    help='Comma-separated GPU ids to expose via CUDA_VISIBLE_DEVICES '
                         '(empty string keeps the externally-set value)')
args = parser.parse_args()

# BUG FIX: CUDA_VISIBLE_DEVICES must be set BEFORE any CUDA context is
# created (i.e. before torch.device/init_process_group touch the GPU), and it
# was hard-coded to "0", which pinned every rank to GPU 0 and broke the
# documented launch with --nproc_per_node 2 on GPUs 3,4. Honour --device
# instead; an empty --device (as in the sample launch command) leaves the
# environment variable exactly as the shell provided it.
if args.device:
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device

# Join the NCCL process group; rank/world size come from the env:// variables
# that torch.distributed.launch exports.
torch.distributed.init_process_group(backend='nccl', init_method='env://')

local_rank = args.local_rank
device = torch.device('cuda', local_rank)

epoch_num = 500
batch_size = args.batch_size

if torch.cuda.is_available():
    # NOTE(review): `net` is assumed to be defined earlier in the full
    # script — confirm against the surrounding file.
    net.to(device)
    net = torch.nn.parallel.DistributedDataParallel(
        net,
        device_ids=[local_rank],
        find_unused_parameters=True,
    )
训练时参数:
# Training loop: move every mini-batch onto this process's local GPU
# before it is consumed by the model.
for inputs, labels in dataloader:
    inputs = inputs.to(device)
    labels = labels.to(device)
    # ... forward / backward / optimizer step go here ...
调用命令:
CUDA_VISIBLE_DEVICES=3,4 /data3/lbg/envs/aimet_3.8/bin/python3.8 -m torch.distributed.launch --master_port 49998 --nproc_per_node 2 train.py --device '' --batch_size 256