The basic usage of argparse parsers was covered in https://blog.csdn.net/qq_43235540/article/details/140250813?spm=1001.2014.3001.5502. In practice, the parsed command-line arguments are usually combined with a configuration file, and together they make up the final configuration.
OmegaConf.load()
Reads a configuration from a .yaml file.
opt, unknown = parser.parse_known_args()
configs = [OmegaConf.load(cfg) for cfg in opt.base]
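As a minimal sketch (the file name demo.yaml and its contents are made up for illustration), OmegaConf.load returns a DictConfig whose nested keys can be accessed with attribute syntax:

from omegaconf import OmegaConf

# demo.yaml (hypothetical) contains:
#   model:
#     lr: 0.001
cfg = OmegaConf.load("demo.yaml")
print(type(cfg))     # <class 'omegaconf.dictconfig.DictConfig'>
print(cfg.model.lr)  # 0.001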
OmegaConf.from_dotlist
Use OmegaConf.from_dotlist to convert the remaining command-line arguments into a configuration object.
cli = OmegaConf.from_dotlist(unknown)
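A small sketch of what from_dotlist produces; the override strings below are invented examples, not values taken from this project:

from omegaconf import OmegaConf

cli = OmegaConf.from_dotlist(["model.params.timesteps=500", "data.params.batch_size=8"])
print(OmegaConf.to_yaml(cli))
# model:
#   params:
#     timesteps: 500
# data:
#   params:
#     batch_size: 8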
OmegaConf.merge(*configs, cli)
merge combines several configuration objects of type omegaconf.dictconfig.DictConfig into one.
config = OmegaConf.merge(*configs, cli)
# configs is a list of omegaconf.dictconfig.DictConfig objects; cli is a single omegaconf.dictconfig.DictConfig
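A minimal sketch of the merge behaviour (the keys and values here are made up): configs listed later take precedence, so the command-line overrides in cli win over the yaml files, while keys that are not overridden are kept:

from omegaconf import OmegaConf

base = OmegaConf.create({"model": {"lr": 1e-4, "timesteps": 1000}})
cli = OmegaConf.create({"model": {"lr": 2e-5}})
merged = OmegaConf.merge(base, cli)
print(merged.model.lr)         # 2e-05, the later config wins
print(merged.model.timesteps)  # 1000, untouched keys are kept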
config.pop removes an entry from the configuration (and returns it)
# Pop the "lightning" entry out of config and assign it to lightning_config; if the key is missing, the empty DictConfig created by OmegaConf.create() is used as the default.
lightning_config = config.pop("lightning", OmegaConf.create())
get retrieves an entry from the configuration without removing it
model_config = config.get("model", OmegaConf.create())
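The difference between pop and get, shown on a hand-built config (the contents are made up): pop removes the entry, get leaves it in place, and both accept a default for missing keys:

from omegaconf import OmegaConf

config = OmegaConf.create({"lightning": {"trainer": {"max_epochs": 10}},
                           "model": {"lr": 1e-4}})

lightning_config = config.pop("lightning", OmegaConf.create())  # removed from config
model_config = config.get("model", OmegaConf.create())          # still present in config
print("lightning" in config)  # False
print("model" in config)      # True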
Code
import argparse
# from pytorch_lightning import Trainer
from omegaconf import OmegaConf

parser = argparse.ArgumentParser()
parser.add_argument(
    "-b",
    "--base",
    nargs="*",
    required=False,
    metavar="base_config.yaml",
    help="paths to base configs. Loaded from left-to-right. "
         "Parameters can be overwritten or added with command-line options of the form `--key value`.",
    default=["test.yaml"],
)
# Parse the command-line arguments
opt, unknown = parser.parse_known_args()
configs = [OmegaConf.load(cfg) for cfg in opt.base]
print("configs",configs)# 使用OmegaConf.from_dotlist将命令行参数转换为配置字典
cli = OmegaConf.from_dotlist(unknown)
config = OmegaConf.merge(*configs, cli)
lightning_config = config.pop("lightning", OmegaConf.create())
model_config = config.get("model", OmegaConf.create())
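Assuming the script above is saved as main.py (the file name is only an assumption), the base yaml files are passed to opt.base via -b, while any extra key=value arguments fall into unknown and override the yaml values, for example:

python main.py -b test.yaml model.params.timesteps=500 data.params.batch_size=8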
Configuration file test.yaml
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    log_every_t: 200
    timesteps: 1000
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 1
        out_channels: 1
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    train:
      target: ldm.data.lsun.LSUNBedroomsTrain
      params:
        size: 64
    validation:
      target: ldm.data.lsun.LSUNBedroomsValidation
      params:
        size: 64
lightning:
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 10000000000
        max_images: 1
        increase_log_steps: False
  trainer:
    benchmark: True
    max_epochs: 200000
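With this test.yaml and no extra command-line overrides, the snippet above would roughly behave as follows (a sketch of the expected values, not captured program output):

print(model_config.target)                  # ldm.models.diffusion.ddpm.LatentDiffusion
print(model_config.params.timesteps)        # 1000
print(lightning_config.trainer.max_epochs)  # 200000
print("lightning" in config)                # False, because it was popped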