DataX: auto-generating config JSON, creating ODS tables, and a multi-threaded scheduling script [MySQL --> Hive]

These scripts auto-generate DataX JSON config files and run them in multiple threads (MySQL --> Hive only). For other source/target combinations, adapt the JSON template yourself.

Running the DataX jobs

datax_run.py

# Runs the ODS extraction jobs of the given project. Without arguments, it runs every
# job under job/dw whose file name passes the prefix filter.
# Usage: python3 datax_run.py --hive <hive_db> -f <prefix or comma-separated file names>
import os
import re
import sys
import json
import time
import argparse
import subprocess
from threading import Thread

# from logger import create_logger
# must be run from the DataX home directory
datax_home = None
datax = "bin/datax.py"
datax_config_dir = "job/dw"
logs_dir = "logs"
prefix = ["ods_"]
subfix = [".json"]
thread_num = 8
task_count = 0


def init():
    global datax_home
    # verify the environment: DATAX_HOME must be set
    environ = os.environ
    if "DATAX_HOME" not in environ:
        print("environment variable [DATAX_HOME] not found")
        return False
    datax_home = environ.get("DATAX_HOME")
    if datax_home is None:
        print("environment variable [DATAX_HOME] has no value")
        return False
    else:
        print(f"datax_home:{datax_home}")
        return True


if not init():
    print("initialization failed, exiting...")
    exit(-1)
# log = create_logger("datax_run", f"{datax_home}/logs/")


def extract_filename(file_path):
    re_search = re.search(r'([^\\/]+)\.json$', file_path)
    if re_search:
        return re_search.group(1)
    else:
        print("could not extract the file name, please check...")
        return None


def check_args():
    parser = argparse.ArgumentParser(description='DataX batch job runner')
    # --hive: target Hive database (also selects the config directory job/{hive})
    parser.add_argument("--hive", type=str, help="Hive database")
    # -f/--filter: comma-separated prefixes of the JSON files to run
    parser.add_argument("-f", "--filter", type=str, help="filter condition")
    args = parser.parse_args()
    hive = args.hive
    args_filter = args.filter
    global prefix, datax_config_dir, logs_dir
    if hive is None:
        print(f"using default config directory [{datax_config_dir}]")
    else:
        print("target Hive database:", hive)
        datax_config_dir = f"job/{hive}"
        logs_dir = f"{logs_dir}/{hive}"
    if args_filter is not None:
        print("filter:", args_filter)
        prefix.clear()
        for config in args_filter.split(","):
            prefix.append(config)
    elif hive is not None:
        prefix = ["ods_"]
    print(f"initialized: config dir --> {datax_config_dir}\nprefix --> {prefix}")


def run_sub_shell(cmd):
    try:
        # cmd = f"source /etc/profile && cd {datax_home} && " + cmd
        output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                                         encoding="utf-8", cwd=datax_home)
        print(f'Command "{cmd}" executed successfully. Output: \n{output}')
        return output
    except subprocess.CalledProcessError as e:
        print(f'Command "{cmd}" failed with error: \n{e.output}')
        exit(-1)


def check_status(log_path):
    with open(file=log_path, mode="r", encoding="utf-8") as fp:
        if "completed successfully" in fp.read():
            print(f"DataX job succeeded: {log_path}")
            return True
        else:
            print(f"DataX job failed: {log_path}")
            exit(-1)


def init_log():
    from datetime import datetime
    date_str = datetime.today().date()
    # create the per-day log directory
    global logs_dir
    logs_dir = f"{datax_home}/{logs_dir}/{date_str}"
    os.makedirs(logs_dir, exist_ok=True)
    print(f"logs dir[{logs_dir}]")


def match_config(x: str, prefix: list, subfix: list):
    # keep files that start with one of the prefixes and end with one of the suffixes
    for pre in prefix:
        if x.startswith(pre):
            for sub in subfix:
                if x.endswith(sub):
                    return True
    return False


def thread_run(config):
    global task_count
    config_name = extract_filename(config)
    cmd = f"python {datax} {config}"
    # cmd = f"python {datax} {config} > {logs_dir}/{config_name}.log"
    output = run_sub_shell(cmd)
    if "completed successfully" in output:
        task_count -= 1
        print(f"synchronized [{config_name}] successfully, {task_count} tasks remaining...")
    else:
        print(f"synchronizing [{config_name}] failed!")
        exit(-1)


def gen_thread_data():
    full_paths = []
    # collect matching config files under the config directory
    for dirpath, dirnames, filenames in os.walk(f"{datax_home}/{datax_config_dir}"):
        configs = filter(lambda x: match_config(x, prefix, subfix), filenames)
        full_paths += [dirpath + "/" + x for x in configs]
    return full_paths


def future_thread():
    from concurrent import futures
    thread_data = gen_thread_data()
    global task_count
    task_count = len(thread_data)
    print(f"number of extraction tasks to run: {task_count}")
    with futures.ThreadPoolExecutor(max_workers=thread_num) as executor:
        for elem in thread_data:
            executor.submit(thread_run, elem)


def start():
    check_args()
    # init_log()
    future_thread()


if __name__ == '__main__':
    start()
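One caveat with the script above: thread_run decrements the shared task_count counter from several pool threads at once, which is not atomic, so the "tasks remaining" message can occasionally be off. A minimal sketch of guarding the counter with a lock (the lock and helper are illustrative additions, not part of the original script):

from threading import Lock

task_count_lock = Lock()  # illustrative addition, not in the original script

def dec_task_count():
    # atomically decrement the shared counter and return the remaining task count
    global task_count
    with task_count_lock:
        task_count -= 1
        return task_count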

Generating DataX configs and creating ODS tables

build_core.py generates the DataX JSON configs from a project config module and automatically creates the Hive ODS tables:

import json
import re
import sys
from pathlib import Path
import mysql.connector
from pyhive import hive
import os
import subprocess

datax_home = None


# environment check
def init():
    global datax_home
    # verify the environment: DATAX_HOME must be set
    # must be run from the DataX home directory
    environ = os.environ
    if "DATAX_HOME" not in environ:
        print("environment variable [DATAX_HOME] not found")
        return False
    datax_home = environ.get("DATAX_HOME")
    if datax_home is None:
        print("environment variable [DATAX_HOME] has no value")
        return False
    else:
        print(f"datax_home:{datax_home}")
        return True


if not init():
    print("initialization failed, exiting...")
    exit(-1)


# Generates the DataX MySQL-->Hive config files.
# Different projects/databases use different config modules and produce different JSON files.
def dynamic_import():
    import importlib.util
    argv = sys.argv
    if len(argv) <= 1:
        print("please pass the python config module to load!")
        exit(-1)
    module_ = argv[1]
    try:
        print("importing the config module with __import__")
        module = __import__(module_)
    except Exception:
        print("__import__ failed, falling back to importlib")
        args = module_.split(os.sep)
        if len(args) == 1:
            module_name = args[0]
            module_path = module_name
        elif len(args) > 1:
            module_name = args[-1]
            module_path = module_
        print(f"module_path:{module_path}\nmodule_name:{module_name}")
        spec = importlib.util.spec_from_file_location(module_name, module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    # inject the config module's variables into this module's globals
    m_keys = module.__dict__
    key_list = list(m_keys.keys())
    for k in key_list:
        if not str(k).startswith("__"):
            globals()[k] = m_keys.get(k)
    return module


dynamic_import()

# the following names are injected into globals() by dynamic_import() from the config module
global_config = project_config
config_source = source_ds
project_path = global_config['project_path']
hive_host = global_config['hive_host']
hive_port = global_config['hive_port']
hive_db = global_config['hive_db']
use_kb = global_config['enable_kerberos']
use_pt = global_config['enable_partition']
keytab = global_config['key_table']
principal = global_config['principal']


# load the project's databases and tables
def load_db():
    if isinstance(config_source, list):
        # multi-source / multi-database mode
        for source in config_source:
            db_tables_ = source["db_tables"]
            db_connect = source["connect"]
            host_ = db_connect['host']
            port_ = db_connect['port']
            username_ = db_connect['username']
            password_ = db_connect['password']
            for db_info in db_tables_:
                db_ = db_info["db"]
                if "project" in dict(db_info).keys():
                    project_ = db_info["project"]
                else:
                    project_ = None
                tables_ = db_info["tables"]
                query_table(host_, port_, username_, password_, db_, project_, tables_)
    else:
        print("problem loading the source_ds config...")


def save_local(save_path, datax_json):
    path = Path(f'../job/{save_path}')
    if datax_home is not None:
        path = Path(f"{datax_home}/job/{save_path}")
    elif not Path('../').exists():
        path = Path(f"job/{save_path}")
    path.parent.mkdir(parents=True, exist_ok=True)
    # overwrite the file
    path.write_text(datax_json, encoding="utf-8")


def camel_to_snake(field: str):
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', field).lower()


def is_camel(s):
    return bool(re.match(r'^[a-z]+([A-Z][a-z]*)*$', s))


def convert_field(field):
    table_name = field[0]
    field_name = field[1]
    field_type = field[2]
    field_comment = field[3]
    # camelCase field names are converted to snake_case
    if is_camel(field_name):
        table_name = f"camel_{table_name}"
        field_name = camel_to_snake(field_name)
        field_comment = f"({field[1]}){field_comment}"
    return [table_name, field_name, field_type, field_comment]


def convert_ods_field(field):
    field_name = field['field_name']
    field_type = field['field_type']
    field_hive_type = field['field_hive_type']
    field_comment = field['field_comment']
    # camelCase field names are converted to snake_case
    if is_camel(field_name):
        field_name = camel_to_snake(field_name)
        field_comment = f"({field['field_name']}){field_comment}"
    return {"field_name": field_name, "field_type": field_type, "field_hive_type": field_hive_type,
            "field_comment": field_comment}


def build_db(tables: list):
    database = {}
    for table in tables:
        # group the queried column metadata by table
        table_name = table[0]
        field_name = table[1]
        field_type = table[2]
        field_comment = table[3]
        table_fields: list = database.get(table_name)
        field_hive_type = hive_type(field_type)
        field_one = {"field_name": field_name, "field_type": field_type, "field_hive_type": field_hive_type,
                     "field_comment": field_comment}
        if table_fields is not None:
            table_fields.append(field_one)
        else:
            table_fields = [field_one]
        database[table_name] = table_fields
    return database


def run_sub_shell(cmd):
    try:
        # cmd = f"source /etc/profile && cd {datax_home} && " + cmd
        output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                                         encoding="utf-8", cwd=datax_home)
        print(f'Command "{cmd}" executed successfully. Output: \n{output}')
        return output
    except subprocess.CalledProcessError as e:
        print(f'Command "{cmd}" failed with error: \n{e.output}')
        exit(-1)


def hive_file_sql(create_db_sql):
    # write the DDL to a temporary file
    os.makedirs(f"{datax_home}/tmp", exist_ok=True)
    tmp_hql = f"{datax_home}/tmp/_hive_sql.hql"
    with open(tmp_hql, mode="w", encoding="utf-8") as fp:
        fp.write(create_db_sql)
    # run it with hive -f
    if os.path.exists(tmp_hql):
        run_sub_shell(f"hive -f {tmp_hql}")
    else:
        print(f"file {tmp_hql} does not exist...")
    # remove the temporary file
    os.remove(tmp_hql)


def query_table(host, port, user, password, db, project_, include_tables):
    # connect to the MySQL database
    conn = mysql.connector.connect(host=host, port=port, user=user, password=password, database=db)
    cursor = conn.cursor()
    query_col_sql = f"select table_name,column_name,data_type,column_comment from information_schema.`COLUMNS` where table_schema='{db}' "
    if len(include_tables) > 0:
        name_str = ",".join([f"'{x}'" for x in include_tables])
        table_filter = f' and table_name in({name_str})'
        query_col_sql += table_filter
    else:
        print(f"querying all tables of database [{db}]")
    # fetch the column metadata of the selected tables
    # print(query_col_sql)
    cursor.execute(query_col_sql)
    tables = cursor.fetchall()
    database = build_db(tables)
    create_db_sql = f"use {hive_db};"
    # generate one DataX config file per table
    for table_name in database.keys():
        table_fields = database[table_name]
        ods_source, ods_table, datax_json = build_datax(host, port, user, password, db, project_,
                                                        table_name, table_fields)
        # DataX/Hive table names are lower-cased; the DataX config keeps the source table's original case
        save_local(f"{hive_db}/{ods_table}.json", datax_json)
        print(f"generated DataX config --> {hive_db}/{ods_table}.json")
        # append the create-table statement
        create_db_sql += build_create_hive(ods_table, table_fields)
        print(f"creating Hive table --> {hive_db}.{ods_table}")
    hive_file_sql(create_db_sql)
    # print(create_db_sql)
    cursor.close()
    conn.close()
    print(f"finished generating DataX configs and Hive ODS tables for database [{db}]\n")


def exec_hive_sql(sql_list=["show databases"]):
    # connect to the Hive server
    if use_kb:
        conn = hive.Connection(host=hive_host, port=hive_port, database=hive_db, auth='KERBEROS',
                               kerberos_service_name='hive')
    else:
        conn = hive.Connection(host=hive_host, port=hive_port, database=hive_db)
    cursor = conn.cursor()
    for sql in sql_list:
        # print(f"executing sql:\n{sql}\n")
        cursor.execute(sql)
    cursor.close()
    conn.close()


def build_create_hive(hive_table, fields):
    # build the create-table statement
    stored = "orc"
    hive_fields = list(map(convert_ods_field, fields))
    field_sql = ",\n".join(
        map(lambda x: f"\t\t`{x['field_name']}` {x['field_hive_type']} comment '{x['field_comment']}'", hive_fields))
    dw_type = hive_table.split("_")[0]
    partition_sql = ""
    if use_pt:
        partition_sql = "partitioned by(pt_day string comment 'format: YYYYMMDD')"
    create_sql = f"""
drop table if exists {hive_table};
create external table if not exists {hive_table}(
{field_sql}
)
{partition_sql}
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
stored as {stored}
LOCATION '{project_path}/{dw_type}/{hive_table}'
TBLPROPERTIES('orc.compress'='SNAPPY');
"""
    # print(create_sql)
    return create_sql


def unify_name(database: str):
    snake_case = re.sub(r'(?<!^)(?=[A-Z])', '_', database).lower()
    return snake_case.replace("_", "").replace("-", "")


def build_datax(host, port, username, password, database, project_, source_table, fields):
    if project_ is None or len(project_) == 0:
        ods_source = unify_name(database)
    else:
        ods_source = unify_name(project_)
    pt_str = '_pt' if use_pt else ''
    ods_table = f"ods_{ods_source}_{source_table}{pt_str}_a".lower()
    jdbc_url = f"jdbc:mysql://{host}:{port}/{database}?useSSL=false&useUnicode=true&allowMultiQueries=true&characterEncoding=utf8&characterSetResults=utf8&serverTimezone=Asia/Shanghai"
    columns = ",".join([f'"`{field["field_name"]}`"' for field in fields])
    hive_fields = list(map(convert_ods_field, fields))
    hive_columns = ",".join(
        [f'{{"name":"{field["field_name"]}","type":"{field["field_hive_type"]}" }}' for field in hive_fields])
    pt_config = '"ptDay": "pt_day",' if use_pt else ""
    kerberos_config = f'"haveKerberos": "{use_kb}","kerberosKeytabFilePath": "{keytab}","kerberosPrincipal": "{principal}",' if use_kb else ""
    mysql_hive_tpl = '''{
  "job": {
    "setting": {
      "speed": {"channel": 3, "byte": -1},
      "errorLimit": {"record": 0, "percentage": 0}
    },
    "content": [
      {
        "reader": {
          "name": "mysqlreader",
          "parameter": {
            "username": "${username}",
            "password": "${password}",
            "column": [${columns}],
            "splitPk": null,
            "connection": [
              {"table": ["${sourceTable}"], "jdbcUrl": ["${jdbcUrl}"]}
            ]
          }
        },
        "writer": {
          "name": "hdfswriter",
          "parameter": {
            "defaultFS": "hdfs://master:8020",
            "fileType": "orc",
            ${kerberosConfig}
            ${ptConfig}
            "path": "${projectPath}/ods/${odsTable}",
            "fileName": "${odsTable}",
            "column": [${hiveColumns}],
            "writeMode": "truncate",
            "fieldDelimiter": "\\t",
            "compress": "SNAPPY",
            "database": "${database}"
          }
        }
      }
    ]
  }
}'''
    var_dict = {"username": username, "password": password, "columns": columns, "sourceTable": source_table,
                "jdbcUrl": jdbc_url, "kerberosConfig": kerberos_config, "ptConfig": pt_config,
                "projectPath": project_path, "odsTable": ods_table, "hiveColumns": hive_columns,
                "database": hive_db}
    for k in var_dict.keys():
        mysql_hive_tpl = mysql_hive_tpl.replace('${' + k + '}', var_dict[k])
    data = json.loads(mysql_hive_tpl)
    data = json.dumps(data, indent=2, ensure_ascii=False).replace("True", "true").replace("False", "false")
    return ods_source, ods_table, data


def hive_type(mysql_type):
    # map MySQL column types to Hive column types
    if mysql_type == "tinyint" or mysql_type == "smallint" or mysql_type == "boolean":
        return "smallint"
    elif mysql_type == "long" or mysql_type == "int":
        return "bigint"
    elif mysql_type == "float" or mysql_type == "double" or mysql_type == "decimal":
        return "double"
    elif mysql_type == "date":
        return "date"
    elif mysql_type == "timestamp":
        return "timestamp"
    else:
        return "string"


if __name__ == '__main__':
    load_db()
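As a quick sanity check of the MySQL-to-Hive type mapping and the ODS naming rule, here is a small illustrative sketch (the sample column types and database/table names are made up, and it assumes it runs in build_core.py's namespace where hive_type is defined):

# illustrative only: sample MySQL types and their mapped Hive types
for t in ["tinyint", "int", "decimal", "varchar", "datetime", "timestamp"]:
    print(t, "->", hive_type(t))
# tinyint -> smallint, int -> bigint, decimal -> double,
# varchar -> string, datetime -> string, timestamp -> timestamp

# naming rule from build_datax: ods_{source}_{table}{_pt if partitioned}_a, lower-cased,
# e.g. database "SalesDB", table "view_online", partitioning disabled:
#   ods_salesdb_view_online_a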

Config file template config_xx.py

# Currently only MySQL --> Hive is supported
project_name = "project_name"
hive_host = "master"
hive_db = "hive_db"
# HDFS path
if project_name == 'project_name':
    project_path = f"/project/{project_name}/{hive_db}"
else:
    project_path = f"/project/{project_name}/warehouse/{hive_db}"
# main configuration
project_config = {
    "project_name": project_name,
    "hive_host": hive_host,
    "hive_port": 10000,
    "project_path": project_path,
    # writer: target Hive database
    "hive_db": hive_db,
    "enable_kerberos": True,    # whether Kerberos is enabled
    "enable_partition": False,  # whether the ODS tables are partitioned
    # usually no need to change the following
    "default_fs": f"hdfs://{hive_host}:8020",
    "principal": "principal",
    "key_table": "hdfs.keytab"  # only needed when Kerberos is enabled
}
# if no table names are given, the whole database is scanned
# MySQL source(s) to extract from
source_ds = [
    {
        "db_tables": [
            {"db": "database", "tables": ["view_online"]},
        ],
        "connect": {
            "host": "host",
            "port": 23306,
            "username": "username",
            "password": "password",
        }
    }
]
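In build_core.py, load_db iterates over a list of sources; each source has one connection and one or more databases, each database entry may carry an optional "project" key that replaces the database name in the generated ODS prefix, and an empty "tables" list means the whole database is scanned. A sketch of a multi-source config under those assumptions (hosts, names and credentials below are placeholders):

source_ds = [
    {
        "db_tables": [
            # empty "tables": every table in the database is processed
            {"db": "database_a", "tables": []},
            # optional "project": used instead of the db name in ods_{source}_{table}_a
            {"db": "database_b", "project": "proj_b", "tables": ["orders", "users"]},
        ],
        "connect": {"host": "host_a", "port": 3306,
                    "username": "username", "password": "password"}
    },
    {
        "db_tables": [{"db": "database_c", "tables": ["view_online"]}],
        "connect": {"host": "host_b", "port": 23306,
                    "username": "username", "password": "password"}
    }
]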

Usage:

Set the DATAX_HOME environment variable, place the scripts under $DATAX_HOME/bin, and create the job directory yourself.

Generate the DataX job JSON files; they are written under $DATAX_HOME/job/{hive_database}:

python3 build_core.py config/config_dw.py
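With the sample config above (database "database", table "view_online", partitioning disabled), this should produce a file along the lines of $DATAX_HOME/job/hive_db/ods_database_view_online_a.json plus the matching external ORC table in the hive_db database; the exact names depend on your config values.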

Run the extraction jobs with multiple threads:

cd $DATAX_HOME
python3 bin/datax_run.py --hive hive_database

--hive selects the Hive database; the JSON files to execute are read from $DATAX_HOME/job/{hive_database}.
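datax_run.py also accepts -f/--filter to restrict the run to JSON files whose names start with one of the given comma-separated prefixes, for example (the table names here are placeholders):

python3 bin/datax_run.py --hive hive_database -f ods_hivedb_orders,ods_hivedb_users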
