Springboot整合HBase

Springboot整合HBase数据库

1、添加依赖
<!-- Spring Boot HBase 依赖 -->
<dependency>
    <groupId>com.spring4all</groupId>
    <artifactId>spring-boot-starter-hbase</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-hadoop-hbase</artifactId>
    <version>2.5.0.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-hadoop</artifactId>
    <version>2.5.0.RELEASE</version>
</dependency>
2、添加配置
通过Yaml方式配置
spring:
  data:
    hbase:
      quorum: XXX
      rootDir: XXX
      nodeParent: XXX
hbase:
  zookeeper:
    quorum: hbase1.xxx.org,hbase2.xxx.org,hbase3.xxx.org
    property:
      clientPort: 2181
zookeeper:
  znode:
    parent: /hbase
3、添加配置类
@Configuration
public class HBaseConfig {@Beanpublic HBaseService getHbaseService() {//设置临时的hadoop环境变量,之后程序会去这个目录下的\bin目录下找winutils.exe工具,windows连接hadoop时会用到//System.setProperty("hadoop.home.dir", "D:\\Program Files\\Hadoop");//执行此步时,会去resources目录下找相应的配置文件,例如hbase-site.xmlorg.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();return new HBaseService(conf);}
}
4、工具类的方式实现HBASE操作
@Service
public class HBaseService {private Admin admin = null;private Connection connection = null;public HBaseService(Configuration conf) {connection = ConnectionFactory.createConnection(conf);admin = connection.getAdmin();}//创建表 create <table>, {NAME => <column family>, VERSIONS => <VERSIONS>}public boolean creatTable(String tableName, List<String> columnFamily) {//列族column familyList<ColumnFamilyDescriptor> cfDesc = new ArrayList<>(columnFamily.size());columnFamily.forEach(cf -> {cfDesc.add(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf)).build());});//表 tableTableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamilies(cfDesc).build();if (admin.tableExists(TableName.valueOf(tableName))) {log.debug("table Exists!");} else {admin.createTable(tableDesc);log.debug("create table Success!");}close(admin, null, null);return true;}public List<String> getAllTableNames() {List<String> result = new ArrayList<>();TableName[] tableNames = admin.listTableNames();for (TableName tableName : tableNames) {result.add(tableName.getNameAsString());}close(admin, null, null);return result;}public Map<String, Map<String, String>> getResultScanner(String tableName) {Scan scan = new Scan();return this.queryData(tableName, scan);}private Map<String, Map<String, String>> queryData(String tableName, Scan scan) {// <rowKey,对应的行数据>Map<String, Map<String, String>> result = new HashMap<>();ResultScanner rs = null;//获取表Table table = null;table = getTable(tableName);rs = table.getScanner(scan);for (Result r : rs) {// 每一行数据Map<String, String> columnMap = new HashMap<>();String rowKey = null;// 行键,列族和列限定符一起确定一个单元(Cell)for (Cell cell : r.listCells()) {if (rowKey == null) {rowKey = Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());}columnMap.put(//列限定符Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),//列族Bytes.toString(cell.getValueArray(), cell.getValueOffset(), 
cell.getValueLength()));}if (rowKey != null) {result.put(rowKey, columnMap);}}close(null, rs, table);return result;}public void putData(String tableName, String rowKey, String familyName, String[] columns, String[] values) {Table table = null;table = getTable(tableName);putData(table, rowKey, tableName, familyName, columns, values);close(null, null, table);}private void putData(Table table, String rowKey, String tableName, String familyName, String[] columns, String[] values) {//设置rowkeyPut put = new Put(Bytes.toBytes(rowKey));if (columns != null && values != null && columns.length == values.length) {for (int i = 0; i < columns.length; i++) {if (columns[i] != null && values[i] != null) {put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes(columns[i]), Bytes.toBytes(values[i]));} else {throw new NullPointerException(MessageFormat.format("列名和列数据都不能为空,column:{0},value:{1}", columns[i], values[i]));}}}table.put(put);log.debug("putData add or update data Success,rowKey:" + rowKey);table.close();}private Table getTable(String tableName) throws IOException {return connection.getTable(TableName.valueOf(tableName));}private void close(Admin admin, ResultScanner rs, Table table) {if (admin != null) {try {admin.close();} catch (IOException e) {log.error("关闭Admin失败", e);}if (rs != null) {rs.close();}if (table != null) {rs.close();}if (table != null) {try {table.close();} catch (IOException e) {log.error("关闭Table失败", e);}}}}
}
测试类
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
class HBaseApplicationTests {

    @Resource
    private HBaseService hbaseService;

    /** Creates the sample table with column families "a" and "back". */
    @Test
    public void testCreateTable() {
        hbaseService.creatTable("test_base", Arrays.asList("a", "back"));
    }

    /** Inserts three sample rows into column family "a". */
    @Test
    public void testPutData() {
        String[] columns = {"project_id", "varName", "coefs", "pvalues", "tvalues", "create_time"};
        hbaseService.putData("test_base", "000001", "a", columns,
                new String[]{"40866", "mob_3", "0.9416", "0.0000", "12.2293", "null"});
        hbaseService.putData("test_base", "000002", "a", columns,
                new String[]{"40866", "idno_prov", "0.9317", "0.0000", "9.8679", "null"});
        hbaseService.putData("test_base", "000003", "a", columns,
                new String[]{"40866", "education", "0.8984", "0.0000", "25.5649", "null"});
    }

    /** Scans the entire table and prints every row. */
    @Test
    public void testGetResultScanner() {
        Map<String, Map<String, String>> rows = hbaseService.getResultScanner("test_base");
        System.out.println("-----遍历查询全表内容-----");
        rows.forEach((rowKey, cells) -> System.out.println(rowKey + "--->" + cells));
    }
}

三、使用spring-data-hadoop-hbase

3、配置类
@Configuration
public class HBaseConfiguration {

    @Value("${hbase.zookeeper.quorum}")
    private String zookeeperQuorum;

    @Value("${hbase.zookeeper.property.clientPort}")
    private String clientPort;

    @Value("${zookeeper.znode.parent}")
    private String znodeParent;

    /** Exposes an HbaseTemplate wired from the three ZooKeeper properties above. */
    @Bean
    public HbaseTemplate hbaseTemplate() {
        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
        conf.set("hbase.zookeeper.quorum", zookeeperQuorum);
        conf.set("hbase.zookeeper.property.clientPort", clientPort);
        conf.set("zookeeper.znode.parent", znodeParent);
        return new HbaseTemplate(conf);
    }
}
4、业务类中使用HbaseTemplate

这个是作为工具类

@Service
@Slf4j
public class HBaseService {@Autowiredprivate HbaseTemplate hbaseTemplate;//查询列簇public List<Result> getRowKeyAndColumn(String tableName, String startRowkey, String stopRowkey, String column, String qualifier) {FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);if (StringUtils.isNotBlank(column)) {log.debug("{}", column);filterList.addFilter(new FamilyFilter(CompareFilter.CompareOp.EQUAL,new BinaryComparator(Bytes.toBytes(column))));}if (StringUtils.isNotBlank(qualifier)) {log.debug("{}", qualifier);filterList.addFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(qualifier))));}Scan scan = new Scan();if (filterList.getFilters().size() > 0) {scan.setFilter(filterList);}scan.setStartRow(Bytes.toBytes(startRowkey));scan.setStopRow(Bytes.toBytes(stopRowkey));return hbaseTemplate.find(tableName, scan, (rowMapper, rowNum) -> rowMapper);}public List<Result> getListRowkeyData(String tableName, List<String> rowKeys, String familyColumn, String column) {return rowKeys.stream().map(rk -> {if (StringUtils.isNotBlank(familyColumn)) {if (StringUtils.isNotBlank(column)) {return hbaseTemplate.get(tableName, rk, familyColumn, column, (rowMapper, rowNum) -> rowMapper);} else {return hbaseTemplate.get(tableName, rk, familyColumn,(rowMapper, rowNum) -> rowMapper);}}return hbaseTemplate.get(tableName, rk, (rowMapper, rowNum) -> rowMapper);}).collect(Collectors.toList());}
}

四、使用spring-boot-starter-data-hbase

参考:https://blog.csdn.net/cpongo1/article/details/89550486

## 下载spring-boot-starter-hbase代码
git clone https://github.com/SpringForAll/spring-boot-starter-hbase.git
## 安装
cd spring-boot-starter-hbase
mvn clean install
2、添加配置项
  • spring.data.hbase.quorum 指定 HBase 的 zk 地址
  • spring.data.hbase.rootDir 指定 HBase 在 HDFS 上存储的路径
  • spring.data.hbase.nodeParent 指定 ZK 中 HBase 的根 ZNode
3、定义好DTO
@Data
public class City {
    // row id — NOTE(review): never populated by CityRowMapper below; confirm
    // whether it is filled elsewhere
    private Long id;
    private Integer age;
    private String cityName;
}
4、创建对应rowMapper
public class CityRowMapper implements RowMapper<City> {private static byte[] COLUMN_FAMILY = "f".getBytes();private static byte[] NAME = "name".getBytes();private static byte[] AGE = "age".getBytes();@Overridepublic City mapRow(Result result, int rowNum) throws Exception {String name = Bytes.toString(result.getValue(COLUMN_FAMILY, NAME));int age = Bytes.toInt(result.getValue(COLUMN_FAMILY, AGE));City dto = new City();dto.setCityName(name);dto.setAge(age);return dto;}
}
5、操作实现增改查
  • HbaseTemplate.find 返回 HBase 映射的 City 列表
  • HbaseTemplate.get 返回 row 对应的 City 信息
  • HbaseTemplate.saveOrUpdates 保存或者更新
    如果 HbaseTemplate 操作不满足需求,完全可以使用 hbaseTemplate 的getConnection() 方法,获取连接。进而类似 HbaseTemplate 实现的逻辑,实现更复杂的需求查询等功能
@Service
public class CityServiceImpl implements CityService {

    @Autowired
    private HbaseTemplate hbaseTemplate;

    /** Range scan over [startRow, stopRow) with scanner caching enabled. */
    public List<City> query(String startRow, String stopRow) {
        Scan scan = new Scan(Bytes.toBytes(startRow), Bytes.toBytes(stopRow));
        scan.setCaching(5000);
        return this.hbaseTemplate.find("people_table", scan, new CityRowMapper());
    }

    /** Single-row lookup by row key. */
    public City query(String row) {
        return this.hbaseTemplate.get("people_table", row, new CityRowMapper());
    }

    /** Upserts one cell through a batched mutation list. */
    public void saveOrUpdate() {
        Put put = new Put(Bytes.toBytes("135xxxxxx"));
        put.addColumn(Bytes.toBytes("people"), Bytes.toBytes("name"), Bytes.toBytes("test"));
        List<Mutation> mutations = new ArrayList<Mutation>();
        mutations.add(put);
        this.hbaseTemplate.saveOrUpdates("people_table", mutations);
    }
}

Springboot整合Influxdb

中文文档:https://jasper-zhang1.gitbooks.io/influxdb/content/Introduction/installation.html

注意,项目建立在spring-boot-web基础上

1、添加依赖
<dependency>
    <groupId>org.influxdb</groupId>
    <artifactId>influxdb-java</artifactId>
    <version>2.15</version>
</dependency>
2、添加配置
spring:
  influx:
    database: my_sensor1
    password: admin
    url: http://127.0.0.1:6086
    user: admin
3、编写配置类
@Configuration
public class InfluxdbConfig {@Value("${spring.influx.url}")private String influxDBUrl; @Value("${spring.influx.user}")private String userName;    @Value("${spring.influx.password}")private String password;    @Value("${spring.influx.database}")private String database;    @Bean("influxDB")public InfluxDB influxdb(){     InfluxDB influxDB = InfluxDBFactory.connect(influxDBUrl, userName, password);try {/** * 异步插入:* enableBatch这里第一个是point的个数,第二个是时间,单位毫秒    * point的个数和时间是联合使用的,如果满100条或者60 * 1000毫秒   * 满足任何一个条件就会发送一次写的请求。*/influxDB.setDatabase(database).enableBatch(100,1000 * 60, TimeUnit.MILLISECONDS);} catch (Exception e) { e.printStackTrace();} finally { //设置默认策略influxDB.setRetentionPolicy("sensor_retention");    }//设置日志输出级别influxDB.setLogLevel(InfluxDB.LogLevel.BASIC);  return influxDB;}
}
4、InfluxDB原生API实现
@SpringBootTest(classes = {MainApplication.class})
@RunWith(SpringJUnit4ClassRunner.class)
public class InfluxdbDBTest {@Autowiredprivate InfluxDB influxDB;//measurementprivate final String measurement = "sensor";@Value("${spring.influx.database}")private String database;/*** 批量插入第一种方式*/@Testpublic void insert(){List<String> lines = new ArrayList<String>();       Point point = null;     for(int i=0;i<50;i++){          point = Point.measurement(measurement).tag("deviceId", "sensor" + i).addField("temp", 3).addField("voltage", 145+i).addField("A1", "4i").addField("A2", "4i").build();lines.add(point.lineProtocol());}//写入influxDB.write(lines);}/*** 批量插入第二种方式*/@Testpublic void batchInsert(){BatchPoints batchPoints = BatchPoints.database(database).consistency(InfluxDB.ConsistencyLevel.ALL).build();//遍历sqlserver获取数据for(int i=0;i<50;i++){//创建单条数据对象——表名Point point = Point.measurement(measurement)//tag属性——只能存储String类型.tag("deviceId", "sensor" + i).addField("temp", 3).addField("voltage", 145+i).addField("A1", "4i").addField("A2", "4i").build();//将单条数据存储到集合中batchPoints.point(point);}//批量插入influxDB.write(batchPoints); }/*** 获取数据*/@Testpublic void datas(@RequestParam Integer page){int pageSize = 10;// InfluxDB支持分页查询,因此可以设置分页查询条件String pageQuery = " LIMIT " + pageSize + " OFFSET " + (page - 1) * pageSize;String queryCondition = "";  //查询条件暂且为空// 此处查询所有内容,如果String queryCmd = "SELECT * FROM "// 查询指定设备下的日志信息// 要指定从 RetentionPolicyName.measurement中查询指定数据,默认的策略可以不加;// + 策略name + "." + measurement+ measurement// 添加查询条件(注意查询条件选择tag值,选择field数值会严重拖慢查询速度)+ queryCondition// 查询结果需要按照时间排序+ " ORDER BY time DESC"// 添加分页查询条件+ pageQuery;QueryResult queryResult = influxDB.query(new Query(queryCmd, database));System.out.println("query result => "+queryResult);}
}
5、采用封装工具类
1、创建实体类
@Data
@Measurement(name = "sensor")
public class Sensor {
    // tag: indexed dimension, always stored as a String in InfluxDB
    @Column(name="deviceId",tag=true)
    private String deviceId;
    // numeric measurement fields
    @Column(name="temp")
    private float temp;
    @Column(name="voltage")
    private float voltage;
    // NOTE(review): A1/A2 break lowerCamelCase, but InfluxdbUtils.fetchResults
    // resolves fields by name via reflection — keep names in sync with columns
    @Column(name="A1")
    private float A1;
    @Column(name="A2")
    private float A2;
    // point timestamp, kept as a String here
    @Column(name="time")
    private String time;    
}
2、创建工具类
@Component
public class InfluxdbUtils {@Autowiredprivate InfluxDB influxDB;@Value("${spring.influx.database}")private String database;    /*** 新增单条记录,利用java的反射机制进行新增操作*/@SneakyThrowspublic void insertOne(Object obj){//获取度量Class<?> clasz = obj.getClass();Measurement measurement = clasz.getAnnotation(Measurement.class);//构建Point.Builder builder = Point.measurement(measurement.name());// 获取对象属性Field[] fieldArray = clasz.getDeclaredFields();Column column = null;for(Field field : fieldArray){column = field.getAnnotation(Column.class);//设置属性可操作field.setAccessible(true); if(column.tag()){//tag属性只能存储String类型builder.tag(column.name(), field.get(obj).toString());}else{//设置fieldif(field.get(obj) != null){builder.addField(column.name(), field.get(obj).toString());}}}influxDB.write(builder.build());}/*** 批量新增,方法一*/@SneakyThrowspublic void insertBatchByRecords(List<?> records){List<String> lines = new ArrayList<String>();   records.forEach(record->{Class<?> clasz = record.getClass();//获取度量Measurement measurement = clasz.getAnnotation(Measurement.class);//构建Point.Builder builder = Point.measurement(measurement.name());Field[] fieldArray = clasz.getDeclaredFields();Column column = null;for(Field field : fieldArray){column = field.getAnnotation(Column.class);//设置属性可操作field.setAccessible(true); if(column.tag()){//tag属性只能存储String类型builder.tag(column.name(), field.get(record).toString());}else{//设置fieldif(field.get(record) != null){builder.addField(column.name(), field.get(record).toString());}}}lines.add(builder.build().lineProtocol());});influxDB.write(lines);}/*** 批量新增,方法二*/@SneakyThrowspublic void insertBatchByPoints(List<?> records){BatchPoints batchPoints = BatchPoints.database(database).consistency(InfluxDB.ConsistencyLevel.ALL).build();records.forEach(record->{Class<?> clasz = record.getClass();//获取度量Measurement measurement = clasz.getAnnotation(Measurement.class);//构建Point.Builder builder = Point.measurement(measurement.name());Field[] fieldArray = clasz.getDeclaredFields();Column 
column = null;for(Field field : fieldArray){column = field.getAnnotation(Column.class);//设置属性可操作field.setAccessible(true); if(column.tag()){//tag属性只能存储String类型builder.tag(column.name(), field.get(record).toString());}else{//设置fieldif(field.get(record) != null){builder.addField(column.name(), field.get(record).toString());}}}batchPoints.point(builder.build());});influxDB.write(batchPoints);}/*** 查询,返回Map集合* @param query 完整的查询语句*/public List<Object> fetchRecords(String query){List<Object> results = new ArrayList<Object>();QueryResult queryResult = influxDB.query(new Query(query, database));queryResult.getResults().forEach(result->{result.getSeries().forEach(serial->{List<String> columns = serial.getColumns();int fieldSize = columns.size();serial.getValues().forEach(value->{     Map<String,Object> obj = new HashMap<String,Object>();for(int i=0;i<fieldSize;i++){   obj.put(columns.get(i), value.get(i));}results.add(obj);});});});return results;}/*** 查询,返回map集合* @param fieldKeys 查询的字段,不可为空;不可为单独的tag* @param measurement 度量,不可为空;*/public List<Object> fetchRecords(String fieldKeys, String measurement){StringBuilder query = new StringBuilder();query.append("select ").append(fieldKeys).append(" from ").append(measurement);     return this.fetchRecords(query.toString());}/*** 查询,返回map集合* @param fieldKeys 查询的字段,不可为空;不可为单独的tag* @param measurement 度量,不可为空;*/public List<Object> fetchRecords(String fieldKeys, String measurement, String order){StringBuilder query = new StringBuilder();query.append("select ").append(fieldKeys).append(" from ").append(measurement);query.append(" order by ").append(order);       return this.fetchRecords(query.toString());}/*** 查询,返回map集合* @param fieldKeys 查询的字段,不可为空;不可为单独的tag* @param measurement 度量,不可为空;*/public List<Object> fetchRecords(String fieldKeys, String measurement, String order, String limit){StringBuilder query = new StringBuilder();query.append("select ").append(fieldKeys).append(" from ").append(measurement);query.append(" order by 
").append(order);query.append(limit);return this.fetchRecords(query.toString());}/*** 查询,返回对象的list集合*/@SneakyThrowspublic <T> List<T> fetchResults(String query, Class<?> clasz){List results = new ArrayList<>();QueryResult queryResult = influxDB.query(new Query(query, database));queryResult.getResults().forEach(result->{result.getSeries().forEach(serial->{List<String> columns = serial.getColumns();int fieldSize = columns.size();     serial.getValues().forEach(value->{ Object obj = null;obj = clasz.newInstance();for(int i=0;i<fieldSize;i++){   String fieldName = columns.get(i);Field field = clasz.getDeclaredField(fieldName);field.setAccessible(true);Class<?> type = field.getType();if(type == float.class){field.set(obj, Float.valueOf(value.get(i).toString()));}else{field.set(obj, value.get(i));}                           }results.add(obj);});});});return results;}/*** 查询,返回对象的list集合*/public <T> List<T> fetchResults(String fieldKeys, String measurement, Class<?> clasz){StringBuilder query = new StringBuilder();query.append("select ").append(fieldKeys).append(" from ").append(measurement);     return this.fetchResults(query.toString(), clasz);}/*** 查询,返回对象的list集合*/public <T> List<T> fetchResults(String fieldKeys, String measurement, String order, Class<?> clasz){StringBuilder query = new StringBuilder();query.append("select ").append(fieldKeys).append(" from ").append(measurement);query.append(" order by ").append(order);return this.fetchResults(query.toString(), clasz);}/*** 查询,返回对象的list集合*/public <T> List<T> fetchResults(String fieldKeys, String measurement, String order, String limit, Class<?> clasz){StringBuilder query = new StringBuilder();query.append("select ").append(fieldKeys).append(" from ").append(measurement);query.append(" order by ").append(order);query.append(limit);        return this.fetchResults(query.toString(), clasz);}
}
3、使用工具类的测试代码
@SpringBootTest(classes = {MainApplication.class})
@RunWith(SpringJUnit4ClassRunner.class)
public class InfluxdbUtilTest {@Autowiredprivate InfluxdbUtils influxdbUtils;/*** 插入单条记录*/@Testpublic void insert(){Sensor sensor = new Sensor();sensor.setA1(10);sensor.setA2(10);sensor.setDeviceId("0002");sensor.setTemp(10L);sensor.setTime("2021-01-19");sensor.setVoltage(10);influxdbUtils.insertOne(sensor);}/*** 批量插入第一种方式*/@GetMapping("/index22")public void batchInsert(){  List<Sensor> sensorList = new ArrayList<Sensor>();for(int i=0; i<50; i++){Sensor sensor = new Sensor();sensor.setA1(2);sensor.setA2(12);sensor.setTemp(9);sensor.setVoltage(12);sensor.setDeviceId("sensor4545-"+i);sensorList.add(sensor);}influxdbUtils.insertBatchByRecords(sensorList);}/*** 批量插入第二种方式*/@GetMapping("/index23")public void batchInsert1(){ List<Sensor> sensorList = new ArrayList<Sensor>();Sensor sensor = null;for(int i=0; i<50; i++){sensor = new Sensor();sensor.setA1(2);sensor.setA2(12);sensor.setTemp(9);sensor.setVoltage(12);sensor.setDeviceId("sensor4545-"+i);sensorList.add(sensor);}influxdbUtils.insertBatchByPoints(sensorList);}/*** 查询数据*/@GetMapping("/datas2")public void datas(@RequestParam Integer page){int pageSize = 10;// InfluxDB支持分页查询,因此可以设置分页查询条件String pageQuery = " LIMIT " + pageSize + " OFFSET " + (page - 1) * pageSize;String queryCondition = "";  //查询条件暂且为空// 此处查询所有内容,如果String queryCmd = "SELECT * FROM sensor"// 查询指定设备下的日志信息// 要指定从 RetentionPolicyName.measurement中查询指定数据,默认的策略可以不加;// + 策略name + "." 
+ measurement// 添加查询条件(注意查询条件选择tag值,选择field数值会严重拖慢查询速度)+ queryCondition// 查询结果需要按照时间排序+ " ORDER BY time DESC"// 添加分页查询条件+ pageQuery;List<Object> sensorList = influxdbUtils.fetchRecords(queryCmd);System.out.println("query result => {}"+sensorList );}/*** 获取数据*/@GetMapping("/datas21")public void datas1(@RequestParam Integer page){int pageSize = 10;// InfluxDB支持分页查询,因此可以设置分页查询条件String pageQuery = " LIMIT " + pageSize + " OFFSET " + (page - 1) * pageSize;String queryCondition = "";  //查询条件暂且为空// 此处查询所有内容,如果String queryCmd = "SELECT * FROM sensor"// 查询指定设备下的日志信息// 要指定从 RetentionPolicyName.measurement中查询指定数据,默认的策略可以不加;// + 策略name + "." + measurement// 添加查询条件(注意查询条件选择tag值,选择field数值会严重拖慢查询速度)+ queryCondition// 查询结果需要按照时间排序+ " ORDER BY time DESC"// 添加分页查询条件+ pageQuery;List<Sensor> sensorList = influxdbUtils.fetchResults(queryCmd, Sensor.class);//List<Sensor> sensorList = influxdbUtils.fetchResults("*", "sensor", Sensor.class);sensorList.forEach(sensor->{System.out.println("query result => {}"+sensorList );});     }
}
6、采用封装数据模型的方式
1、在Influxdb库中创建存储策略
CREATE RETENTION POLICY "rp_order_payment" ON "db_order" DURATION 30d REPLICATION 1 DEFAULT
2、创建数据模型
@Data
@Measurement(name = "m_order_payment",database = "db_order", retentionPolicy = "rp_order_payment")
public class OrderPayment implements Serializable  {
    // statistics batch id
    @Column(name = "batch_id", tag = true)
    private String batchId;
    // owning business unit id
    @Column(name = "bu_id", tag = true)
    private String buId;
    // BU display name (stored as a field, not a tag)
    @Column(name = "bu_name")
    private String buName;
    // NOTE(review): the three metrics below are declared as String tags —
    // tags are indexed dimensions, while numeric measures are normally
    // fields. Confirm this is intentional before reusing the model.
    // total count
    @Column(name = "total_count", tag = true)
    private String totalCount;
    // paid count
    @Column(name = "pay_count", tag = true)
    private String payCount;
    // total amount
    @Column(name = "total_money", tag = true)
    private String totalMoney;
}
3、创建Mapper
/**
 * Thin project-local extension point over InfluxDBMapper; custom query/save
 * helpers can be added here without touching callers.
 */
public class InfluxMapper extends InfluxDBMapper {
    public InfluxMapper(InfluxDB influxDB) {
        super(influxDB);
    }
}
4、配置Mapper
@Log4j2
@Configuration
public class InfluxAutoConfiguration {

    /** Registers the InfluxMapper bean backed by the shared InfluxDB client. */
    @Bean
    public InfluxMapper influxMapper(InfluxDB influxDB) {
        return new InfluxMapper(influxDB);
    }
}
5、测试CRUD
@SpringBootTest(classes = {MainApplication.class})
@RunWith(SpringJUnit4ClassRunner.class)
public class InfluxdbMapperTest {

    @Autowired
    private InfluxMapper influxMapper;

    /**
     * The original save(OrderPayment product) took a parameter, which JUnit
     * cannot inject — build the sample record locally instead.
     */
    @Test
    public void save() {
        OrderPayment product = new OrderPayment();
        product.setBatchId("batch-001");
        product.setBuId("bu001");
        product.setBuName("demo-bu");
        product.setTotalCount("100");
        product.setPayCount("80");
        product.setTotalMoney("999");
        influxMapper.save(product);
    }

    /** Reads every point of the measurement back as OrderPayment objects. */
    @Test
    public void queryAll() {
        List<OrderPayment> products = influxMapper.query(OrderPayment.class);
        System.out.println(products);
    }

    /**
     * Query by BU id. Parameterless now — the original queryByBu(String bu)
     * could not run under JUnit.
     */
    @Test
    public void queryByBu() {
        String bu = "bu001";
        String sql = String.format("%s'%s'", "select * from m_order_payment where bu_id = ", bu);
        Query query = new Query(sql, "db_order");
        List<OrderPayment> products = influxMapper.query(query, OrderPayment.class);
        System.out.println(products);
    }
}

参考:https://blog.csdn.net/cpongo1/article/details/89550486

https://github.com/SpringForAll/spring-boot-starter-hbase

https://github.com/JeffLi1993/springboot-learning-example

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/66866.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

【STM32】学习笔记(OLED)

调试方式 OLED简介 硬件电路 驱动函数 OLED.H #ifndef __OLED_H #define __OLED_Hvoid OLED_Init(void); void OLED_Clear(void); void OLED_ShowChar(uint8_t Line, uint8_t Column, char Char); void OLED_ShowString(uint8_t Line, uint8_t Column, char *String); void OL…

地质灾害监测方案(地质灾害监测原理与方法)

我国坡地较多,地质灾害时有发生,给人民生命财产安全和经济建设造成严重威胁。采用工业物联网技术进行地质灾害监测,可以实现对山体移动、边坡变形等地质灾害的预警和实时监测,保护人民生命财产安全。现提出如下地质灾害监测方案: 1. 监测场景:针对易发地质灾害的区域,如矿山边坡…

ReactNative 井字游戏 实战

效果展示 需要的插件准备 此实战项目需要用到两个插件。 react-native-snackbar 底部信息提示组件。 react-native-vector-icons 图标组件。 安装组件&#xff1a; npm i react-native-snackbar npm i react-native-vector-icons npm i types/react-native-vector-icons /…

java企业工程项目管理系统源码(三控:进度组织、质量安全、预算资金成本、二平台:招采、设计管理)

工程项目管理软件&#xff08;工程项目管理系统&#xff09;对建设工程项目管理组织建设、项目策划决策、规划设计、施工建设到竣工交付、总结评估、运维运营&#xff0c;全过程、全方位的对项目进行综合管理 工程项目各模块及其功能点清单 一、系统管理 1、数据字典&#xff…

Opencv手工选择图片区域去水印

QT 插件化图像算法研究平台的功能在持续完善&#xff0c;补充了一个人工选择图片区域的功能。 其中&#xff0c;图片选择功能主要代码如下&#xff1a; QRect GLImageWidget::getSeleted() {QRect ajust(0,0,0,0);if(image.isNull() || !hasSelection)return ajust;double w1…

Springboot 接口方式硬通知实现ConfigurationProperties 、@Value 动态刷新

前言 看到这个文章标题&#xff0c;也许有的看官就觉得很多余&#xff0c; 因为Nacos 可以设置 NacosValue(value "${XXX}",autoRefreshed true) 实现动态刷新&#xff1b; 又因为cloud config的RefreshScope 实现动态刷新&#xff1b; 还有阿波罗...等 这…

代码随想录训练营第38天|62.不同路径,63.不同路径II

代码随想录训练营第38天|62.不同路径&#xff0c;63.不同路径II 62.不同路径文章思路代码 63.不同路径II文章思路代码 总结 62.不同路径 文章 代码随想录|0062.不同路径 思路 d p [ i ] [ j ] { 1 , i 0 ∧ j 0 d p [ i − 1 ] [ j ] d p [ i ] [ j − 1 ] , e l s e \b…

huggingface transformers库中LlamaForCausalLM

新手入门笔记。 LlamaForCausalLM 的使用示例&#xff0c;这应该是一段推理代码。 from transformers import AutoTokenizer, LlamaForCausalLMmodel LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) tokenizer AutoTokenizer.from_pretrained(PATH_TO_CONVE…

【python爬虫】13.吃什么不会胖(爬虫实操练习)

文章目录 前言项目实操明确目标分析过程代码实现 前言 吃什么不会胖——这是我前段时间在健身时比较关注的话题。 相信很多人&#xff0c;哪怕不健身&#xff0c;也会和我一样注重饮食的健康&#xff0c;在乎自己每天摄入的食物热量。 不过&#xff0c;生活中应该很少有人会…

10 Python的文件操作

概述 在上一节&#xff0c;我们介绍了Python的推导式&#xff0c;包括&#xff1a;列表推导式、元组推导式、集合推导式、字典推导式、条件推导式等内容。在这一节中&#xff0c;我们将介绍Python的文件操作。Python是一种高级编程语言&#xff0c;它提供了许多内置函数和模块来…

景联文科技:高质量AI数据标注助力大语言模型训练,推动人工智能落地应用

大语言模型在各类LLM新技术的融会贯通下&#xff0c;不断加速Instruction-tuning、RLHF、思维链等新技术在大语言模型中的深度应用&#xff0c;人工智能技术以惊人的速度不断进化。 大语言模型&#xff08;LLM&#xff09;是一种基于深度学习技术和海量文本数据&#xff0c;它们…

Leetcode:349. 两个数组的交集【题解超详细】

题目 给定两个数组 nums1 和 nums2 &#xff0c;返回 它们的交集 。输出结果中的每个元素一定是 唯一 的。我们可以 不考虑输出结果的顺序 。 难度&#xff1a;简单 题目链接&#xff1a;349.两个数组的交集 示例 1&#xff1a; 输入&#xff1a;nums1 [1,2,2,1], nums2 [2,…

【08期】ArrayList常见面试题

简介 ArrayList是我们开发中非常常用的数据存储容器之一&#xff0c;其底层是数组实现的&#xff0c;我们可以在集合中存储任意类型的数据&#xff0c;ArrayList是线程不安全的&#xff0c;非常适合用于对元素进行查找&#xff0c;效率非常高。 线程安全性 对ArrayList的操作…

LeetCode每日一题:1921. 消灭怪物的最大数量(2023.9.3 C++)

目录 1921. 消灭怪物的最大数量 题目描述&#xff1a; 实现代码与解析&#xff1a; 贪心 原理思路&#xff1a; 1921. 消灭怪物的最大数量 题目描述&#xff1a; 你正在玩一款电子游戏&#xff0c;在游戏中你需要保护城市免受怪物侵袭。给你一个 下标从 0 开始 且长度为 …

百度文心一言GPT免费入口也来了!!!

文心一言入口地址&#xff1a;文心一言能力全面开放 文心一言是百度全新一代知识增强大语言模型&#xff0c;文心大模型家族的新成员&#xff0c;能够与人对话互动&#xff0c;回答问题&#xff0c;协助创作&#xff0c;高效便捷地帮助人们获取信息、知识和灵感。 文心一言的技…

Python 分析HTTP的可靠性

在这篇文章中&#xff0c;我们将介绍如何使用 Python 来分析代理服务提供商的可靠性。代理服务在许多场景中都非常有用&#xff0c;例如突破地理限制、保护隐私和提高网络安全性。然而&#xff0c;并非所有的代理服务提供商都是可靠的。因此&#xff0c;我们将使用 Python 来测…

【传输层】TCP -- 三次握手四次挥手 | 可靠性与提高性能策略

超时重传机制连接管理机制三次握手四次挥手滑动窗口拥塞控制延迟应答捎带应答面向字节流粘包问题TCP异常情况TCP小结基于TCP应用层协议理解 listen 的第二个参数 超时重传机制 主机A发送数据给B之后&#xff0c;可能因为网络拥堵等原因&#xff0c;数据无法到达主机B&#xff1…

3DCAT携手华为,打造XR虚拟仿真实训实时云渲染解决方案

2023年5月8日-9日&#xff0c;以 因聚而生 众志有为 为主题的 华为中国合作伙伴大会2023 在深圳国际会展中心隆重举行。本次大会汇聚了ICT产业界的广大新老伙伴朋友&#xff0c;共同探讨数字化转型的新机遇&#xff0c;共享数字化未来的新成果。 华为中国合作伙伴大会2023现场&…

在线音乐播放器测试报告

文章目录 一、项目背景二、项目功能三、测试目的四、测试环境五、测试计划5.1 功能测试5.2 自动化测试 六、测试结果 一、项目背景 今天&#xff0c;市面上的音乐播放器种类繁多同时功能强大。一个单纯的音乐播放器可能不再单纯只是音乐播放的功能&#xff0c;而是更多地集短视…

对战ChatGPT,创邻科技的Graph+AI会更胜一筹吗?

大模型&#xff08;大规模语言模型&#xff0c;即Large Language Model&#xff09;的应用已经成为千行百业发展的必然。特定领域或行业中经过训练和优化的企业级垂直大模型则成为大模型走下神坛、真正深入场景的关键之路。 但是&#xff0c;企业级垂直大模型在正式落地应用前…