1. Add dependencies to pom.xml and package the project
<dependencies>
    <dependency>
        <groupId>org.apache.hive</groupId>
        <artifactId>hive-exec</artifactId>
        <version>1.1.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.6.0</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <!-- Configure the Java compiler plugin and specify the version -->
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <configuration>
                <encoding>UTF-8</encoding>
                <source>1.8</source>
                <target>1.8</target>
                <showWarnings>true</showWarnings>
            </configuration>
        </plugin>
    </plugins>
</build>
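The heading mentions packaging, but the pom above only configures the compiler plugin, and the Kafka UDF in section 3 also relies on gson and kafka-clients, which are not declared in the original dependency list. If those libraries need to be bundled into the UDF jar, one option is the maven-shade-plugin; the snippet below is an assumed addition, not part of the original build:

<!-- Assumed addition: build a fat jar so gson / kafka-clients end up on the Hive classpath -->
<plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-shade-plugin</artifactId>
    <version>3.2.4</version>
    <executions>
        <execution>
            <phase>package</phase>
            <goals>
                <goal>shade</goal>
            </goals>
        </execution>
    </executions>
</plugin>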
2. Process a single field or multiple fields
import utils.CommonUtils;
import org.apache.hadoop.hive.ql.exec.UDF;

/**
 * @Author liufu
 * @CreateTime 2017/5/4 14:13
 * @Description
 */
public class AllTracksUDF extends UDF {

    // Overloaded evaluate methods.
    // Handles int-typed fields. Even if the target table column is int, bigint, etc.,
    // the value can still be inserted as a string.
    // For int columns, pass the type flag as a plain number, e.g. evaluate(power, 1)
    public Integer evaluate(String column, int columnType) {
        String longValue = getStringValue(column);
        if (longValue != null) {
            return Integer.parseInt(longValue);
        }
        return null;
    }

    // Handles long-typed fields, including timestamps.
    // For the long overload, append "L" to columnType, e.g. evaluate(startTime, 1L)
    public Long evaluate(String column, long columnType) {
        String longValue = getStringValue(column);
        if (longValue != null) {
            // columnType == 1 means the value is a timestamp in seconds; convert to milliseconds (* 1000)
            if (columnType == 1) {
                return Long.parseLong(longValue) * 1000;
            }
            return Long.parseLong(longValue);
        }
        return null;
    }

    // Handles string-typed fields
    public String evaluate(String column) {
        return getStringValue(column);
    }

    // Handles two fields, e.g. null-checking and concatenating xpoint and ypoint
    public String evaluate(String column1, String column2) {
        return convertLatLon(column1, column2);
    }

    /**
     * @param value
     * @return the field value as a string, with null handling
     * ("NULL" and the dirty-data marker "MULL" are both treated as null)
     */
    private String getStringValue(String value) {
        if (value != null && !"MULL".equalsIgnoreCase(value) && !"NULL".equalsIgnoreCase(value) && value.trim().length() != 0) {
            return value;
        }
        return null;
    }

    /**
     * @param lat
     * @param lon
     * @return the latitude and longitude joined with a comma
     */
    private String convertLatLon(String lat, String lon) {
        if (lat == null || lon == null || "MULL".equalsIgnoreCase(lat) || "MULL".equalsIgnoreCase(lon)
                || "NULL".equalsIgnoreCase(lat) || "NULL".equalsIgnoreCase(lon)
                || "0".equalsIgnoreCase(lat) || "0".equalsIgnoreCase(lon)) {
            return "0,0";
        }
        // If lat is larger than lon the two values were probably swapped, so output the smaller one first
        if (CommonUtils.parseDouble(lat) > CommonUtils.parseDouble(lon)) {
            return lon + "," + lat;
        } else {
            return lat + "," + lon;
        }
    }
}
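AllTracksUDF calls utils.CommonUtils.parseDouble, which is not shown in the original post. A minimal sketch of what such a helper might look like (this is an assumption about the author's utility class, not its actual source):

package utils;

public class CommonUtils {
    // Hypothetical helper: parse a string into a double, falling back to 0 on bad input
    public static double parseDouble(String value) {
        try {
            return Double.parseDouble(value);
        } catch (NumberFormatException | NullPointerException e) {
            return 0d;
        }
    }
}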
3. Use the map() function to assemble a row into a Map and pass it in
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Map;
import java.util.Properties;

/**
 * Reads data from Hive, assembles each row into a JSON string, and sends it to Kafka
 * through the UDF below.
 * <p>
 * Testing shows that the Hive2KafkaUDF class is created only once per MR task,
 * so the producer can be made a singleton.
 *
 * @Author liufu
 * @E-mail: 1151224929@qq.com
 * @CreateTime 2019/6/5 18:06
 */
@Description(name = "hive2kafka", value = "_FUNC_(string, topic, map<string,string>) - Return ret ")
public class Hive2KafkaUDF extends UDF {
    private static Gson gson = new GsonBuilder().serializeNulls().create();
    private KafkaProducer<String, String> producer;

    public boolean evaluate(String kafkaParams, String topic, Map<String, String> dataMap) {
        KafkaProducer<String, String> producerTemp = getProducer(kafkaParams);
        producerTemp.send(new ProducerRecord<>(topic, null, gson.toJson(dataMap)));
        return true;
    }

    // Lazily create a single producer per UDF instance (double-checked locking)
    private KafkaProducer<String, String> getProducer(String kafkaParams) {
        if (producer == null) {
            synchronized (Hive2KafkaUDF.class) {
                if (producer == null) {
                    // kafkaParams is a JSON string with the producer configuration
                    Properties props = gson.fromJson(kafkaParams, Properties.class);
                    producer = new KafkaProducer<>(props);
                }
            }
        }
        return producer;
    }
}
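To illustrate what actually lands on the Kafka topic: gson.toJson(dataMap) serializes the map built by Hive's map() function into a JSON object, and serializeNulls() keeps keys whose value is NULL. A small standalone sketch with hypothetical sample values:

import com.google.gson.GsonBuilder;
import java.util.LinkedHashMap;
import java.util.Map;

public class PayloadDemo {
    public static void main(String[] args) {
        Map<String, String> dataMap = new LinkedHashMap<>();
        dataMap.put("name", "zhangsan");   // hypothetical row values
        dataMap.put("age", "25");
        dataMap.put("address", null);
        // serializeNulls() keeps the null-valued key in the output
        String payload = new GsonBuilder().serializeNulls().create().toJson(dataMap);
        System.out.println(payload);       // {"name":"zhangsan","age":"25","address":null}
    }
}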
3.2 How to use this UDF
Use the map() function to assemble the row data into a Map object:

select hive2kafka(
    "{'bootstrap.servers': 'gawh243:9092', 'acks': 'all', 'key.serializer': 'org.apache.kafka.common.serialization.StringSerializer', 'value.serializer': 'org.apache.kafka.common.serialization.StringSerializer'}",
    'together001',
    -- in map(), the string on the left becomes the output field name, equivalent to "username as name"
    map('name', username, 'age', age))
from qwrenzixing.visual_deduction_kinship_relation;
4. Create temporary functions
4.1 Package the code into a jar; it can be placed anywhere Hive can reach, e.g. on HDFS (hdfs://) or the local file system (file://)
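For the hdfs:// option, the jar just needs to be copied into HDFS first, for example (destination path is only illustrative, reusing the cluster address from section 4.2):

hdfs dfs -put hive2kafka.udf-1.0.jar hdfs://rsb:8082/udf/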
4.2 Load the jar
hive> add jar /root/hive2kafka.udf-1.0.jar;
Added [/root/elasticsearce-hadoop/hive2kafka.udf-1.0.jar] to class path
Added resources: [/root/elasticsearce-hadoop/hive2kafka.udf-1.0.jar]
hive> create temporary function hive2kafka as 'com.study.Hive2KafkaUDF';
hive> create temporary function allTracksudf as 'com.study.AllTracksUDF';

Alternatively, create the function directly from a remote jar; running add jar first is not required:

hive> create temporary function hive2kafka as 'com.study.Hive2KafkaUDF' using jar 'hdfs://rsb:8082/udf/hive2es.udf-1.0.jar';
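Once registered, the usage string from the @Description annotation on Hive2KafkaUDF can be checked from the Hive CLI; DESCRIBE FUNCTION prints the annotation's value with _FUNC_ replaced by the registered name (this check is not part of the original session):

hive> describe function hive2kafka;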
5. Use the temporary functions
5.1 The first function
select allTracksudf(create_time, 1L) as create_time from t_a;
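Passing 1L selects the long overload, which treats create_time as seconds and converts it to milliseconds. The other overloads from section 2 are called the same way; a sketch using hypothetical column names:

select allTracksudf(power, 1) as power,           -- int overload
       allTracksudf(username) as username,        -- string overload with NULL/MULL cleanup
       allTracksudf(xpoint, ypoint) as location   -- two-field overload, joins the coordinates
from t_a;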
5.2 The second function
Use the map() function to assemble the row data into a Map object:

select hive2kafka(
    "{'bootstrap.servers': 'gawh243:9092', 'acks': 'all', 'key.serializer': 'org.apache.kafka.common.serialization.StringSerializer', 'value.serializer': 'org.apache.kafka.common.serialization.StringSerializer'}",
    'together001',
    -- in map(), the string on the left becomes the output field name, equivalent to "username as name"
    map('name', username, 'age', age))
from testDb.t_b;
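To confirm that the rows actually reached Kafka, one option (not from the original post) is to read the topic back with the console consumer that ships with Kafka, using the broker address from the example above:

kafka-console-consumer.sh --bootstrap-server gawh243:9092 --topic together001 --from-beginning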