A simple wrapper around the Kafka-related APIs.

1. Target Kafka version

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.8.2.2</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.8.2.2</version>
</dependency>

2. Methods for operating on topics and consumers

import com.cmos.core.logger.Logger;
import com.cmos.core.logger.LoggerFactory;
import com.google.common.io.Files;
import kafka.admin.AdminUtils;
import kafka.admin.TopicCommand;
import kafka.api.TopicMetadata;
import kafka.tools.ConsumerOffsetChecker;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteWatchdog;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.tools.ant.taskdefs.Execute;
import org.springframework.util.Assert;
import scala.collection.JavaConversions;

import java.io.File;
import java.nio.charset.Charset;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static java.util.stream.Collectors.toCollection;
import static scala.collection.JavaConversions.mapAsJavaMap;
import static scala.collection.JavaConversions.seqAsJavaList;

// Declared as a static nested class of KafkaMessageConfig (see the static import
// in the controller below). AutoZkClient and TopicConfig are project helper
// classes; a sketch of AutoZkClient follows this class.
public static class KafkaUtils {

    private static Logger LOGGER = LoggerFactory.getLogger(KafkaUtils.class);

    private static AutoZkClient zkClient;

    public static AutoZkClient getZkClient() {
        return zkClient;
    }

    public static void setZkClient(AutoZkClient zkClient) {
        KafkaUtils.zkClient = zkClient;
    }

    public static boolean topicExists(String topic) {
        Assert.notNull(zkClient, "zkclient is null");
        return AdminUtils.topicExists(zkClient, topic);
    }

    public static void topicChangeConfig(String topic, Properties properties) {
        Assert.notNull(zkClient, "zkclient is null");
        AdminUtils.changeTopicConfig(zkClient, topic, properties);
    }

    public static void topicAlterPartitions(String topic, int partitions) {
        Assert.notNull(zkClient, "zkclient is null");
        TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient);
        int curPartitions = topicMetadata.partitionsMetadata().size();
        if (curPartitions == partitions) {
            return;
        }
        if (curPartitions > partitions) {
            LOGGER.info(String.format(
                    "curPartitions=%d, cannot alter partitions to %d; the new count must be greater than the current one",
                    curPartitions, partitions));
            return;
        }
        String[] args = {
                "--zookeeper", zkClient.zkServers,
                "--partitions", String.valueOf(partitions),
                "--alter",
                "--topic", topic
        };
        TopicCommand.TopicCommandOptions alterOpt = new TopicCommand.TopicCommandOptions(args);
        alterOpt.checkArgs();
        TopicCommand.alterTopic(zkClient, alterOpt);
    }

    public static void topicDescribe(String topic) {
        Assert.notNull(zkClient, "zkclient is null");
        String[] args = {
                "--zookeeper", zkClient.zkServers,
                "--describe",
                "--topic", topic
        };
        TopicCommand.TopicCommandOptions describeOpt = new TopicCommand.TopicCommandOptions(args);
        describeOpt.checkArgs();
        TopicCommand.describeTopic(zkClient, describeOpt);
    }

    public static void topicOverrideConfig(String topic, Properties properties) {
        Assert.notNull(zkClient, "zkclient is null");
        Properties oldProperties = KafkaUtils.topicConfig(topic);
        oldProperties.putAll(properties);
        AdminUtils.changeTopicConfig(zkClient, topic, oldProperties);
    }

    public static void topicCreate(TopicConfig topicConfig) {
        Assert.notNull(zkClient, "zkclient is null");
        int brokerSize = ZkUtils.getSortedBrokerList(zkClient).size();
        if (topicConfig.getReplicationFactor() > brokerSize) {
            topicConfig.setReplicationFactor(brokerSize);
            LOGGER.info(String.format(
                    "broker-size=%d < replication-factor=%d, so replication-factor is capped at broker-size",
                    brokerSize, topicConfig.getReplicationFactor()));
        }
        AdminUtils.createTopic(zkClient, topicConfig.getName(), topicConfig.getPartitions(),
                topicConfig.getReplicationFactor(), topicConfig.getProperties());
    }

    public static void topicDelete(String topic) {
        Assert.notNull(zkClient, "zkclient is null");
        AdminUtils.deleteTopic(zkClient, topic);
    }

    public static List<String> topicsList() {
        Assert.notNull(zkClient, "zkclient is null");
        return seqAsJavaList(ZkUtils.getAllTopics(zkClient));
    }

    public static Properties topicConfig(String topic) {
        Assert.notNull(zkClient, "zkclient is null");
        return AdminUtils.fetchTopicConfig(zkClient, topic);
    }

    public static Map<String, Properties> topicsConfig() {
        Assert.notNull(zkClient, "zkclient is null");
        return mapAsJavaMap(AdminUtils.fetchAllTopicConfigs(zkClient));
    }

    public static void consumerDetail(String topic, String group) {
        String[] args = {
                "--zookeeper", zkClient.getZkServers(),
                "--group", group,
                "--topic", topic
        };
        ConsumerOffsetChecker.main(args);
    }

    public static Map<String, List<String>> getConsumersPerTopic(String group) {
        return mapAsJavaMap(ZkUtils.getConsumersPerTopic(zkClient, group, false))
                .entrySet().stream()
                .collect(Collectors.toMap(
                        entry -> entry.getKey(),
                        entry -> JavaConversions.seqAsJavaList(entry.getValue()).stream()
                                .map(consumerThreadId -> consumerThreadId.consumer())
                                .collect(Collectors.toList())));
    }

    public static List<String> getConsumersInGroup(String group) {
        return JavaConversions.seqAsJavaList(ZkUtils.getConsumersInGroup(zkClient, group));
    }

    public static String executeCommond(String commond) {
        LOGGER.info("begin to execute commond: " + commond);
        File tmpFileDir = Files.createTempDir();
        String tmpFileName = UUID.randomUUID().toString() + ".txt";
        // fixed: the original concatenated the directory and file name without a separator
        String fileSavePath = tmpFileDir.getAbsolutePath() + File.separator + tmpFileName;
        CommandLine oCmdLine = CommandLine.parse(commond + " > " + fileSavePath);
        DefaultExecutor executor = new DefaultExecutor();
        ExecuteWatchdog watchdog = new ExecuteWatchdog(20000);
        executor.setWatchdog(watchdog);
        int[] exitValues = {0, 1};
        executor.setExitValues(exitValues);
        try {
            if (Execute.isFailure(executor.execute(oCmdLine))) {
                watchdog.killedProcess();
                LOGGER.error("remote command execution failed... commond=" + commond);
            } else {
                try (Stream<String> lines = java.nio.file.Files.lines(
                        new File(fileSavePath).toPath(), Charset.defaultCharset())) {
                    List<String> fileLines = lines.collect(toCollection(LinkedList::new));
                    StringBuilder result = new StringBuilder();
                    fileLines.forEach(line -> result.append(line).append(System.lineSeparator()));
                    return result.toString();
                } finally {
                    FileUtils.deleteQuietly(tmpFileDir);
                }
            }
        } catch (Exception e) {
            LOGGER.error("execute command error happened... commond=" + commond, e);
        }
        return StringUtils.EMPTY;
    }
}
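
KafkaUtils depends on AutoZkClient, a project class the post never shows; the code above only reveals a public zkServers field and a getZkServers() accessor. A plausible minimal sketch under those assumptions (the timeouts and everything else are guesses):

import kafka.utils.ZKStringSerializer$;
import org.I0Itec.zkclient.ZkClient;

// Hypothetical sketch of AutoZkClient. Kafka 0.8's AdminUtils expects a
// ZkClient that serializes znode data with ZKStringSerializer.
public class AutoZkClient extends ZkClient {

    public final String zkServers;

    public AutoZkClient(String zkServers) {
        // session timeout and connection timeout (ms) are assumed values
        super(zkServers, 10000, 10000, ZKStringSerializer$.MODULE$);
        this.zkServers = zkServers;
    }

    public String getZkServers() {
        return zkServers;
    }
}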

3. The controller layer

import com.alibaba.fastjson.JSON;
import com.cmos.common.annotation.CompatibleOutput;
import com.cmos.core.logger.Logger;
import com.cmos.core.logger.LoggerFactory;
import com.cmos.wmhopenapi.web.config.KafkaMessageConfig;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.*;
import scala.Console;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.List;

import static com.cmos.wmhopenapi.web.config.KafkaMessageConfig.KafkaUtils;

/**
 * @author hujunzheng
 * @create 2018-07-16 10:20
 **/
@RestController
@RequestMapping("/message/state")
@CompatibleOutput
public class MessageCenterStateController {

    private static Logger LOGGER = LoggerFactory.getLogger(MessageCenterStateController.class);

    @Autowired
    private KafkaMessageConfig.NoAckConsumer noAckConsumer;

    @Autowired
    private KafkaMessageConfig.AckConsumer ackConsumer;

    /**
     * Get the topic description.
     *
     * @param topic
     **/
    @GetMapping("/topic-describe")
    public String topicDescribe(@RequestParam String topic) {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            // redirect the Scala console output stream
            Console.setOut(new PrintStream(bos));
            KafkaUtils.topicDescribe(topic);
            String result = bos.toString();
            LOGGER.info(result);
            return String.format("%s%s%s", "<pre>", result, "</pre>");
        } catch (Exception e) {
            LOGGER.error("failed to get topic description", e);
        }
        return StringUtils.EMPTY;
    }

    /**
     * List all topics.
     **/
    @GetMapping(value = "/topics-all", produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
    public String topicAll() {
        String result = JSON.toJSONString(KafkaUtils.topicsList());
        LOGGER.info(result);
        return result;
    }

    /**
     * Get the configuration of a topic.
     *
     * @param topic
     **/
    @GetMapping(value = "/topic-config", produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
    public String topicConfig(@RequestParam String topic) {
        String result = JSON.toJSONString(KafkaUtils.topicConfig(topic));
        LOGGER.info(result);
        return result;
    }

    /**
     * Get the configuration of every topic.
     **/
    @GetMapping(value = "/topics-configs", produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
    public String topicsConfigs() {
        String result = JSON.toJSONString(KafkaUtils.topicsConfig());
        LOGGER.info(result);
        return result;
    }

    /**
     * Show the consumers in a group.
     *
     * @param group
     **/
    @GetMapping(value = "/consumers-in-group", produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
    public String consumersInGroup(@RequestParam String group) {
        String result = JSON.toJSONString(KafkaUtils.getConsumersInGroup(group));
        LOGGER.info(result);
        return result;
    }

    /**
     * Show the consumers in a group, broken down by topic.
     *
     * @param group
     **/
    @GetMapping(value = "/consumers-per-topic", produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
    public String consumersPerTopic(@RequestParam String group) {
        String result = JSON.toJSONString(KafkaUtils.getConsumersPerTopic(group));
        LOGGER.info(result);
        return result;
    }

    /**
     * Show the consumption details of a consumer.
     *
     * @param topic
     * @param group
     **/
    @GetMapping("/consumer-detail")
    public String consumerDetail(@RequestParam String topic, @RequestParam String group) {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            // redirect the Scala console output stream
            Console.setOut(new PrintStream(bos));
            KafkaUtils.consumerDetail(topic, group);
            String result = bos.toString();
            LOGGER.info(result);
            return String.format("%s%s%s", "<pre>", result, "</pre>");
        } catch (Exception e) {
            LOGGER.error("failed to get consumption details", e);
        }
        return StringUtils.EMPTY;
    }

    /**
     * Consume messages and display them.
     *
     * @param topic
     * @param group
     * @param size  the number of messages to consume
     * @param ack   whether the consumed messages should be acked
     **/
    @GetMapping("/consumer-consume")
    public String consumerConsume(@RequestParam String topic,
                                  @RequestParam(required = false, defaultValue = "default") String group,
                                  @RequestParam(required = false, defaultValue = "1") int size,
                                  @RequestParam(required = false, defaultValue = "false") boolean ack) {
        List<String> messages;
        if (ack) {
            messages = ackConsumer.consume(topic, group, size);
        } else {
            messages = noAckConsumer.consume(topic, group, size);
        }
        return JSON.toJSONString(messages);
    }

    /**
     * Run a Kafka-related command.
     *
     * @param sshRemote  the command for connecting to the remote host (ssh user@host)
     * @param sshCommond the Kafka command itself (kafka-consumer.sh ...)
     **/
    @PostMapping("/commond-execute")
    public String commondExecute(@RequestParam(required = false) String sshRemote,
                                 @RequestParam String sshCommond) {
        String commond = sshCommond + StringUtils.EMPTY;
        if (StringUtils.isNotBlank(sshRemote)) {
            commond = String.format("%s \"%s\"", sshRemote, commond);
        }
        String result = KafkaUtils.executeCommond(commond);
        return String.format("%s%s%s", "<pre>", result, "</pre>");
    }
}
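
The controller statically imports KafkaUtils and autowires NoAckConsumer/AckConsumer from KafkaMessageConfig, which the post does not show. A minimal wiring sketch under those assumptions; only the member names (KafkaUtils, NoAckConsumer, AckConsumer) and the kafka.connection.zkconnect property (taken from the test class below) come from the post, the rest is illustrative:

import javax.annotation.PostConstruct;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Hypothetical sketch of KafkaMessageConfig.
@Configuration
public class KafkaMessageConfig {

    @Value("${kafka.connection.zkconnect}")
    private String zkConnect;

    @PostConstruct
    public void initKafkaUtils() {
        // hand the static utility its ZooKeeper client once at startup
        KafkaUtils.setZkClient(new AutoZkClient(zkConnect));
    }

    @Bean
    public NoAckConsumer noAckConsumer() {
        KafkaConsumerConfig config = new KafkaConsumerConfig();
        config.setZkConnect(zkConnect);
        return new NoAckConsumer(config);
    }

    @Bean
    public AckConsumer ackConsumer() {
        KafkaConsumerConfig config = new KafkaConsumerConfig();
        config.setZkConnect(zkConnect);
        return new AckConsumer(config);
    }
}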

4. Consumer configuration

  Consumer logic

package com.mochasoft.latte.data.kafka.consumer;

import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import org.apache.commons.lang3.StringUtils;

public class KafkaConsumerConfig {

    private String zkConnect;
    private String zkSessionTimeoutMs;
    private String zkSyncTimeMs;
    private String autoCommitIntervalMs;
    private String groupId = "default";

    static enum OffSet {
        smallest, largest;

        private OffSet() {}
    }

    private OffSet offset = OffSet.largest;

    private Properties properties;

    public KafkaConsumerConfig() {
        this.properties = new Properties();
    }

    public KafkaConsumerConfig(String zkConnect, String zkSessionTimeoutMs,
                               String zkSyncTimeMs, String autoCommitIntervalMs) {
        this.zkConnect = zkConnect;
        this.zkSessionTimeoutMs = zkSessionTimeoutMs;
        this.zkSyncTimeMs = zkSyncTimeMs;
        this.autoCommitIntervalMs = autoCommitIntervalMs;
        this.properties = new Properties();
    }

    public String getZkConnect() {
        return this.zkConnect;
    }

    public void setZkConnect(String zkConnect) {
        this.zkConnect = zkConnect;
    }

    public String getZkSessionTimeoutMs() {
        return this.zkSessionTimeoutMs;
    }

    public void setZkSessionTimeoutMs(String zkSessionTimeoutMs) {
        this.zkSessionTimeoutMs = zkSessionTimeoutMs;
    }

    public String getZkSyncTimeMs() {
        return this.zkSyncTimeMs;
    }

    public void setZkSyncTimeMs(String zkSyncTimeMs) {
        this.zkSyncTimeMs = zkSyncTimeMs;
    }

    public String getAutoCommitIntervalMs() {
        return this.autoCommitIntervalMs;
    }

    public void setAutoCommitIntervalMs(String autoCommitIntervalMs) {
        this.autoCommitIntervalMs = autoCommitIntervalMs;
    }

    public String getGroupId() {
        return this.groupId;
    }

    public void setGroupId(String groupId) {
        if (StringUtils.isNotBlank(groupId)) {
            this.groupId = groupId;
        }
    }

    public OffSet getOffset() {
        return this.offset;
    }

    public void setOffset(OffSet offset) {
        this.offset = offset;
    }

    public ConsumerConfig getConsumerConfig() {
        return new ConsumerConfig(getProperties());
    }

    public Properties getProperties() {
        if (StringUtils.isBlank(this.zkConnect)) {
            throw new IllegalArgumentException("Blank zkConnect");
        }
        if (StringUtils.isNotBlank(this.zkSessionTimeoutMs)) {
            this.properties.put("zookeeper.session.timeout.ms", this.zkSessionTimeoutMs);
        }
        if (StringUtils.isNotBlank(this.zkSyncTimeMs)) {
            this.properties.put("zookeeper.sync.time.ms", this.zkSyncTimeMs);
        }
        if (StringUtils.isNotBlank(this.autoCommitIntervalMs)) {
            this.properties.put("auto.commit.interval.ms", this.autoCommitIntervalMs);
        }
        if (StringUtils.isNotBlank(this.offset.name())) {
            this.properties.put("auto.offset.reset", this.offset.name());
        }
        this.properties.put("group.id", getGroupId());
        this.properties.put("zookeeper.connect", this.zkConnect);
        return this.properties;
    }
}
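
As a quick illustration, a minimal usage sketch of the config class above; the connection string, timeouts, and group name are placeholder values, and the snippet assumes it runs in the same package as KafkaConsumerConfig (the OffSet enum is package-private):

// Minimal usage sketch of KafkaConsumerConfig; all values are placeholders.
KafkaConsumerConfig config = new KafkaConsumerConfig(
        "127.0.0.1:2181", // zookeeper.connect
        "4000",           // zookeeper.session.timeout.ms
        "2000",           // zookeeper.sync.time.ms
        "1000");          // auto.commit.interval.ms
config.setGroupId("demo-group");
config.setOffset(KafkaConsumerConfig.OffSet.smallest); // start from the earliest offset
ConsumerConfig consumerConfig = config.getConsumerConfig();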

// Nested in KafkaMessageConfig alongside KafkaUtils. Built on the old
// high-level consumer API (kafka.consumer.Consumer, ConsumerConnector,
// KafkaStream, kafka.serializer.StringDecoder, kafka.utils.VerifiableProperties).
public static final class NoAckConsumer extends TheConsumer {
    public NoAckConsumer(KafkaConsumerConfig kafkaConsumerConfig) {
        super(kafkaConsumerConfig, false);
        this.consumerConfigProperties.setProperty("auto.commit.enable", "false");
    }
}

public static final class AckConsumer extends TheConsumer {
    public AckConsumer(KafkaConsumerConfig kafkaConsumerConfig) {
        super(kafkaConsumerConfig, true);
        this.consumerConfigProperties.setProperty("auto.commit.enable", "true");
    }
}

public static class TheConsumer {

    protected Properties consumerConfigProperties;
    private boolean ack;
    private StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    private StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    public TheConsumer(KafkaConsumerConfig kafkaConsumerConfig, boolean ack) {
        this.ack = ack;
        this.consumerConfigProperties = new Properties();
        this.consumerConfigProperties.putAll(kafkaConsumerConfig.getProperties());
    }

    /**
     * @param topic the topic
     * @param group the consumer group
     * @param size  the number of messages to consume
     **/
    public List<String> consume(String topic, String group, int size) {
        if (StringUtils.isNotBlank(group)) {
            this.consumerConfigProperties.setProperty("group.id", group);
        }
        ConsumerConnector consumerConnector = null;
        try {
            consumerConnector = Consumer.createJavaConsumerConnector(
                    new ConsumerConfig(this.consumerConfigProperties));
            Map<String, Integer> topics = new HashMap<>(1);
            topics.put(topic, 1);
            Map<String, List<KafkaStream<String, String>>> streams =
                    consumerConnector.createMessageStreams(topics, keyDecoder, valueDecoder);
            if (!(CollectionUtils.isEmpty(streams) || CollectionUtils.isEmpty(streams.get(topic)))) {
                List<String> messages = new ArrayList<>();
                KafkaStream<String, String> messageStream = streams.get(topic).get(0);
                for (ConsumerIterator<String, String> it = messageStream.iterator(); it.hasNext(); ) {
                    MessageAndMetadata<String, String> messageAndMetadata = it.next();
                    messages.add(messageAndMetadata.message());
                    if (this.ack) {
                        consumerConnector.commitOffsets();
                    }
                    if (size <= messages.size()) {
                        break;
                    }
                }
                return messages;
            }
        } catch (Exception e) {
            LOGGER.error(String.format("%s ack consume has errors. topic=%s, group=%s, size=%d.",
                    this.ack ? "" : "no", topic, group, size), e);
        } finally {
            if (consumerConnector != null) {
                consumerConnector.shutdown();
            }
        }
        return Collections.EMPTY_LIST;
    }
}
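
The only difference between the two subclasses is whether offsets are committed. A short sketch of the peek-then-consume pattern; the topic and group names are placeholders:

// Sketch: NoAckConsumer reads without committing offsets (peek), so the same
// messages remain visible to later reads; AckConsumer commits after each
// message and advances the group's position.
KafkaConsumerConfig cfg = new KafkaConsumerConfig();
cfg.setZkConnect("127.0.0.1:2181");

TheConsumer peeker = new NoAckConsumer(cfg);
List<String> peeked = peeker.consume("demo-topic", "demo-group", 5);   // offsets unchanged

TheConsumer reader = new AckConsumer(cfg);
List<String> consumed = reader.consume("demo-topic", "demo-group", 5); // offsets advanced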

  Consumer test

public class KafkaTest extends BaseUnitTest {

    private static Logger LOGGER = LoggerFactory.getLogger(KafkaTest.class);

    @Value("${kafka.connection.zkconnect}")
    private String zkConnect;

    private static final AutoZkClient zkClient = new AutoZkClient("127.0.0.1:2181");

    private static final String TEST_TOPIC = "message-center-biz-expiration-reminder-topic";
    private static final String TEST_GROUP = "hjz-group";

    @Autowired
    private NoAckConsumer noAckConsumer;
    @Autowired
    private AckConsumer ackConsumer;
    @Autowired
    private KafkaProducer kafkaProducer;

    private CountDownLatch finishCountDownLatch = new CountDownLatch(20);

    @Test
    public void testNoAckConsume() throws InterruptedException {
        class ConsumeRun implements Callable<List<String>> {
            private TheConsumer consumer;
            private CountDownLatch countDownLatch;

            public ConsumeRun(TheConsumer consumer, CountDownLatch countDownLatch) {
                this.consumer = consumer;
                this.countDownLatch = countDownLatch;
            }

            @Override
            public List<String> call() {
                try {
                    this.countDownLatch.await();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                List<String> messages = consumer.consume(TEST_TOPIC, TEST_GROUP, 8);
                finishCountDownLatch.countDown();
                return messages;
            }
        }

        ExecutorService executorService = Executors.newFixedThreadPool(20);
        CountDownLatch countDownLatch = new CountDownLatch(1);
        List<Future<List<String>>> noAckConsumeFutures = new ArrayList<>(), ackConsumeFutures = new ArrayList<>();
        for (int i = 0; i < 10; ++i) {
            ConsumeRun consumeRun = new ConsumeRun(this.noAckConsumer, countDownLatch);
            noAckConsumeFutures.add(executorService.submit(consumeRun));
        }
        for (int i = 0; i < 10; ++i) {
            ConsumeRun consumeRun = new ConsumeRun(this.ackConsumer, countDownLatch);
            ackConsumeFutures.add(executorService.submit(consumeRun));
        }
        countDownLatch.countDown();
        finishCountDownLatch.await();

        System.out.println("no ack consumers response....");
        noAckConsumeFutures.forEach(future -> {
            try {
                System.out.println(future.get());
            } catch (Exception e) {
            }
        });
        System.out.println("\n\nack consumers response....");
        ackConsumeFutures.forEach(future -> {
            try {
                System.out.println(future.get());
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
    }

    @Test
    public void testProduce() {
        for (int i = 0; i < 100; ++i) {
            kafkaProducer.send(TEST_TOPIC, String.valueOf(i), "message " + i);
        }
        KafkaUtils.consumerDetail(TEST_TOPIC, TEST_GROUP);
    }

    public static void createTopic() {
        MessageCenterConstants.TopicConfig topicConfig = new MessageCenterConstants.TopicConfig();
        topicConfig.setName("kafka-test");
        KafkaMessageConfig.KafkaUtils.topicCreate(topicConfig);
    }

    public static void testKafka() {
        createTopic();
        System.out.println(KafkaUtils.topicsList());

        Properties properties = new Properties();
        properties.put("min.cleanable.dirty.ratio", "0.3");
        KafkaMessageConfig.KafkaUtils.topicChangeConfig(TEST_TOPIC, properties);
        System.out.println(KafkaMessageConfig.KafkaUtils.topicConfig(TEST_TOPIC));

        KafkaUtils.topicAlterPartitions(TEST_TOPIC, 7);
        KafkaMessageConfig.KafkaUtils.topicDescribe(TEST_TOPIC);
        kafka.utils.ZkUtils.getSortedBrokerList(zkClient);
    }

    public static void testTopicDescribe() {
        KafkaUtils.setZkClient(zkClient);
        new MessageCenterStateController().topicDescribe("message-center-recharge-transaction-push-topic");
    }

    public static void testConsumerDescribe() {
        KafkaUtils.setZkClient(zkClient);
        String[] args = {
                "--zookeeper", zkClient.getZkServers(),
                "--group", "",
                "--topic", "message-center-recharge-transaction-push-topic"
        };
        ConsumerOffsetChecker.main(args);
    }

    public static void testConsumerList() {
        KafkaUtils.setZkClient(zkClient);
        String[] args = {
                "--broker-list", zkClient.getZkServers(),
                "--topic", "message-center-recharge-transaction-push-topic",
                "--list"
        };
        SimpleConsumerShell.main(args);
    }

    public static void main(String[] args) {
        testConsumerList();
    }
}
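
The test autowires a KafkaProducer wrapper that the post never shows. A minimal sketch of what it could look like on the 0.8.2 new-producer API; the class name and send(topic, key, message) signature match the test, everything else is an assumption:

import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerRecord;

// Hypothetical sketch of the KafkaProducer wrapper used in testProduce().
public class KafkaProducer {

    private final org.apache.kafka.clients.producer.KafkaProducer<String, String> producer;

    public KafkaProducer(String brokerList) {
        Properties props = new Properties();
        props.put("bootstrap.servers", brokerList); // assumed, e.g. "127.0.0.1:9092"
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        this.producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);
    }

    public void send(String topic, String key, String message) {
        // fire-and-forget, matching kafkaProducer.send(TEST_TOPIC, key, message) above
        producer.send(new ProducerRecord<>(topic, key, message));
    }
}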

  Test results for no-ack and ack consumption

no ack consumers response....
[message 8, message 14, message 23, message 32, message 41, message 50, message 8, message 14]
[message 14, message 23, message 32, message 41, message 50, message 12, message 21, message 30]
[message 17, message 26, message 35, message 44, message 53, message 62, message 71, message 80]
[message 19, message 28, message 37, message 46, message 55, message 64, message 73, message 82]
[message 89, message 98, message 89, message 98, message 19, message 28, message 37, message 46]
[message 0, message 39, message 48, message 57, message 66, message 75, message 84, message 93]
[message 1, message 49, message 58, message 67, message 76, message 85, message 94, message 77]
[message 8, message 14, message 23, message 32, message 41, message 50, message 89, message 98]
[message 17, message 26, message 35, message 44, message 53, message 62, message 71, message 80]
[message 2, message 59, message 68, message 77, message 86, message 95, message 0, message 39]

ack consumers response....
[message 7, message 13, message 22, message 31, message 40, message 5, message 11, message 20]
[message 17, message 26, message 35, message 44, message 53, message 62, message 71, message 80]
[message 77, message 86, message 95, message 67, message 76, message 85, message 94, message 0]
[message 9, message 15, message 24, message 33, message 42, message 51, message 60, message 6]
[message 4, message 10, message 79, message 88, message 97, message 2, message 59, message 68]
[message 29, message 38, message 47, message 56, message 65, message 74, message 83, message 92]
[message 16, message 25, message 34, message 43, message 52, message 61, message 70, message 8]
[message 18, message 27, message 36, message 45, message 54, message 63, message 72, message 81]
[message 3, message 69, message 78, message 87, message 96, message 1, message 49, message 58]
[message 14, message 23, message 32, message 41, message 50, message 89, message 98, message 12]

  Analysis of the test results: the no-ack consumer enables message peeking. Because it disables auto-commit and never calls commitOffsets, consuming does not advance the group's position in ZooKeeper, which is why the same messages reappear across the no-ack outputs above.

 

5. Screenshots

  Topic details (screenshot)

  All topics (screenshot)

  Consumer details (screenshot)

  Message peeking (screenshot)

Reposted from: https://www.cnblogs.com/hujunzheng/p/9327927.html
