cloudwatch_将CloudWatch Logs与Cloudhub Mule集成

cloudwatch

在此博客中,我将解释如何为您的Mule CloudHub应用程序启用AWS CloudWatch日志。 AWS提供了CloudWatch Logs服务,以便您可以更好地管理日志,而且它比Splunk等商业日志方案便宜。 由于CloudHub会自动滚动(rollover)超过100 MB的日志,因此我们需要一种机制来更有效地管理日志。 为此,我们创建了这个自定义Appender,它将日志发送到CloudWatch。

package com.javaroots.appenders;import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toList;import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;
import org.apache.logging.log4j.status.StatusLogger;import com.amazonaws.regions.Regions;
import com.amazonaws.services.logs.AWSLogs;
import com.amazonaws.services.logs.model.CreateLogGroupRequest;
import com.amazonaws.services.logs.model.CreateLogStreamRequest;
import com.amazonaws.services.logs.model.CreateLogStreamResult;
import com.amazonaws.services.logs.model.DataAlreadyAcceptedException;
import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
import com.amazonaws.services.logs.model.InputLogEvent;
import com.amazonaws.services.logs.model.InvalidSequenceTokenException;
import com.amazonaws.services.logs.model.LogGroup;
import com.amazonaws.services.logs.model.LogStream;
import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.amazonaws.services.logs.model.PutLogEventsResult;@Plugin(name = "CLOUDW", category = "Core", elementType = "appender", printObject = true)
public class CloudwatchAppender extends AbstractAppender {/*** */private static final long serialVersionUID = 12321345L;private static Logger logger2 = LogManager.getLogger(CloudwatchAppender.class);private final Boolean DEBUG_MODE = System.getProperty("log4j.debug") != null;/*** Used to make sure that on close() our daemon thread isn't also trying to sendMessage()s*/private Object sendMessagesLock = new Object();/*** The queue used to buffer log entries*/private LinkedBlockingQueue loggingEventsQueue;/*** the AWS Cloudwatch Logs API client*/private AWSLogs awsLogsClient;private AtomicReference lastSequenceToken = new AtomicReference<>();/*** The AWS Cloudwatch Log group name*/private String logGroupName;/*** The AWS Cloudwatch Log stream name*/private String logStreamName;/*** The queue / buffer size*/private int queueLength = 1024;/*** The maximum number of log entries to send in one go to the AWS Cloudwatch Log service*/private int messagesBatchSize = 128;private AtomicBoolean cloudwatchAppenderInitialised = new AtomicBoolean(false);private CloudwatchAppender(final String name,final Layout layout,final Filter filter,final boolean ignoreExceptions,String logGroupName, String logStreamName,Integer queueLength,Integer messagesBatchSize) {super(name, filter, layout, ignoreExceptions);this.logGroupName = logGroupName;this.logStreamName = logStreamName;this.queueLength = queueLength;this.messagesBatchSize = messagesBatchSize;this.activateOptions();}@Overridepublic void append(LogEvent event) {if (cloudwatchAppenderInitialised.get()) {loggingEventsQueue.offer(event);} else {// just do nothing}}public void activateOptions() {if (isBlank(logGroupName) || isBlank(logStreamName)) {logger2.error("Could not initialise CloudwatchAppender because either or both LogGroupName(" + logGroupName + ") and LogStreamName(" + logStreamName + ") are null or empty");this.stop();} else {//below lines work with aws version 1.9.40 for local build//this.awsLogsClient = new 
AWSLogsClient();//awsLogsClient.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_2));this.awsLogsClient = com.amazonaws.services.logs.AWSLogsClientBuilder.standard().withRegion(Regions.AP_SOUTHEAST_2).build();loggingEventsQueue = new LinkedBlockingQueue<>(queueLength);try {initializeCloudwatchResources();initCloudwatchDaemon();cloudwatchAppenderInitialised.set(true);} catch (Exception e) {logger2.error("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName, e);if (DEBUG_MODE) {System.err.println("Could not initialise Cloudwatch Logs for LogGroupName: " + logGroupName + " and LogStreamName: " + logStreamName);e.printStackTrace();}}}}private void initCloudwatchDaemon() {Thread t = new Thread(() -> {while (true) {try {if (loggingEventsQueue.size() > 0) {sendMessages();}Thread.currentThread().sleep(20L);} catch (InterruptedException e) {if (DEBUG_MODE) {e.printStackTrace();}}}});t.setName("CloudwatchThread");t.setDaemon(true);t.start();}private void sendMessages() {synchronized (sendMessagesLock) {LogEvent polledLoggingEvent;final Layout layout = getLayout();List loggingEvents = new ArrayList<>();try {while ((polledLoggingEvent = loggingEventsQueue.poll()) != null && loggingEvents.size() <= messagesBatchSize) {loggingEvents.add(polledLoggingEvent);}List inputLogEvents = loggingEvents.stream().map(loggingEvent -> new InputLogEvent().withTimestamp(loggingEvent.getTimeMillis()).withMessage(layout == null ?loggingEvent.getMessage().getFormattedMessage():new String(layout.toByteArray(loggingEvent), StandardCharsets.UTF_8))).sorted(comparing(InputLogEvent::getTimestamp)).collect(toList());if (!inputLogEvents.isEmpty()) {PutLogEventsRequest putLogEventsRequest = new PutLogEventsRequest(logGroupName,logStreamName,inputLogEvents);try {putLogEventsRequest.setSequenceToken(lastSequenceToken.get());PutLogEventsResult result = 
awsLogsClient.putLogEvents(putLogEventsRequest);lastSequenceToken.set(result.getNextSequenceToken());} catch (DataAlreadyAcceptedException dataAlreadyAcceptedExcepted) {putLogEventsRequest.setSequenceToken(dataAlreadyAcceptedExcepted.getExpectedSequenceToken());PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);lastSequenceToken.set(result.getNextSequenceToken());if (DEBUG_MODE) {dataAlreadyAcceptedExcepted.printStackTrace();}} catch (InvalidSequenceTokenException invalidSequenceTokenException) {putLogEventsRequest.setSequenceToken(invalidSequenceTokenException.getExpectedSequenceToken());PutLogEventsResult result = awsLogsClient.putLogEvents(putLogEventsRequest);lastSequenceToken.set(result.getNextSequenceToken());if (DEBUG_MODE) {invalidSequenceTokenException.printStackTrace();}}}} catch (Exception e) {if (DEBUG_MODE) {logger2.error(" error inserting cloudwatch:",e);e.printStackTrace();}}}}private void initializeCloudwatchResources() {DescribeLogGroupsRequest describeLogGroupsRequest = new DescribeLogGroupsRequest();describeLogGroupsRequest.setLogGroupNamePrefix(logGroupName);Optional logGroupOptional = awsLogsClient.describeLogGroups(describeLogGroupsRequest).getLogGroups().stream().filter(logGroup -> logGroup.getLogGroupName().equals(logGroupName)).findFirst();if (!logGroupOptional.isPresent()) {CreateLogGroupRequest createLogGroupRequest = new CreateLogGroupRequest().withLogGroupName(logGroupName);awsLogsClient.createLogGroup(createLogGroupRequest);}DescribeLogStreamsRequest describeLogStreamsRequest = new DescribeLogStreamsRequest().withLogGroupName(logGroupName).withLogStreamNamePrefix(logStreamName);Optional logStreamOptional = awsLogsClient.describeLogStreams(describeLogStreamsRequest).getLogStreams().stream().filter(logStream -> logStream.getLogStreamName().equals(logStreamName)).findFirst();if (!logStreamOptional.isPresent()) {CreateLogStreamRequest createLogStreamRequest = new 
CreateLogStreamRequest().withLogGroupName(logGroupName).withLogStreamName(logStreamName);CreateLogStreamResult o = awsLogsClient.createLogStream(createLogStreamRequest);}}private boolean isBlank(String string) {return null == string || string.trim().length() == 0;}protected String getSimpleStacktraceAsString(final Throwable thrown) {final StringBuilder stackTraceBuilder = new StringBuilder();for (StackTraceElement stackTraceElement : thrown.getStackTrace()) {new Formatter(stackTraceBuilder).format("%s.%s(%s:%d)%n",stackTraceElement.getClassName(),stackTraceElement.getMethodName(),stackTraceElement.getFileName(),stackTraceElement.getLineNumber());}return stackTraceBuilder.toString();}@Overridepublic void start() {super.start();}@Overridepublic void stop() {super.stop();while (loggingEventsQueue != null && !loggingEventsQueue.isEmpty()) {this.sendMessages();}}@Overridepublic String toString() {return CloudwatchAppender.class.getSimpleName() + "{"+ "name=" + getName() + " loggroupName=" + logGroupName+" logstreamName=" + logStreamName;}@PluginFactory@SuppressWarnings("unused")public static CloudwatchAppender createCloudWatchAppender(@PluginAttribute(value = "queueLength" ) Integer queueLength,@PluginElement("Layout") Layout layout,@PluginAttribute(value = "logGroupName") String logGroupName,@PluginAttribute(value = "logStreamName") String logStreamName,@PluginAttribute(value = "name") String name,@PluginAttribute(value = "ignoreExceptions", defaultBoolean = false) Boolean ignoreExceptions,@PluginAttribute(value = "messagesBatchSize") Integer messagesBatchSize){return new CloudwatchAppender(name, layout, null, ignoreExceptions, logGroupName, logStreamName ,queueLength,messagesBatchSize);}
}

我们在pom.xml文件中添加依赖项。

<!-- AWS CloudWatch Logs SDK client used by the custom appender. -->
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-logs</artifactId>
    <!-- for local 3.8.5 we need to use this version cloudhub 3.8.5 has jackson 2.6.6 -->
    <!-- <version>1.9.40</version> -->
    <version>1.11.105</version>
    <exclusions>
        <!-- Exclude log4j 1.x bridge and jackson: CloudHub's runtime supplies its own. -->
        <exclusion>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-1.2-api</artifactId>
        </exclusion>
        <exclusion>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
        </exclusion>
        <exclusion>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api -->
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-api</artifactId>
    <version>2.5</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core -->
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-core</artifactId>
    <version>2.5</version>
</dependency>

现在我们需要修改log4j2.xml。 还要添加自定义cloudwatch附加程序和CloudhubLogs附加程序,以便我们也可以获取cloudhub上的日志。

<?xml version="1.0" encoding="utf-8"?>
<!-- FIX: "packages" must list the package containing the custom appender plugin.
     The appender in this article lives in com.javaroots.appenders (see the Java
     source's package declaration); the original value "au.edu.vu.appenders" would
     prevent log4j2 from discovering the CLOUDW plugin. -->
<Configuration status="trace" packages="com.javaroots.appenders,com.mulesoft.ch.logging.appender">
    <!--These are some of the loggers you can enable.
        There are several more you can find in the documentation.
        Besides this log4j configuration, you can also use Java VM environment variables
        to enable other logs like network (-Djavax.net.debug=ssl or all) and
        Garbage Collector (-XX:+PrintGC). These will be append to the console, so you will see them
        in the mule_ee.log file. -->
    <Appenders>
        <CLOUDW name="CloudW"
                logGroupName="test-log-stream"
                logStreamName="test44"
                messagesBatchSize="${sys:cloudwatch.msg.batch.size}"
                queueLength="${sys:cloudwatch.queue.length}">
            <PatternLayout pattern="%d [%t] %-5p %c - %m%n"/>
        </CLOUDW>
        <Log4J2CloudhubLogAppender name="CLOUDHUB"
                addressProvider="com.mulesoft.ch.logging.DefaultAggregatorAddressProvider"
                applicationContext="com.mulesoft.ch.logging.DefaultApplicationContext"
                appendRetryIntervalMs="${sys:logging.appendRetryInterval}"
                appendMaxAttempts="${sys:logging.appendMaxAttempts}"
                batchSendIntervalMs="${sys:logging.batchSendInterval}"
                batchMaxRecords="${sys:logging.batchMaxRecords}"
                memBufferMaxSize="${sys:logging.memBufferMaxSize}"
                journalMaxWriteBatchSize="${sys:logging.journalMaxBatchSize}"
                journalMaxFileSize="${sys:logging.journalMaxFileSize}"
                clientMaxPacketSize="${sys:logging.clientMaxPacketSize}"
                clientConnectTimeoutMs="${sys:logging.clientConnectTimeout}"
                clientSocketTimeoutMs="${sys:logging.clientSocketTimeout}"
                serverAddressPollIntervalMs="${sys:logging.serverAddressPollInterval}"
                serverHeartbeatSendIntervalMs="${sys:logging.serverHeartbeatSendIntervalMs}"
                statisticsPrintIntervalMs="${sys:logging.statisticsPrintIntervalMs}">
            <PatternLayout pattern="[%d{MM-dd HH:mm:ss}] %-5p %c{1} [%t] CUSTOM: %m%n"/>
        </Log4J2CloudhubLogAppender>
    </Appenders>
    <Loggers>
        <!-- Http Logger shows wire traffic on DEBUG -->
        <AsyncLogger name="org.mule.module.http.internal.HttpMessageLogger" level="WARN"/>
        <!-- JDBC Logger shows queries and parameters values on DEBUG -->
        <AsyncLogger name="com.mulesoft.mule.transport.jdbc" level="WARN"/>
        <!-- CXF is used heavily by Mule for web services -->
        <AsyncLogger name="org.apache.cxf" level="WARN"/>
        <!-- Apache Commons tend to make a lot of noise which can clutter the log-->
        <AsyncLogger name="org.apache" level="WARN"/>
        <!-- Reduce startup noise -->
        <AsyncLogger name="org.springframework.beans.factory" level="WARN"/>
        <!-- Mule classes -->
        <AsyncLogger name="org.mule" level="INFO"/>
        <AsyncLogger name="com.mulesoft" level="INFO"/>
        <!-- Reduce DM verbosity -->
        <AsyncLogger name="org.jetel" level="WARN"/>
        <AsyncLogger name="Tracking" level="WARN"/>
        <AsyncRoot level="INFO">
            <AppenderRef ref="CLOUDHUB" level="INFO"/>
            <AppenderRef ref="CloudW" level="INFO"/>
        </AsyncRoot>
    </Loggers>
</Configuration>

最后,我们需要在cloudhub运行时管理器上禁用cloudhub日志。

这适用于CloudHub Mule运行时版本3.8.4。 CloudHub 3.8.5版本存在一些问题:虽然该版本能够正确完成初始化并发送日志,但会丢失部分事件和消息。

翻译自: https://www.javacodegeeks.com/2017/10/integrate-cloudwatch-logs-cloudhub-mule.html

cloudwatch

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/334815.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

linux 权限 mask,【自学Linux】Linux用户、组、权限(一)

用户、组、权限权限&#xff1a;r,w,x文件:r:可读&#xff0c;可以使用类似cat等命令查看文件内容w:可写&#xff0c;可以编辑或删除此文件x:可执行&#xff0c;eXacutable&#xff0c;可以命令提示符下当中命令提交给内核运行0 000 ---&#xff1a;无权限1 001 --x&#xff1a…

要多大内存才满足_什么是延迟满足能力?“延迟满足”能力对孩子有多重要家长要清楚...

文丨饭饭妈记得去年过春节的时候&#xff0c;家里来了两个亲戚&#xff0c;他们分别有一个小孩&#xff0c;当时大家都在准备年夜饭。其中一份糯米团子准备好放在桌子上之后&#xff0c;亲戚家的两个小孩都非常想要吃&#xff0c;亲戚对孩子说&#xff1a;“你们现在不能吃&…

Linux 命令之 cd 命令-切换目录

文章目录介绍语法格式常用选项参考示例介绍 cd 命令是 change directory 中单词的首字母缩写&#xff0c;其英文释义是改变目录&#xff0c;所以该命令的功能是从当前目录切换到指定目录。 其中目录的路径可分为绝对路径和相对路径。若目录名称省略&#xff0c;则切换至使用者…

Linux 命令之 pgrep -- 用于检索(搜索进程/查找进程)当前正在运行的进程

文章目录命令介绍常用选项参考示例示例 1&#xff0c;查看所有名称为 httpd 的进程示例 2&#xff0c;查看进程号最小的&#xff0c;名称为 httpd 的进程示例 3&#xff0c;查看进程号最大的&#xff0c;名称为 httpd 的进程命令介绍 pgrep 的意思是"进程号全局正则匹配输…

java端到端_Java应用程序性能监控:复杂分布式应用程序的端到端性能

java端到端通过从您的应用程序学习企业APM产品&#xff0c;发现更快&#xff0c;更有效的性能监控。 参加AppDynamics APM导览&#xff01; 在最复杂和分布式环境中端到端监视Java应用程序性能-专注于业务事务。 自动发现的业务交易&#xff0c;动态基准&#xff0c;代码级诊断…

Linux内核链表访问链表头指针,linux内核——链表结构分析

http://blog.csdn.net/tigerjibo/article/details/8299584简单整理(使用linux3.0内核)这里首先学习的是内核中一种抽象定义的双向链表&#xff0c;为了提高其扩展性。内核中链表的描述数据结构位置&#xff1a;Types.h (linux-3.0.12\include\linux) 5444 2011/11/2922…

遗传算法求二元函数极值怎么编码_用遗传算法求复杂函数的极值点

确定遗传在元素个体&#xff0c;遗传得到的个体和变异个体中选取最好的30个个体(对应的函数值最大的30个个体)作为下一次迭代的父样本。from random import randintfrom numpy import sindef decode(g):return [((g&0xfff) - 2048) * 0.001, ((g>>12) - 2048) * 0.00…

Linux 命令之 ulimit 命令-控制shell程序的资源

文章目录介绍常用选项参考示例介绍 用来限制系统用户对 shell 资源的访问。如果不懂什么意思&#xff0c;下面一段内容可以帮助你理解&#xff1a; 假设有这样一种情况&#xff0c;当一台 Linux 主机上同时登陆了 10 个人&#xff0c;在系统资源无限制的情况下&#xff0c;这…

java jvm虚拟机_Java虚拟机(JVM)简介

java jvm虚拟机什么是JVM Java虚拟机&#xff08;JVM&#xff09;是使计算机能够运行Java程序的抽象计算机。 JVM有三个概念&#xff1a; 1.规格 2.实施 3.实例。 该规范是正式描述JVM实现要求的文档。 具有单一规范可确保所有实现都可互操作。 JVM实现是满足JVM规范要求的…

linux qt应用程序全屏,QT在ubuntu下实现界面全屏,侧边栏隐藏,上边栏隐藏【实例】...

最近做一个Qt项目(ubuntu 14.04)&#xff0c;需要将界面全屏&#xff0c;全屏之后&#xff0c;ubuntu侧边栏隐藏&#xff0c;上边栏也隐藏&#xff0c;只显示Qt的界面。那么先介绍几个函数&#xff1a;Qt全屏显示函数&#xff1a;showFullScreen()Qt最大化显示函数&#xff1a;…

copyof java_JDK10——copyOf方法

package com.common.learn;import java.util.ArrayList;import java.util.HashMap;import java.util.HashSet;import java.util.List;import java.util.Map;import java.util.Set;/*** author 30378** JDK10中新增73个新功能API类库* 在java.util.List java.util.Set java.util…

Linux 命令之 w 命令-显示目前登入系统的用户信息

文章目录介绍常用选项参考示例显示目前登入系统的用户信息不打印头信息显示用户从哪登录使用短输出格式介绍 w 命令用于显示已经登陆系统的用户列表&#xff0c;并显示用户正在执行的指令。执行这个命令可得知目前登入系统的用户有那些人&#xff0c;以及他们正在执行的程序。…

linux系统引导分区,揭秘Linux(二)——操作系统引导与硬盘分区

通过前面的介绍想必大家对Linux有了个基础的了解&#xff0c;那么各位肯定该说是不是要装操作系统了&#xff0c;对不起让各位失望了&#xff0c;这次所讲解的是Linux运行原理与硬盘分区&#xff0c;这是重中之重啊&#xff01;请一定要细细品读。为了更好地了解Linux系统的运行…

jax-rs/jersey_JAX-RS 2.1的Jersey客户端依赖性

jax-rs/jersey泽西岛是JAX-RS 2.1的参考实现。 为了在企业容器外部运行具有JSON-P和JSON-B映射的JAX-RS 2.1客户端&#xff0c;需要以下Jersey依赖项。 Jersey客户端2.6版实现了JAX-RS 2.1 API。 以下依赖性将客户端运行时添加到项目中&#xff1a; <dependency><gr…

get占位符传多个参数_mybatis多个参数(不使用@param注解情况下),sql参数占位符正确写法...

useActualParamName配置useActualParamName允许使用方法签名中的名称作为语句参数名称。 为了使用该特性&#xff0c;你的工程必须采用Java 8编译&#xff0c;并且加上-parameters选项。(从3.4.1开始)true | falsetruemybatis的全局配置useActualParamName决定了mapper中参数的…

HTTP协议简介_请求消息/请求数据包/请求报文_响应消息/响应数据包/响应报文

文章目录HTTP 介绍请求数据包/请求消息/请求报文请求数据包解析响应数据包/响应消息/响应报文HTTP 介绍 概念&#xff1a;Hyper Text Transfer Protocol 超文本传输协议 传输协议&#xff1a;定义了客户端和服务器端通信时发送数据的格式 特点: 1.基于TCP/IP的高级协议 2.默认…

Linux的open函数的调用过程,Linux 中open系统调用实现原理

用户空间的函数在内核里面的入口函数是sys_open通过grep open /usr/include/asm/unistd_64.h查找到的#define __NR_open2__SYSCALL(__NR_open, sys_open)观察unistd_64.h&#xff0c;我们可以猜测用户空间open函数最终调用的系统调用号是2来发起的sys_open系统调用(毕竟glibc一…

java登录界面命令_Java命令行界面(第29部分):自己动手

java登录界面命令本系列有关从Java解析命令行参数的系列简要介绍了28个开源库&#xff0c;这些库可用于处理Java代码中的命令行参数。 即使涵盖了这28个库&#xff0c;该系列也没有涵盖用于解析Java命令行选项的所有可用开源库。 例如&#xff0c;本系列文章未涉及docopt &…

log nginx 客户端请求大小_nginx

博主会将与Nginx有关的知识点总结到"nginx短篇系列"文章中&#xff0c;如果你对nginx不是特别了解&#xff0c;请按照顺序阅读"nginx短篇系列"&#xff0c;以便站在前文的基础上理解新的知识点。当我们访问nginx服务时&#xff0c;nginx会记录日志&#xf…

Linux 下如何查询 tomcat 的安装目录

在命令终端输入如下命令&#xff1a; [roothtlwk0001host ~]# sudo find / -name *tomcat*