17、ELK

17、ELK

helm 安装 elkfk(kafka 集群外可访问)

ES/Kibana <— Logstash <— Kafka <— Filebeat

部署顺序:

1、elasticsearch

2、kibana

3、kafka

4、logstash

5、filebeat

kubectl create ns elk

helm3部署elkfk

1、elasticsearch
# Add the Elastic Helm repo and pull the elasticsearch chart (v7.17.3).
# (Original had all commands fused onto one line; the repo URL had swallowed
# the following "helm repo list" command.)
helm repo add elastic https://helm.elastic.co
helm repo list
helm repo update
helm search repo elastic/elasticsearch
cd && helm pull elastic/elasticsearch --untar --version 7.17.3
cd elasticsearch
# Write the elasticsearch chart values. (Original heredoc body was flattened
# onto single lines, producing invalid YAML; restored block structure.)
cat > values-prod.yaml << EOF
# 集群名称
clusterName: "elasticsearch"
# ElasticSearch 6.8+ 默认安装了 x-pack 插件,部分功能免费,这里选禁用
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.17.3"
imagePullPolicy: "IfNotPresent"
esConfig:
  elasticsearch.yml: |
    network.host: 0.0.0.0
    cluster.name: "elasticsearch"
    xpack.security.enabled: false
resources:
  limits:
    cpu: "2"
    memory: "4Gi"
  requests:
    cpu: "1"
    memory: "2Gi"
volumeClaimTemplate:
  storageClassName: "nfs-storage"
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 2Ti
service:
  type: NodePort
  port: 9000
  nodePort: 31311
EOF

禁用 Kibana 安全提示(Elasticsearch built-in security features are not enabled):即上面 esConfig 中的 xpack.security.enabled: false

helm upgrade --install --namespace elk es -f ./values-prod.yaml .

验证

curl 192.168.1.200:31311/_cat/healthcurl 192.168.1.200:31311/_cat/nodes
2、kibana
helm search repo elastic/kibanacd && helm pull elastic/kibana --untar --version 7.17.3cd kibana
# Write the kibana chart values and install.
# Fixes vs original: (1) heredoc body was flattened into invalid YAML;
# (2) `kibanaConfig` appeared TWICE as a top-level key — most YAML parsers
# keep only the last occurrence, which would have dropped server.port /
# server.host / elasticsearch.hosts. Merged i18n.locale into the single
# kibanaConfig block.
cat > values-prod.yaml << 'EOF'
kibanaConfig:
  kibana.yml: |
    server.port: 5601
    server.host: "0.0.0.0"
    elasticsearch.hosts: [ "http://elasticsearch-master-headless:9200" ]
    i18n.locale: "zh-CN"
resources:
  limits:
    cpu: "2"
    memory: "2Gi"
  requests:
    cpu: "1"
    memory: "1Gi"
service:
  #type: ClusterIP
  type: NodePort
  loadBalancerIP: ""
  port: 5601
  nodePort: "30026"
EOF
helm upgrade --install --namespace elk kibana -f ./values-prod.yaml .
# Kibana Ingress with basic-auth + TLS. (Original manifest and the htpasswd /
# secret commands were flattened onto single lines; restored structure.)
cat > ~/kibana/kibana-Ingress.yml << 'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: elk
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: 'true'
    nginx.ingress.kubernetes.io/proxy-body-size: '4G'
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: kibana-auth-secret
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - admin'
spec:
  ingressClassName: nginx
  rules:
    - host: kibana.huanghuanhui.cloud
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana-kibana
                port:
                  number: 5601
  tls:
    - hosts:
        - kibana.huanghuanhui.cloud
      secretName: kibana-ingress-tls
EOF
# Create the basic-auth secret referenced by the annotations above.
yum -y install httpd-tools
cd ~/kibana && htpasswd -bc auth admin Admin@2024
kubectl create secret generic kibana-auth-secret --from-file=auth -n elk
# Create the TLS secret referenced by spec.tls.
kubectl create secret -n elk \
  tls kibana-ingress-tls \
  --key=/root/ssl/huanghuanhui.cloud.key \
  --cert=/root/ssl/huanghuanhui.cloud.crt
kubectl apply -f ~/kibana/kibana-Ingress.yml

访问地址:kibana.huanghuanhui.cloud

账号密码:admin、Admin@2024

http://192.168.1.201:30026/app/dev_tools#/console

GET _cat/nodes
GET _cat/health
GET _cat/indices
3、kafka(k8s部署kafka集群 ==》外部访问)
# ZooKeeper: headless Service (peer discovery) + NodePort client Service +
# 3-replica StatefulSet. (Original manifest was flattened into invalid YAML;
# restored block structure, content unchanged.)
mkdir -p ~/kafka-yml && cd ~/kafka-yml
cat > ~/kafka-yml/zk.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-cluster
  namespace: elk
  name: zookeeper-cluster
spec:
  selector:
    app: zookeeper-cluster
  ports:
    - name: client
      port: 2181
      targetPort: 2181
    - name: follower
      port: 2888
      targetPort: 2888
    - name: leader
      port: 3888
      targetPort: 3888
  clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
  namespace: elk
  name: zookeeper-cs
spec:
  selector:
    app: zookeeper-cluster
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 30152
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: elk
  name: crs-zookeeper
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: zookeeper-cluster
  selector:
    matchLabels:
      app: zookeeper-cluster
  template:
    metadata:
      labels:
        component: zookeeper-cluster
        app: zookeeper-cluster
    spec:
      containers:
        - name: zookeeper
          image: bitnami/zookeeper:3.8.2
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          lifecycle:
            postStart:
              exec:
                command:
                  - "sh"
                  - "-c"
                  - >
                    echo $(( $(cat /etc/hosts | grep zookeeper | awk '{print($3)}' | awk '{split($0,array,"-")} END{print array[3]}') + 1 )) > /bitnami/zookeeper/data/myid
          env:
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            - name: ZOO_SERVERS
              value: crs-zookeeper-0.zookeeper-cluster.elk.svc.cluster.local:2888:3888,crs-zookeeper-1.zookeeper-cluster.elk.svc.cluster.local:2888:3888,crs-zookeeper-2.zookeeper-cluster.elk.svc.cluster.local:2888:3888
          volumeMounts:
            - name: zoodata-outer
              mountPath: /bitnami/zookeeper
  volumeClaimTemplates:
    - metadata:
        name: zoodata-outer
      spec:
        storageClassName: nfs-storage
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 2Ti
EOF
kubectl apply -f ~/kafka-yml/zk.yml
# Kafka: headless Service + one NodePort Service per broker (30127-30129, so
# each broker is reachable from outside the cluster) + 3-replica StatefulSet.
# Each pod derives its advertised-listener port from its ordinal.
# (Original manifest was flattened into invalid YAML; restored structure.)
cat > ~/kafka-yml/kafka.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  namespace: elk
  name: kafka-headless
spec:
  selector:
    app: kafka-cluster
  ports:
    - name: client
      port: 9092
      targetPort: 9092
  clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-0
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30127
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-0
#    app: kafka-cluster
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-1
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30128
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-1
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-2
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30129
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: elk
  name: crs-kafka
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: kafka-cluster
  selector:
    matchLabels:
      app: kafka-cluster
  template:
    metadata:
      labels:
        app: kafka-cluster
    spec:
      hostname: kafka
      containers:
        - name: kafka
          command:
            - bash
            - -ec
            - |
              HOSTNAME=`hostname -s`
              if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
                ORD=${BASH_REMATCH[2]}
                PORT=$((ORD + 30127))
                export KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://192.168.1.200:$PORT"
              else
                echo "Failed to get index from hostname $HOST"
                exit 1
              fi
              exec /entrypoint.sh /run.sh
          image: bitnami/kafka:3.5.1
#          image: bitnami/kafka:latest
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 0
#          resources:
#            requests:
#              memory: "1G"
#              cpu: "0.5"
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_CFG_ZOOKEEPER_CONNECT
              value: crs-zookeeper-0.zookeeper-cluster.elk.svc.cluster.local:2181,crs-zookeeper-1.zookeeper-cluster.elk.svc.cluster.local:2181,crs-zookeeper-2.zookeeper-cluster.elk.svc.cluster.local:2181
#              value: zookeeper-cluster:2181
            - name: ALLOW_PLAINTEXT_LISTENER
              value: "yes"
          volumeMounts:
            - name: kafkadata-outer
              mountPath: /bitnami/kafka
  volumeClaimTemplates:
    - metadata:
        name: kafkadata-outer
      spec:
        storageClassName: nfs-storage
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 2Ti
EOF
kubectl apply -f ~/kafka-yml/kafka.yml

注意修改yml文件98行里面的export的ip地址

这里修改为公网的ip:58.34.61.154(内网192.168.1.200)

kafka ui

docker pull provectuslabs/kafka-ui:latestdocker pull freakchicken/kafka-ui-lite
docker run -d \
--name kafka-ui1 \
--restart always \
--privileged=true \
-p 8888:8080 \
-e KAFKA_CLUSTERS_0_NAME=k8s-kafka \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129 \
provectuslabs/kafka-ui:latest

访问地址:192.168.1.200:8888

docker run -d \
--name kafka-ui2 \
--restart always \
--privileged=true \
-p 8889:8889 \
freakchicken/kafka-ui-lite

访问地址:192.168.1.200:8889

4、filebeat

1、k8s方式

helm search repo elastic/filebeatcd && helm pull elastic/filebeat --untar --version 7.17.3cd filebeat
# Filebeat DaemonSet values: tail container logs, ship to Kafka instead of
# directly to Elasticsearch (output.elasticsearch disabled).
# (Original heredoc body was flattened into invalid YAML; restored structure.)
cat > values-prod.yaml << 'EOF'
daemonset:
  filebeatConfig:
    filebeat.yml: |
      filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log
      output.elasticsearch:
        enabled: false
        host: '${NODE_NAME}'
        hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
      output.kafka:
        enabled: true
        hosts: ["192.168.1.200:30127","192.168.1.200:30128","192.168.1.200:30129"]
        topic: k8s-logs
EOF
helm upgrade --install --namespace elk filebeat -f ./values-prod.yaml .

2、docker方式

# Standalone (docker) filebeat config: tail NFS-mounted logs, ship to Kafka.
# (Original heredoc body was flattened into invalid YAML; restored structure.)
cat > filebeat.yml << 'EOF'
# 日志输入配置(可配置多个)
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /mnt/nfs/logs/*/*.log
  tags: ["dev-c"]
  fields:
    server: dev-c
  fields_under_root: true
#日志输出配置
output.kafka:
  enabled: true
  hosts: ["192.168.1.200:30127","192.168.1.200:30128","192.168.1.200:30129"]
  topic: "dev-c"
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
EOF
# Run filebeat in docker with the config written above.
# Fix: the original ended with a trailing "\" after the image name, which
# would make the shell treat the next text line as an argument to docker run.
docker run -d --name filebeat \
  --user=root \
  --restart=always \
  -v /mnt/nfs/logs/:/mnt/nfs/logs/ \
  -v /root/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml \
  -v /etc/localtime:/etc/localtime \
  -v /etc/timezone:/etc/timezone \
  elastic/filebeat:7.17.3
5、logstash
helm search repo elastic/logstashcd && helm pull elastic/logstash --untar --version 7.17.3cd logstash
# Logstash chart values: consume the k8s-logs Kafka topic, write to ES.
# Fixes vs original: (1) heredoc body was flattened into invalid YAML;
# (2) the pipeline key was "logstash.yml" — the elastic/logstash chart mounts
# logstashPipeline entries into /usr/share/logstash/pipeline, where pipeline
# definitions are .conf files; renamed to logstash.conf.
cat > values-prod.yaml << 'EOF'
logstashConfig:
  logstash.yml: |
    xpack.monitoring.enabled: false

logstashPipeline:
  logstash.conf: |
    input {
      kafka {
        bootstrap_servers => "192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129"
        topics => ["k8s-logs"]
        #group_id => "mygroup"
        #如果使用元数据就不能使用下面的byte字节序列化,否则会报错
        #key_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
        #value_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
        consumer_threads => 1
        #默认为false,只有为true的时候才会获取到元数据
        decorate_events => true
        auto_offset_reset => "earliest"
      }
    }
    filter {
      mutate {
        #从kafka的key中获取数据并按照逗号切割
        split => ["[@metadata][kafka][key]", ","]
        add_field => {
          #将切割后的第一位数据放入自定义的"index"字段中
          "index" => "%{[@metadata][kafka][key][0]}"
        }
      }
    }
    output {
      elasticsearch {
        pool_max => 1000
        pool_max_per_route => 200
        hosts => ["elasticsearch-master-headless.elk.svc.cluster.local:9200"]
        index => "k8s-logs-%{+YYYY.MM.dd}"
      }
    }

# 资源限制
resources:
  requests:
    cpu: "100m"
    memory: "256Mi"
  limits:
    cpu: "1000m"
    memory: "1Gi"

persistence:
  enabled: true

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  storageClassName: nfs-storage
  resources:
    requests:
      storage: 2Ti
EOF
helm upgrade --install --namespace elk logstash -f ./values-prod.yaml .

手撕yml

# Hand-written logstash deployment: ConfigMap (pipeline) + Deployment + Service.
# Fixes vs original: (1) manifest was flattened into invalid YAML;
# (2) the Service selector was "app: logstash" while the pods are labelled
# "app: logstash-dev" — the Service would never match any pod; aligned it.
mkdir -p ~/logstash-yml && cd ~/logstash-yml
cat > logstash.yaml << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-dev-configmap
  namespace: elk
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    input {
      kafka {
        bootstrap_servers => "192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129"
        topics => ["dev"]
        codec => "json"
        type => "dev"
        group_id => "dev"
        consumer_threads => 1
      }
    }
    filter {
      if [type] == "dev" {
        json {
          source => ["message"]
          remove_field => ["offset","host","beat","@version","event","agent","ecs"]
        }
        mutate {
          add_field => {
            project_path => "%{[log][file][path]}"
          }
        }
        mutate {
          split => ["project_path", "/"]
          add_field => {
            "project_name" => "%{[project_path][-3]}"
          }
        }
        date {
          match => ["time","yyyy-MM-dd HH:mm:ss.SSS"]
          timezone => "Asia/Shanghai"
          target => "@timestamp"
        }
        mutate {
          remove_field => ["log","project_path","time","input"]
        }
      }
    }
    output {
      elasticsearch {
        hosts => ["elasticsearch-master-headless.elk.svc.cluster.local:9200"]
        index => "dev-%{+YYYY.MM.dd}"
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-dev
  namespace: elk
spec:
  selector:
    matchLabels:
      app: logstash-dev
  replicas: 1
  template:
    metadata:
      labels:
        app: logstash-dev
    spec:
      containers:
        - name: logstash-dev
          image: docker.elastic.co/logstash/logstash:7.17.3
          ports:
            - containerPort: 5044
          volumeMounts:
            - name: logstash-pipeline-volume
              mountPath: /usr/share/logstash/pipeline
            - mountPath: /etc/localtime
              name: localtime
      volumes:
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-dev-configmap
            items:
              - key: logstash.conf
                path: logstash.conf
        - hostPath:
            path: /etc/localtime
          name: localtime
---
kind: Service
apiVersion: v1
metadata:
  name: logstash-dev
  namespace: elk
spec:
  selector:
    app: logstash-dev
  type: ClusterIP
  ports:
    - protocol: TCP
      port: 5044
      targetPort: 5044
EOF
kubectl apply -f logstash.yaml

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/667824.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

基础面试题篇2

前言 前两天又比较忙&#xff0c;放假前的赶工。今天已经到家啦&#xff0c;咱们继续分享一下常用的基础知识。 基础面试题篇2 BIO AIO NIO有何区别&#xff1f; BIO&#xff1a;同步阻塞式 IO&#xff0c;就是我们平常使用的传统 IO&#xff0c;它的特点是模式简单使用方便…

使用PySpark处理DataFrame以拆分数组列

问题&#xff1a;用pyspark 处理df1,df1 有三列&#xff0c;第一列是商品pid,第二列是商品name,第三列是候选标品cid_list(有多个cid),将df1中的cid_list拆开,转换成一个商品id和name对应一个cid&#xff0c;但是有多行 from pyspark.sql.functions import explode, col# 假设…

神经网络 | 基于 CNN 模型实现土壤湿度预测

Hi&#xff0c;大家好&#xff0c;我是半亩花海。在现代农业和环境监测中&#xff0c;了解土壤湿度的变化对于作物生长和水资源管理至关重要。通过深度学习技术&#xff0c;特别是卷积神经网络&#xff0c;我们可以利用过去的土壤湿度数据来预测未来的湿度趋势。本文将使用 Pad…

深入了解关联查询和子查询

推荐阅读 给软件行业带来了春天——揭秘Spring究竟是何方神圣&#xff08;一&#xff09; 给软件行业带来了春天——揭秘Spring究竟是何方神圣&#xff08;二&#xff09; 文章目录 推荐阅读关联查询子查询 关联查询 关联查询 从多张表中查询对应记录的信息&#xff0c;关联查…

字节、十六进制、二进制之间的关系

字节、十六进制和二进制是计算机领域中常用的术语&#xff0c;它们之间有着密切的关系。在这篇文章中&#xff0c;我们将探讨字节、十六进制和二进制之间的关系&#xff0c;并提供一些例子来说明它们的应用。 首先&#xff0c;让我们了解一下字节。字节是计算机存储和传输数据…

组合数学基础

隔板法 X 1 X 2 . . . X n m , X i > 0 X_1X_2...X_nm,\quad X_i>0 X1​X2​...Xn​m,Xi​>0 求方程解的个数 求方程解的个数 求方程解的个数 m 个球插入 n − 1 个板将 m 个球分成 n 份 m个球插入n-1个板将m个球分成n份 m个球插入n−1个板将m个球分成n份 方程…

Ubuntu下的磁盘管理,分区管理,挂载和卸载分区

探索Ubuntu下的磁盘管理 在Ubuntu操作系统中&#xff0c;磁盘管理是系统维护中至关重要的一部分。它涉及到分区、格式化、挂载、监视以及维护磁盘等操作。本文将带您深入了解Ubuntu下的磁盘管理&#xff0c;并介绍一些常用的工具和技术。 1. 磁盘基础知识 在开始磁盘管理之前…

Acwing---3302. 表达式求值

表达式求值 1.题目2.基本思想3.代码实现 1.题目 给定一个表达式&#xff0c;其中运算符仅包含 ,-,*,/&#xff08;加 减 乘 整除&#xff09;&#xff0c;可能包含括号&#xff0c;请你求出表达式的最终值。 注意&#xff1a; 数据保证给定的表达式合法。题目保证符号 - 只作…

服务器和云服务器哪个更安全?

随着云计算技术的不断发展&#xff0c;越来越多的企业开始选择使用云服务器来存储和处理数据。然而&#xff0c;对于一些企业来说&#xff0c;他们可能更倾向于使用传统的服务器。在这种情况下&#xff0c;安全性成为了一个重要的考虑因素。那么&#xff0c;服务器和云服务器哪…

mac下载工具:JDownloader 2 for Mac 中文版

JDownloader是一款开源的下载管理工具&#xff0c;主要使用Java编程语言开发&#xff0c;因此它能够在支持Java的操作系统上运行&#xff0c;包括Windows、Linux和Mac OS。这款软件专门为那些需要通过网盘下载文件的用户提供便利&#xff0c;它支持众多流行的网盘服务&#xff…

11、SystemInit函数解读

1、系统时钟初始化函数&#xff1a;SystemInit(); 使用库函数的时候&#xff0c;在系统启动之后会自动调用 2、首先如果使用外部时钟源HSE&#xff0c;要配置外部晶振频率&#xff1a;stm32f4xx.h 3、初始化之前首先通过宏定义定义下面变量来定义系统时钟频率&#xff1a; …

python将Excel文档转成.db数据库文件

python实现Excel转.db数据库 1.程序实现 程序实现以下功能&#xff1a; 1.读取一个Excel文件,文件名通过函数传参数传入 2.将文件读取的内容保存到一个数据库文件中 3.数据库的文件名以传入的Excel文件的文件名命名 4.将excel文件的工作簿的名字作为数据库的表单名 5.将Excel…

idea修改项目git地址

大家好&#xff0c;今天给大家分享的知识是如何在idea中修改项目的git地址。 一、修改地址 首先我们先找到菜单栏中Git选项&#xff0c;然后点击管理远程&#xff08;Manage Remote&#xff09; 之后双击origin之后就可以定义名称或者URL了。

电路设计(10)——超温报警电路的proteus仿真

1.题目背景 在现实生活中&#xff0c;常有一种工程技术&#xff0c;即带有自动温度补偿的设备&#xff0c;能在规定温度内正常工作。但是为了设备安全&#xff0c;需设定工作的上限温度&#xff0c;万一温控补偿失效&#xff0c;设备温度一旦超出上限温度时&#xff0c;便立即切…

前端excel带样式导出 exceljs 插件的使用

案例 <!DOCTYPE html> <html><head><meta charset"utf-8" /><meta name"viewport" content"widthdevice-width, initial-scale1"><title>exceljs 使用</title></head><body><button …

ReactNative实现宽度变化实现的动画效果

效果如上图所示&#xff0c;通过修改设备宽度实现动画效果 import React, {useRef, useEffect, useState} from react; import {Animated, Text, View, Image} from react-native;const FadeInView props > {const fadeAnim useRef(new Animated.Value(0)).current;React…

PyTorch、NCNN、Numpy三者张量的shape

目录 一、PyTorch二、NCNN三、Numpy 一、PyTorch 在 PyTorch 中&#xff0c;张量&#xff08;Tensor&#xff09;的形状通常按照 (N, C, H, W) 的顺序排列&#xff0c;其中&#xff1a; N 是批量大小&#xff08;batch size&#xff09; C 是通道数&#xff08;channel number…

【Node系列】连接数据库

文章目录 一、连接MySql二、连接MongoDB三、相关链接 一、连接MySql 首先&#xff0c;您需要安装mysql模块。在命令行中&#xff0c;导航到您的项目目录并输入以下命令&#xff1a; npm install mysql然后&#xff0c;您可以在Node.js代码中使用mysql模块来连接MySQL数据库、…

在vs code的terminal,debug执行python main.py --train True

GPT4告诉我&#xff1a; 在VS Code中以debug状态执行带有参数&#xff08;如--train&#xff09;的main.py文件&#xff0c;你需要在launch.json配置文件中正确设置参数。以下是详细步骤&#xff1a; 打开你的main.py文件&#xff1a;确保你的main.py文件已经在VS Code中打开…

鸿蒙 状态管理-应用存储

前提&#xff1a;基于官网3.1/4.0文档。参考官网文档 基于Android开发体系来进行比较和思考。&#xff08;或有偏颇&#xff0c;自行斟酌&#xff09; 1.概念 装饰器&#xff08;State、Prop等&#xff09;是用于组件的状态修饰符&#xff0c;本篇讲的是更上一层级别&#xff…