percona-xtrabackup备份

#!/bin/bash
# MySQL backup/restore driver built on percona-xtrabackup (innobackupex).
# Requires percona-xtrabackup to be installed.
# xtrabackup: https://www.percona.com/downloads/Percona-XtraBackup-2.4/LATEST/
# xtrabackup version: 2.4.24 (RPM install)
# MySQL version:      5.7.36 (RPM install)
# script version: 22.01.17

# Backup server address and port
DB_BACKUP_SERVER="localhost"
DB_BACKUP_SERVER_PORT="16036"
# Backup account username
DB_BACKUP_USER="backup_user"
# Backup account password.
# NOTE(review): hardcoded credential; consider moving it into a mode-600
# option file and passing --defaults-extra-file instead.
DB_BACKUP_USER_PASSWD="2iBa#I9Cc8GH80ky"
# xtrabackup connection arguments; expanded UNQUOTED on purpose so the
# individual options word-split into separate arguments.
AUTH="--host=${DB_BACKUP_SERVER} --user=${DB_BACKUP_USER} --password=${DB_BACKUP_USER_PASSWD} --port=${DB_BACKUP_SERVER_PORT}"
# MySQL data directory
DB_DATA_PATH="/data/mysql57"
# Automatic backup root; layout below it is YEAR/MONTH/DAY
BD_BACKUP_PATH="/data/mysqlbackup"
# Manual backup directory
DB_MANUAL_BACKUP_PATH="/data/mysqlbackup/manual"
# Archive directory for rotated manual backups
DB_ARCHIVE_PATH="${DB_MANUAL_BACKUP_PATH}/archive"
# MySQL configuration file
DB_CONFIG_FILE="/etc/my.cnf"
# Timestamp used to tag rotated manual backup chains
DB_BACKUP_TIME=$(date +%Y%m%d_%H%M)
# Script log (one file per month)
DB_BACKUP_LOG_PATH="${BD_BACKUP_PATH}/logs"
DB_BACKUP_LOG_FILE="${DB_BACKUP_LOG_PATH}/mysql-backup-$(date +%Y%m).log"
# Raw xtrabackup output log (one file per hour)
XTRABACKUP_LOG_PATH="${DB_BACKUP_LOG_PATH}/xtrabackup"
XTRABACKUP_LOG_FILE="${XTRABACKUP_LOG_PATH}/xtrabackup-$(date +%Y%m%d%H).log"

# Ensure the log and manual-backup directories exist.
[ -d "${DB_BACKUP_LOG_PATH}" ] || mkdir -p "${DB_BACKUP_LOG_PATH}"
[ -d "${XTRABACKUP_LOG_PATH}" ] || mkdir -p "${XTRABACKUP_LOG_PATH}"
[ -d "${DB_MANUAL_BACKUP_PATH}" ] || mkdir -p "${DB_MANUAL_BACKUP_PATH}"
[ -d "${DB_ARCHIVE_PATH}" ] || mkdir -p "${DB_ARCHIVE_PATH}"

# Check the result of the most recent xtrabackup/innobackupex run by scanning
# its log, record SUCCESS/ERROR in the script log, then compress this hour's
# xtrabackup log (removing the plain-text file).
# Globals: XTRABACKUP_LOG_FILE, XTRABACKUP_LOG_PATH, DB_BACKUP_LOG_FILE
checkXtrabackupStatus() {
  # xtrabackup ends a successful run with "completed OK!"; skip the
  # informational line that merely says 'prints "completed OK!'.
  local status
  status=$(tail -n 20 "${XTRABACKUP_LOG_FILE}" | grep -v 'prints "completed OK!' | grep -c "completed OK")
  if [ "${status}" -ge 1 ]; then
    echo -e "SUCCESS: status: successful, xtrabackup status: completed OK." >> "${DB_BACKUP_LOG_FILE}"
    SUCCESS=$(tail -n 5 "${XTRABACKUP_LOG_FILE}" | grep -v "IMPORTANT")
    echo -e "SUCCESS INFO: \n${SUCCESS}" >> "${DB_BACKUP_LOG_FILE}"
  else
    echo -e "ERROR: status: failed." >> "${DB_BACKUP_LOG_FILE}"
    ERROR=$(tail -n 5 "${XTRABACKUP_LOG_FILE}" | grep -v "IMPORTANT")
    echo -e "ERROR INFO: \n${ERROR}" >> "${DB_BACKUP_LOG_FILE}"
  fi
  # Archive this hour's xtrabackup log.
  cd "${XTRABACKUP_LOG_PATH}"
  local logname
  logname=$(ls | grep "xtrabackup-$(date +%Y%m%d%H).log" | grep -v ".tar.gz")
  tar zcf "${logname}.tar.gz" "${logname}" --remove-files
  echo -e "XTRABACKUP_LOG_FILE: $(ls ${XTRABACKUP_LOG_FILE}.tar.gz)" >> "${DB_BACKUP_LOG_FILE}"
}

# Manual full backup: no database selected means everything is backed up.
# Manual full backup of all databases into ${DB_MANUAL_BACKUP_PATH}/data.
# Any previous manual chain (data + inc_N directories) is rotated into the
# archive first; only the 3 most recent archives are kept.
# Globals: DB_MANUAL_BACKUP_PATH, DB_ARCHIVE_PATH, DB_BACKUP_TIME, AUTH,
#          DB_CONFIG_FILE, DB_BACKUP_LOG_FILE, XTRABACKUP_LOG_FILE
fullBackup() {
  echo -e "################### Manual Full Backups ###################" >> "${DB_BACKUP_LOG_FILE}"
  echo -e "Backup Time: $(date "+%Y/%m/%d %H:%M:%S")" >> "${DB_BACKUP_LOG_FILE}"
  if [ -d "${DB_MANUAL_BACKUP_PATH}/data" ]; then
    # Rotate the previous full backup (and any incrementals on top of it)
    # into a timestamped directory, then archive it.
    cd "${DB_MANUAL_BACKUP_PATH}"
    mkdir -p "data_${DB_BACKUP_TIME}"
    mv data "data_${DB_BACKUP_TIME}"
    INC_DIR=$(ls "${DB_MANUAL_BACKUP_PATH}" | grep -v "archive" | tail -1)
    if [[ "${INC_DIR}" =~ "inc_" ]]; then
      mv inc_* "data_${DB_BACKUP_TIME}"
    fi
    tar zcf "${DB_ARCHIVE_PATH}/data_${DB_BACKUP_TIME}.tar.gz" "data_${DB_BACKUP_TIME}" --remove-files
  fi
  echo -e "full backup: ${DB_MANUAL_BACKUP_PATH}/data" >> "${DB_BACKUP_LOG_FILE}"
  # ${AUTH} intentionally unquoted: it carries several options.
  innobackupex --defaults-file="${DB_CONFIG_FILE}" ${AUTH} --rsync "${DB_MANUAL_BACKUP_PATH}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
  cd "${DB_MANUAL_BACKUP_PATH}"
  # innobackupex names its output directory after the timestamp (starts "20").
  mv -v $(ls | grep "20" | grep -v "archive") data >> "${DB_BACKUP_LOG_FILE}"
  # Verify the xtrabackup run.
  checkXtrabackupStatus
  # Keep only the 3 most recent archived manual backups (oldest removed first).
  cd "${DB_MANUAL_BACKUP_PATH}/archive"
  RE=3
  FN=$(ls *.tar.gz | wc -l)
  if [ "${FN}" -gt "${RE}" ]; then
    for f in $(ls -crt); do
      rm -rvf "${f}" >> "${DB_BACKUP_LOG_FILE}"
      let FN=FN-1
      [ "${FN}" -eq "${RE}" ] && break
    done
  fi
}

# Manual incremental backup.
# Manual incremental backup. Requires a prior full backup ("data" directory);
# successive runs create inc_1, inc_2, ... each based on the previous step
# of the chain.
# Globals: DB_MANUAL_BACKUP_PATH, AUTH, DB_CONFIG_FILE, DB_BACKUP_LOG_FILE,
#          XTRABACKUP_LOG_FILE
incrementalBackup() {
  echo -e "################### Manual Incremental Backups ###################" >> "${DB_BACKUP_LOG_FILE}"
  echo -e "BackupTime: $(date "+%Y/%m/%d %H:%M:%S")" >> "${DB_BACKUP_LOG_FILE}"
  [ -d "${DB_MANUAL_BACKUP_PATH}" ] || mkdir -p "${DB_MANUAL_BACKUP_PATH}"
  END_DIR=$(ls "${DB_MANUAL_BACKUP_PATH}" | grep -v "archive" | tail -1)
  if [ "${END_DIR}" == "data" ]; then
    # Only the full backup exists: first incremental is based on "data".
    echo -e "incremental backup: ${DB_MANUAL_BACKUP_PATH}/inc_1" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --defaults-file="${DB_CONFIG_FILE}" ${AUTH} --rsync --incremental "${DB_MANUAL_BACKUP_PATH}" --incremental-basedir="${DB_MANUAL_BACKUP_PATH}/data" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    cd "${DB_MANUAL_BACKUP_PATH}"
    mv -v $(ls | grep "$(date +%Y-%m-%d)") inc_1 >> "${DB_BACKUP_LOG_FILE}"
  elif [[ "${END_DIR}" =~ "inc" ]]; then
    # inc_N directories exist: base the new incremental on the highest N.
    NUM=$(echo "${END_DIR}" | grep -o '[0-9]*')
    let NUM_ADD=(NUM + 1)
    echo -e "incremental backup: ${DB_MANUAL_BACKUP_PATH}/inc_${NUM_ADD}" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --defaults-file="${DB_CONFIG_FILE}" ${AUTH} --rsync --incremental "${DB_MANUAL_BACKUP_PATH}" --incremental-basedir="${DB_MANUAL_BACKUP_PATH}/inc_${NUM}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    cd "${DB_MANUAL_BACKUP_PATH}"
    mv -v $(ls | grep "$(date +%Y-%m-%d)") "inc_${NUM_ADD}" >> "${DB_BACKUP_LOG_FILE}"
  fi
  # Verify the xtrabackup run.
  checkXtrabackupStatus
}

# Manual recovery.
# Manual recovery: prepare the full backup plus any inc_N increments
# (all but the last with --redo-only), then copy the prepared data back
# into the MySQL data directory, which must be empty.
# Globals: DB_DATA_PATH, DB_MANUAL_BACKUP_PATH, DB_BACKUP_LOG_FILE,
#          XTRABACKUP_LOG_FILE
manualRecovery() {
  echo -e "################### Manual Recovery ###################" >> "${DB_BACKUP_LOG_FILE}"
  echo -e "RecoveryTime: $(date "+%Y/%m/%d %H:%M:%S")" >> "${DB_BACKUP_LOG_FILE}"
  # Refuse to restore into a non-empty data directory.
  [ ! -d "${DB_DATA_PATH}/data" ] || [ "$(ls ${DB_DATA_PATH}/data)" == "" ] || { echo "ERROR: MySQL data directory is not empty: ${DB_DATA_PATH}/data" >> "${DB_BACKUP_LOG_FILE}"; exit 2; }
  INC_NUM=$(ls -d "${DB_MANUAL_BACKUP_PATH}"/inc_* 2> /dev/null | wc -l)
  if [ "${INC_NUM}" -eq 0 ]; then
    # No incrementals: prepare the full backup directly.
    echo -e "prepare full backup: ${DB_MANUAL_BACKUP_PATH}/data" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --apply-log "${DB_MANUAL_BACKUP_PATH}/data" >> "${XTRABACKUP_LOG_FILE}" 2>&1
  elif [ "${INC_NUM}" -ge 1 ]; then
    # Prepare the full backup with --redo-only, then replay the increments
    # in order; the LAST increment is applied without --redo-only.
    echo -e "prepare full backup: ${DB_MANUAL_BACKUP_PATH}/data" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --apply-log --redo-only "${DB_MANUAL_BACKUP_PATH}/data" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    for i in $(seq 1 "${INC_NUM}"); do
      if [ "$i" -eq "${INC_NUM}" ]; then
        echo -e "prepare incremental backup: ${DB_MANUAL_BACKUP_PATH}/inc_${i}" >> "${DB_BACKUP_LOG_FILE}"
        innobackupex --apply-log "${DB_MANUAL_BACKUP_PATH}/data" --incremental-dir="${DB_MANUAL_BACKUP_PATH}/inc_${i}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
        break
      fi
      echo -e "prepare incremental backup: ${DB_MANUAL_BACKUP_PATH}/inc_${i}" >> "${DB_BACKUP_LOG_FILE}"
      innobackupex --apply-log --redo-only "${DB_MANUAL_BACKUP_PATH}/data" --incremental-dir="${DB_MANUAL_BACKUP_PATH}/inc_${i}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    done
  fi
  # Copy the prepared backup back and fix ownership.
  echo -e "recovery backup: ${DB_MANUAL_BACKUP_PATH}/data" >> "${DB_BACKUP_LOG_FILE}"
  innobackupex --copy-back "${DB_MANUAL_BACKUP_PATH}/data" >> "${XTRABACKUP_LOG_FILE}" 2>&1
  chown -R mysql:mysql "${DB_DATA_PATH}/data"
  # Verify the xtrabackup run.
  checkXtrabackupStatus
}

# Automatic backup: full backup on Monday, incremental Tuesday-Sunday.
# Backup layout: ${BD_BACKUP_PATH}/YEAR/MONTH/DAY
# e.g. ${BD_BACKUP_PATH}/2022/01/05
# Automatic daily backup into ${BD_BACKUP_PATH}/YEAR/MONTH/DAY.
# Monday: full backup. Tuesday-Sunday: incremental based on yesterday's
# directory (falls back to a full backup if yesterday is missing).
# Afterwards, the weekly chain from the week before last is deleted so the
# two most recent full-backup chains are retained.
# Globals: BD_BACKUP_PATH, AUTH, DB_CONFIG_FILE, DB_BACKUP_LOG_FILE,
#          XTRABACKUP_LOG_FILE
autoBackup() {
  echo -e "################### Automatic Backup ###################" >> "${DB_BACKUP_LOG_FILE}"
  echo -e "Backup Time: $(date "+%Y/%m/%d %H:%M:%S")" >> "${DB_BACKUP_LOG_FILE}"
  YEAR=$(date +%Y)
  MONTH=$(date +%m)
  DAY=$(date +%d)
  DAY_OF_WEEK=$(date -d "${YEAR}${MONTH}${DAY}" +%u)
  YESTERDAY=$(date +%Y/%m/%d --date="-1 day")
  FULL_DIR="${BD_BACKUP_PATH}/${YEAR}/${MONTH}"
  mkdir -p "${FULL_DIR}"
  if [ "${DAY_OF_WEEK}" -eq 1 ]; then
    # Monday: full backup.
    echo -e "full backup: ${FULL_DIR}/${DAY}" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --defaults-file="${DB_CONFIG_FILE}" ${AUTH} --rsync "${FULL_DIR}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    cd "${FULL_DIR}"
    mv -v $(ls "${FULL_DIR}" | grep "$(date "+%Y-%m-%d_%H")") "${DAY}" >> "${DB_BACKUP_LOG_FILE}"
  elif [ ! -d "${BD_BACKUP_PATH}/${YESTERDAY}" ]; then
    # Yesterday's backup is missing: log the error and take a full backup.
    echo -e "ERROR: Not exist yesterday backup data: ${BD_BACKUP_PATH}/${YESTERDAY}" >> "${DB_BACKUP_LOG_FILE}"
    echo -e "now full backup: ${FULL_DIR}/${DAY}" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --defaults-file="${DB_CONFIG_FILE}" ${AUTH} --rsync "${FULL_DIR}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    cd "${FULL_DIR}"
    mv -v $(ls "${FULL_DIR}" | grep "$(date "+%Y-%m-%d_%H")") "${DAY}" >> "${DB_BACKUP_LOG_FILE}"
  else
    # Tuesday-Sunday: incremental based on yesterday's backup.
    echo -e "incremental backup: ${FULL_DIR}/${DAY} " >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --defaults-file="${DB_CONFIG_FILE}" ${AUTH} --rsync --incremental "${FULL_DIR}" --incremental-basedir="${BD_BACKUP_PATH}/${YESTERDAY}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    cd "${FULL_DIR}"
    mv -v $(ls "${FULL_DIR}" | grep "$(date "+%Y-%m-%d_%H")") "${DAY}" >> "${DB_BACKUP_LOG_FILE}"
  fi
  # Verify the xtrabackup run.
  checkXtrabackupStatus
  # Retention: remove the 7 days of the week before last.
  cd "${BD_BACKUP_PATH}"
  let GET_LAST_WEEK=(DAY_OF_WEEK + 7)
  for x in $(seq 0 6); do
    let REMOVE_DAY=(GET_LAST_WEEK + x)
    LAST_WEEK=$(date -d "${YEAR}${MONTH}${DAY} -${REMOVE_DAY} days" +%Y/%m/%d)
    [ ! -d "${BD_BACKUP_PATH}/${LAST_WEEK}" ] || { echo "REMOVE: ${LAST_WEEK}" >> "${DB_BACKUP_LOG_FILE}"; rm -rf "${LAST_WEEK}"; }
  done
}

# Automatic recovery.
# Automatic recovery from the YEAR/MONTH/DAY backup tree.
# Usage: $0 -r YEAR MONTH DAY (restores state as of that day).
# Arguments: $2=YEAR $3=MONTH $4=DAY ($1 is the "-r" flag).
# Globals: DB_DATA_PATH, BD_BACKUP_PATH, DB_BACKUP_LOG_FILE, XTRABACKUP_LOG_FILE
autoRecovery() {
  echo -e "################### Automatic Recovery ###################" >> "${DB_BACKUP_LOG_FILE}"
  echo -e "Recovery Time: $(date "+%Y/%m/%d %H:%M:%S")" >> "${DB_BACKUP_LOG_FILE}"
  # Refuse to restore into a non-empty data directory.
  [ ! -d "${DB_DATA_PATH}/data" ] || [ "$(ls ${DB_DATA_PATH}/data)" == "" ] || { echo "ERROR: MySQL data directory is not empty: ${DB_DATA_PATH}/data" >> "${DB_BACKUP_LOG_FILE}"; exit 2; }
  YEAR=${2}
  MONTH=$3
  DAY=$4
  DAY_OF_WEEK=$(date -d "${YEAR}${MONTH}${DAY}" +%u)
  # The requested day's backup directory must exist and be non-empty.
  [ -d "${BD_BACKUP_PATH}/${YEAR}/${MONTH}/${DAY}" ] || { echo "ERROR: Wrong input date: ${BD_BACKUP_PATH}/${YEAR}/${MONTH}/${DAY}" >> "${DB_BACKUP_LOG_FILE}"; exit 3; }
  [ "$(ls ${BD_BACKUP_PATH}/${YEAR}/${MONTH}/${DAY})" == "" ] && { echo "ERROR: Dir no data: ${BD_BACKUP_PATH}/${YEAR}/${MONTH}/${DAY}" >> "${DB_BACKUP_LOG_FILE}"; exit 4; }
  let F_DAY=(DAY_OF_WEEK - 1)
  MONDAY=$(date -d "${YEAR}${MONTH}${DAY} -${F_DAY} days" +%Y/%m/%d)
  if [ "${DAY_OF_WEEK}" -eq 1 ]; then
    # Monday: restore the full backup alone.
    echo -e "prepare full backup: ${BD_BACKUP_PATH}/${YEAR}/${MONTH}/${DAY}" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --apply-log "${BD_BACKUP_PATH}/${YEAR}/${MONTH}/${DAY}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    # Verify the xtrabackup run.
    checkXtrabackupStatus
  elif [ "${DAY_OF_WEEK}" -ge 2 ]; then
    # Tuesday-Sunday: prepare Monday's full backup with --redo-only, then
    # replay each day's incremental in chronological order.
    echo -e "prepare full backup: ${BD_BACKUP_PATH}/${MONDAY}" >> "${DB_BACKUP_LOG_FILE}"
    innobackupex --apply-log --redo-only "${BD_BACKUP_PATH}/${MONDAY}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    for n in $(seq 1 "${F_DAY}"); do
      # BUGFIX: replay oldest increment first (Tuesday ... target day);
      # the original used N_DAY=n-1 and walked backwards from the target day.
      let N_DAY=(F_DAY - n)
      INC_DAY=$(date -d "${YEAR}${MONTH}${DAY} -${N_DAY} days" +%Y/%m/%d)
      if [ "$n" -eq "${F_DAY}" ]; then
        # BUGFIX: the LAST increment must be applied WITHOUT --redo-only;
        # the original tested [ $n -eq 0 ], which never matched in seq 1..F_DAY.
        echo -e "prepare incremental backup: ${BD_BACKUP_PATH}/${INC_DAY}" >> "${DB_BACKUP_LOG_FILE}"
        innobackupex --apply-log "${BD_BACKUP_PATH}/${MONDAY}" --incremental-dir="${BD_BACKUP_PATH}/${INC_DAY}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
        break
      fi
      echo -e "prepare incremental backup: ${BD_BACKUP_PATH}/${INC_DAY}" >> "${DB_BACKUP_LOG_FILE}"
      innobackupex --apply-log --redo-only "${BD_BACKUP_PATH}/${MONDAY}" --incremental-dir="${BD_BACKUP_PATH}/${INC_DAY}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
    done
  fi
  # Copy the prepared backup back and fix ownership.
  echo -e "recovery backup: ${BD_BACKUP_PATH}/${MONDAY}" >> "${DB_BACKUP_LOG_FILE}"
  innobackupex --copy-back "${BD_BACKUP_PATH}/${MONDAY}" >> "${XTRABACKUP_LOG_FILE}" 2>&1
  chown -R mysql:mysql "${DB_DATA_PATH}"
  # Verify the xtrabackup run.
  checkXtrabackupStatus
}

# List available backups.
# List backups available for automatic recovery, printing each day's
# backup type (full-backuped / incremental) as recorded in its
# xtrabackup_checkpoints file.
# Globals: BD_BACKUP_PATH
backupList() {
  echo -e "################### List Of Available Backups ###################"
  echo -e "最近2份全量备份"
  echo -e "------------------------------------------"
  # Year directories start with "20"; walk YEAR/MONTH/DAY.
  for i in $(ls "${BD_BACKUP_PATH}" | grep "20"); do
    for x in $(ls "${BD_BACKUP_PATH}/$i"); do
      for z in $(ls "${BD_BACKUP_PATH}/$i/$x"); do
        BACKUP_TYPE=$(grep backup_type "${BD_BACKUP_PATH}/$i/$x/$z/xtrabackup_checkpoints" | awk -F" = " '{print$2}')
        echo -e "$i $x $z (${BACKUP_TYPE})"
      done
    done
  done
  echo -e "------------------------------------------"
}

# Help text.
# Print usage information.
# BUGFIX: the -r usage example was missing the -r flag itself and mixed up
# the years (2020 vs 2022).
help() {
  echo -e "\
MySQL 自动备份恢复 (使用 xtrabackup - innobackupex);
注意: xtrabackup/innobackupex 只能备份InnoDB和XtraDB两种存储引擎的表, 不支持备份MyISAM数据表;
用法: $0 [-f/-i/-m/-a/-r/-h]
-f : 手动全量备份(保留最近3份备份的数据);
-i : 手动增量备份(必须进行全量备份后才能进行增量备份);
-m : 手动恢复备份(恢复全量和增量备份数据);
-l : 查看可用于恢复的备份(自动备份);
-a : 自动备份数据(按照: 年/月/日 进行备份, 周一为全量, 周二~周日为增量), 保留最近2份全量备份(一份全量包含当周N天增量);
-r : 使用autobackup备份的数据进行还原, 用法: $0 -r 年 月 日 , 恢复至2022.01.07日: $0 -r 2022 01 07;
-h : 查看帮助;
"
}
# Dispatch on the first command-line argument; each action appends a
# separator line to the script log. Unknown flags print the help text.
case "$1" in
  -f)
    fullBackup
    echo "--------------------------------------------------------------------------------------------------" >> "${DB_BACKUP_LOG_FILE}"
    ;;
  -i)
    incrementalBackup
    echo "--------------------------------------------------------------------------------------------------" >> "${DB_BACKUP_LOG_FILE}"
    ;;
  -m)
    manualRecovery
    echo "--------------------------------------------------------------------------------------------------" >> "${DB_BACKUP_LOG_FILE}"
    ;;
  -a)
    autoBackup
    echo "--------------------------------------------------------------------------------------------------" >> "${DB_BACKUP_LOG_FILE}"
    ;;
  -l)
    backupList
    ;;
  -r)
    autoRecovery "$1" "$2" "$3" "$4"
    echo "--------------------------------------------------------------------------------------------------" >> "${DB_BACKUP_LOG_FILE}"
    ;;
  -h)
    help
    ;;
  "")
    help
    ;;
  *)
    # Unknown option: show usage (the original silently did nothing).
    help
    ;;
esac

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/535299.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

k8---proxy

kube-proxy 通过 Informer知道了Service、endpoints对象的创建,然后把service身上的CLUSTER-IP 和端口已经端点信息拿出来,创建iptable NAT规则做转发或通过ipvs模块创建VS服务器,这样经过CLUSTER-IP的流量都被转发到后端pod。 iptables模式 …

k8s--configmap

当卷中使用的ConfigMap被更新时,所投射的键最终也会被更新。kubelet组件会在每次周期性同步时检查所挂载的ConfigMap是否为最新。 不过,kubelet使用的是其本地的高速缓存来获得ConfigMap的当前值。 ConfigMap既可以通过watch操作实现内容传播(默认形式&…

etcd命令

[rootlocalhost calico]# etcdctl get /coreos.com/network/subnets/4.0.32.0-24 {"PublicIP":"10.8.65.53"} 从etcd中查询出4.0.32.0/24的子网的宿主机host的ip10.8.65.53。

docker搭建ldap

1.下载镜像 docker pull osixia/openldap docker pull docker.io/osixia/phpldapadmin 2.运行镜像 docker run -dit --name ldap23 --restartalways -p 389:389 -p 636:636 -e LDAP_ORGANISATION”test” -e LDAP_DOMAIN”test.com” -e LDAP_ADMIN_PASSWORD123456″ osixi…

centos 缩减逻辑卷

在使用期间磁盘空间不足,发现/dev/mapper/centos-home下还有很多空间,如果想要将home下面的磁盘空间分配到root磁盘下面,可以使用以下方法 查看磁盘空间,每个人的磁盘名称可能不同 df -h 1 备份home文件 tar cvf /tmp/home.tar /…

zabbix自动发现监控脚本

自动发现端口列表脚本 # -*- coding: utf-8 -*- import os import json data{} tcp_list[] port_list[] commandos.popen("ss -4tln | awk -F [ :] NR>2{print $5}") for port in command:port_list.append(port.strip()) for port in port_list:port_dict{}por…

获取cpu使用率脚本

#!/bin/bash # 获取要监控的本地服务器IP地址 IPifconfig | grep inet | grep -vE inet6|127.0.0.1 | awk {print $2} echo -e "\033[32mIP地址:\033[0m" $IP echo -e "\033[31m获取CPU\033[0m" # 获取cpu总核数 cpu_numgrep -c "model n…

获取磁盘使用率

#!/bin/bash #用途:监控磁盘的使用情况。 #定义检查磁盘的空间使用率函数 userroot topnum10 local_diskdir/data/jiankong/disk mkdirdisk() { if [ ! -d $disktmpdir ];then mkdir $disktmpdir fi } chk_sp() {ip$1n$(wc -l $local_diskdir/$ip/chk_sp.log | awk …

python邮件脚本

#用于发送测试报告(带附件)或其它内容# -*- coding:utf-8 -*- import os import smtplib from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipartclass SendEmail(object):def __init__(self, receivers):mail_host "127.0.0.1" # …

Rsync+Inotify

Rsync+Inotify Rsync这个一般使用系统自带的服务，服务端需要启动客户端无需启动，服务端设置开机自动启动 systemctl start rsyncd systemctl status rsyncd systemctl restart rsyncd systemctl enable rsyncd Inotify单独下载安装，

logstash密码设置步骤

生成密钥存储仓库 cd /app/logstash-6.8.4/bin/ ./logstash-keystore create 输入y 添加Logstash中可引用的变量 ./logstash-keystore add es_user 输入elastic ./logstash-keystore add es_pwd 输入bamBoo123 ./logstash-keystore add kafka_pwd 输入bamBoo123 ./logstash…

清理cacahe脚本

#! /bin/bash # 需要释放内存的,内存使用百分比,可以传参,默认是85% max_rate$1 if [ ! "$max_rate" ] ; thenmax_rate85 fi echo "max_rate: $max_rate"totalfree -m | awk NR2 | awk {print $2} usedfree -m | awk NR2…

Prometheus node_exporter 指标说明及告警规则表达

Prometheus node_exporter 指标说明及告警规则表达_独步秋风的博客-CSDN博客_node exporter 指标

UnicodeEncodeError: 'gbk' codec can't encode character '\xeb' in position 20: illegal multibyte sequ

源代码:with open (os.path.join(self.root,filename),mode‘w’,newline’’) as f: writercsv.writer(f) for img in images: nameimg.split(os.sep)[-2] labelself.name2lable[name] writer.writerow([img,label]) 解决方法 在open()里面…

列表生成式(List)

列表生成式即List Comprehensions,是Python内置的非常简单却强大的可以用来创建list的生成式。 list(range(1,6)) [1,2,3,4,5]print([x*x for x in range(10)]) [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]print([x*x for x in range(10) if x%20]) [0, 4, 16, 36, 64]

uint8 转换为 float

znp.arange(3,dtypenp.uintj8) >>> z.astype(float) array([ 0., 1., 2.]) >>> np.int8(z) array([0, 1, 2], dtypeint8)

AttributeError: 'NoneType' object has no attribute 'astype'

img cv2.imread(path)#path为图片地址 print(type(img))>>> <class numpy.ndarray> <class numpy.ndarray> <class numpy.ndarray> <class NoneType>错误问题为没有正确读到图片，返回一个Nonetype类型，

pycharm 自动补全括号 引号

过程：File->settings->Editor->General->Smart Keys 勾选Insert paired brackets(),[],{},<>

RuntimeError: freeze_support()

train_dataloaderDataLoader(train_dataset,batch_sizebatch_size,shuffleTrue,num_workers4) 把nu_worker4 去掉 train_dataloaderDataLoader(train_dataset,batch_sizebatch_size,shuffleTrue)