Training an object detection model on your own data

1. Label your own dataset with labelImg.

Put the images in JPEGImages and the XML annotation files in Annotations.
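
If the VOC-style folder skeleton does not exist yet, here is a minimal sketch to create it (the root path is only an example; point it at your own dataset location):

import os

voc_root = 'E:/gitcode/tensorflow-model/VOCPolice/VOC2007'
for sub in ('JPEGImages', 'Annotations', 'ImageSets/Main'):
    # Create JPEGImages, Annotations and ImageSets/Main under the dataset root.
    os.makedirs(os.path.join(voc_root, sub), exist_ok=True)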

2. Split the data into training and test sets

import os
import random

trainval_percent = 0.66
train_percent = 0.5
xmlfilepath = 'Annotations'
txtsavepath = 'ImageSets/Main'
total_xml = os.listdir(xmlfilepath)
print(total_xml)

num = len(total_xml)
index_list = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(index_list, tv)
train = random.sample(trainval, tr)

ftrainval = open('ImageSets/Main/trainval.txt', 'w')
ftest = open('ImageSets/Main/test.txt', 'w')
ftrain = open('ImageSets/Main/train.txt', 'w')
fval = open('ImageSets/Main/val.txt', 'w')

# Write the XML file names (without the .xml extension) into the four split files.
for i in index_list:
    name = total_xml[i][:-4] + '\n'
    if i in trainval:
        ftrainval.write(name)
        if i in train:
            ftrain.write(name)
        else:
            fval.write(name)
    else:
        ftest.write(name)

ftrainval.close()
ftrain.close()
fval.close()
ftest.close()

At this point the four files trainval.txt, test.txt, train.txt, and val.txt are generated under ImageSets/Main/.

3. Based on the contents of train.txt and test.txt, build train and test folders (holding the corresponding XML files)

import os
import shutil

annotations_dir = 'E:\\gitcode\\tensorflow-model\\VOCPolice\\VOC2007\\Annotations'
files = os.listdir(annotations_dir)
file = open('E:\\gitcode\\tensorflow-model\\VOCPolice\\VOC2007\\ImageSets\\Main\\test.txt', 'r')
newpath = 'E:\\gitcode\\tensorflow-model\\VOCPolice\\VOC2007\\test'
num1 = 0
# Copy every XML file listed in test.txt into the test folder.
for line in file.readlines():
    num1 = num1 + 1
    message = line.split('\n')
    num = message[0]
    xmlName = num + '.xml'
    for i in files:
        if i == xmlName:
            oldpath = annotations_dir + '\\' + i
            shutil.copy(oldpath, newpath)
            break
file.close()
print(num1)

The train and test folders now hold exactly the XML files listed in train.txt and test.txt. (The script above copies the test.txt entries; run it again with train.txt and a train destination folder to fill the train side.)

4. Convert the XML annotations to TFRecord files

Method 1: convert the XML files to CSV, then convert the CSV to TFRecord.

Method 2: use create_pascal_tf_record.py from the object_detection folder directly.

Method 2 failed for me, while Method 1 worked, so the setup below covers Method 1:

(1) Convert XML to CSV

'''
function: xml2csv
'''
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET


def xml_to_csv(path):
    xml_list = []
    for xml_file in glob.glob(path + '/*.xml'):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        for member in root.findall('object'):
            value = (root.find('filename').text,
                     int(root.find('size')[0].text),
                     int(root.find('size')[1].text),
                     member[0].text,
                     int(member[4][0].text),
                     int(member[4][1].text),
                     int(member[4][2].text),
                     int(member[4][3].text))
            xml_list.append(value)
    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df


def main():
    for directory in ['train', 'test']:
        project_path = 'E:\\gitcode\\tensorflow-model\\VOCPolice\\VOC2007'
        image_path = os.path.join(project_path, directory)
        xml_df = xml_to_csv(image_path)
        xml_df.to_csv('E:/gitcode/tensorflow-model/VOCPolice/VOC2007/{}_labels.csv'.format(directory), index=None)
        print('Successfully converted xml to csv.')


main()

After running this, two files are produced under VOC2007: train_labels.csv and test_labels.csv.

Note that this assumes the train and test folders of XML files created earlier are located under project_path.
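
For reference, each CSV row describes one bounding box. A hypothetical row (filename and coordinates invented purely for illustration) looks like:

filename,width,height,class,xmin,ymin,xmax,ymax
image1.jpg,640,480,police,123,45,310,400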

(2) Convert the CSV files to TFRecord files

# generate_tfrecord.py
# -*- coding: utf-8 -*-
"""
Usage:
  # From tensorflow/models/
  # Create train data:
  python generate_tfrecord.py --csv_input=data/tv_vehicle_labels.csv  --output_path=train.record
  # Create test data:
  python generate_tfrecord.py --csv_input=data/test_labels.csv  --output_path=test.record
"""

import os
import io
import pandas as pd
import tensorflow as tf

from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict

os.chdir('E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection')

flags = tf.app.flags
flags.DEFINE_string('csv_input', 'E:/gitcode/tensorflow-model/VOCPolice/VOC2007/train_labels.csv', 'Path to the CSV input')
flags.DEFINE_string('output_path', 'train.record', 'Path to output TFRecord')
FLAGS = flags.FLAGS


# TO-DO replace this with label map
def class_text_to_int(row_label):
    if row_label == 'police':     # change this to your own class name
        return 1
    else:
        return None


def split(df, group):
    data = namedtuple('data', ['filename', 'object'])
    gb = df.groupby(group)
    return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]


def create_tf_example(group, path):
    with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size

    filename = group.filename.encode('utf8')
    image_format = b'jpg'
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []

    for index, row in group.object.iterrows():
        xmins.append(row['xmin'] / width)
        xmaxs.append(row['xmax'] / width)
        ymins.append(row['ymin'] / height)
        ymaxs.append(row['ymax'] / height)
        classes_text.append(row['class'].encode('utf8'))
        classes.append(class_text_to_int(row['class']))

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example


def main(_):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    path = os.path.join(os.getcwd(), 'images/test')         # change this to your image folder
    examples = pd.read_csv(FLAGS.csv_input)
    grouped = split(examples, 'filename')
    for group in grouped:
        tf_example = create_tf_example(group, path)
        writer.write(tf_example.SerializeToString())

    writer.close()
    output_path = os.path.join(os.getcwd(), FLAGS.output_path)
    print('Successfully created the TFRecords: {}'.format(output_path))


if __name__ == '__main__':
    tf.app.run()

In the code above, change the csv_input path in flags.DEFINE_string to the train and test CSV paths produced in the previous step, and set output_path to train.record and test.record respectively. Run the script twice to produce train.record and test.record (mind the output path you chose).
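
Alternatively, leave the defaults untouched and pass the paths on the command line, following the usage string at the top of the script (the CSV paths here are the ones produced in step (1)):

python generate_tfrecord.py --csv_input=E:/gitcode/tensorflow-model/VOCPolice/VOC2007/train_labels.csv --output_path=train.record
python generate_tfrecord.py --csv_input=E:/gitcode/tensorflow-model/VOCPolice/VOC2007/test_labels.csv --output_path=test.record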

5. Prepare for training

(1) Download the ssd_mobilenet_v1_coco_2017_11_17 model and extract it into the object_detection directory.
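
For example, assuming the standard model zoo download location (the same base URL appears, commented out, in the test script in step 9):

curl -O http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2017_11_17.tar.gz
tar -xzf ssd_mobilenet_v1_coco_2017_11_17.tar.gz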

(2) Modify pascal_label_map.pbtxt, or create your own .pbtxt file.

Since I only have one class here, the label map contains a single entry, as sketched below.
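
A single-class label map consistent with the 'police' class used in generate_tfrecord.py would look roughly like this (same format as the stock pascal_label_map.pbtxt):

item {
  id: 1
  name: 'police'
}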

(3) Modify the model's pipeline config file

The places to change are as follows:

num_classes: 1    # your number of classes
batch_size: 2     # as large as your machine can handle
fine_tune_checkpoint: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/ssd_mobilenet_v1_coco_2017_11_17/model.ckpt"  # path to the downloaded model; I use absolute paths throughout
num_steps: 50000  # the number of training steps you need

train_input_reader {
  label_map_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/data/pascal_label_map.pbtxt"
  tf_record_input_reader {
    input_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/train.record"
  }
}  # point label_map_path and the train input_path at your own files

eval_input_reader {
  label_map_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/data/pascal_label_map.pbtxt"
  shuffle: false
  num_readers: 1
  tf_record_input_reader {
    input_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/test.record"
  }
}  # point label_map_path and the test input_path at your own files

6. Start training

In object_detection/legacy/train.py, modify the following:

In flags.DEFINE_string, set train_dir to the path where you want the training results stored.
In flags.DEFINE_string, set pipeline_config_path to the path of the .config file that came with the model you just downloaded.

Then run the script (or pass the flags on the command line, as sketched below).
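
A command-line sketch with placeholder paths, run from the object_detection directory:

python legacy/train.py --train_dir=path/to/your/train_dir --pipeline_config_path=path/to/your/pipeline.config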

7. View the training curves

From the object_detection directory (logdir is the train_dir you set during training):

tensorboard --logdir=E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/ssdmodel1231

8. Export the model

In export_inference_graph.py, modify the following:

In flags.DEFINE_string, set pipeline_config_path to the path of your SSD model's .config file.
In flags.DEFINE_string, set trained_checkpoint_prefix to your training result's model.ckpt-<step number>.
In flags.DEFINE_string, set output_directory to your export path.

Then run it to export and save the model; a command-line sketch follows.
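
A sketch with placeholder paths (--input_type image_tensor is the usual choice for image input; model.ckpt-50000 matches the num_steps set above):

python export_inference_graph.py --input_type image_tensor --pipeline_config_path=path/to/your/pipeline.config --trained_checkpoint_prefix=path/to/your/train_dir/model.ckpt-50000 --output_directory=path/to/your/exported_model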

9. Test the model

Create a new file myTest.py under object_detection:

# coding: utf-8
# Object Detection Demo
# This script walks through using a trained model to detect objects in images.
# Make sure to follow the installation instructions first:
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md

from distutils.version import StrictVersion
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops

# if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
#   raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')

# This is needed to display the images.
# get_ipython().magic(u'matplotlib inline')

# Imports from the object detection module.
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Model preparation
# Any model exported using the `export_inference_graph.py` tool can be loaded here
# simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
# See the detection model zoo for other models that run out of the box:
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
MODEL_NAME = './Police_detection1231/'
# MODEL_FILE = MODEL_NAME + '.tar.gz'
# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'pascal_label_map.pbtxt')

NUM_CLASSES = 1

# Download Model (not needed here since a locally exported model is used)
# opener = urllib.request.URLopener()
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
'''
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
    file_name = os.path.basename(file.name)
    if 'frozen_inference_graph.pb' in file_name:
        tar_file.extract(file, os.getcwd())
'''

# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Loading label map
# Label maps map indices to category names, so that when the network predicts `5`,
# we know that this corresponds to `airplane`.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


# Helper code
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)


# Detection
# If you want to test the code with your own images, just add their paths to TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_police'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 22)]

# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True


def run_inference_for_single_image(image, graph):
    with graph.as_default():
        with tf.Session(config=config) as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in ['num_detections', 'detection_boxes', 'detection_scores',
                        'detection_classes', 'detection_masks']:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for a single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

            # Run inference
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})

            # All outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict


for image_path in TEST_IMAGE_PATHS:
    image = Image.open(image_path)
    # The array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=8)
    # plt.subplot(1, 2, 1)
    # plt.imshow(image)
    # plt.subplot(1, 2, 2)
    # plt.imshow(image_np)
    plt.savefig(image_path + '_labeled.jpg')
    # plt.show()
    # plt.legend()

print("finished")

Set MODEL_NAME to the path of your exported model.

Set PATH_TO_LABELS to the path of your .pbtxt file.

Set NUM_CLASSES to your number of classes.

Set PATH_TO_TEST_IMAGES_DIR to the directory containing your test images, named image1.jpg, image2.jpg, and so on.

In TEST_IMAGE_PATHS, set the range to cover your image indices.

Then run the script.

10. Evaluate the model

Modify the following parameters in legacy/eval.py and run it (a command-line sketch follows the snippet):

flags.DEFINE_string('checkpoint_dir', 'your trained model path',
                    'Directory containing checkpoints to evaluate, typically '
                    'set to `train_dir` used in the training job.')
flags.DEFINE_string('eval_dir', 'your eval model save path',
                    'Directory to write eval summaries to.')
flags.DEFINE_string('pipeline_config_path', 'your .config path',
                    'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
                    'file. If provided, other configs are ignored')
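
As with training, these values can also be supplied as command-line flags (placeholder paths):

python legacy/eval.py --checkpoint_dir=path/to/your/train_dir --eval_dir=path/to/your/eval_dir --pipeline_config_path=path/to/your/pipeline.config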

All done.

References:

https://blog.csdn.net/Arvin_liang/article/details/84752427

https://chtseng.wordpress.com/2019/02/16/%E5%A6%82%E4%BD%95%E4%BD%BF%E7%94%A8google-object-detection-api%E8%A8%93%E7%B7%B4%E8%87%AA%E5%B7%B1%E7%9A%84%E6%A8%A1%E5%9E%8B/

https://www.cnblogs.com/zongfa/p/9663649.html

https://blog.csdn.net/linolzhang/article/details/87121875
