由于在内网发送http请求同一个token会限制次数,所以很容易达到网关流量上限。
业务中使用了多线程并发,一个线程发起一次http请求,得到正确结果后返回。这里采用的策略是,如果解析出来达到流量上限,那么该线程休眠一段时间,然后重试请求,如果还是失败,那么继续休眠,每次休眠的时间随着重试轮次增加:
# Probe whether the gateway traffic limit has been hit.
def probe_func(m_url, m_headers, m_json, m_timeout):
    """Send one POST request and report whether it got a usable answer.

    Args:
        m_url: gateway endpoint URL.
        m_headers: HTTP headers (carries the rate-limited token).
        m_json: JSON request body.
        m_timeout: per-request timeout in seconds.

    Returns:
        The `requests` response object when the call succeeded
        (payload ``code`` == 0), otherwise ``None`` so the caller can
        back off and retry.

    Network failures (timeout, connection reset) and undecodable
    bodies are treated the same as a rate-limit rejection: returning
    ``None`` lets the retry loop in ``request_post_reliable`` handle
    them, instead of an uncaught exception killing the worker thread.
    """
    try:
        json_rep = requests.post(url=m_url, headers=m_headers,
                                 json=m_json, timeout=m_timeout)
        zhiyan_data = json_rep.json()
    except (requests.RequestException, ValueError):
        # requests' JSONDecodeError subclasses ValueError, so this
        # covers both transport errors and non-JSON responses.
        return None
    # Missing or non-zero `code` means the gateway rejected the call
    # (e.g. traffic limit reached) — signal a retryable failure.
    if zhiyan_data.get('code') != 0:
        return None
    return json_rep
def parse(json_rep, room_name, metric_name):
    """Extract the metric value from a probe response.

    Args:
        json_rep: response object from ``probe_func``, or ``None`` when
            the probe failed.
        room_name: label prepended to log messages.
        metric_name: metric label used in log messages.

    Returns:
        The metric value on success, otherwise 0 (missing response,
        non-JSON body, empty data, or a ``None`` metric value).
    """
    if json_rep is None:
        logging.info(room_name + " json_rep == None")
        return 0
    # Only parse real JSON bodies; 204 has no content by definition.
    if (json_rep.content and json_rep.status_code != 204
            and json_rep.headers["content-type"].strip().startswith("application/json")):
        zhiyan_data = json_rep.json()
        if len(zhiyan_data['data']) == 0:
            # BUG FIX: the original did zhiyan_data['日志信息拉取无结果'],
            # subscripting the payload with the message text and raising
            # KeyError; the text is the warning message itself.
            logging.warning("日志信息拉取无结果")
            return 0
        # Fixed payload layout: data.chart_info[0].key_data_list[3].current
        # — assumed from this call site only; TODO confirm against the
        # gateway's response schema.
        res = zhiyan_data['data']['chart_info'][0]['key_data_list'][3]['current']
        logging.info(room_name + str(res))
        if str(res) == "None":
            logging.warning(room_name + ":拉取zhiyan_data:" + metric_name + " 出现了问题,拉取数据为None")
            return 0
        return res
    return 0
# Reliable fetch: retry with capped exponential backoff until the probe succeeds.
def request_post_reliable(m_url, m_headers, m_json, m_timeout):
    """POST to the gateway, retrying until a successful response arrives.

    Args:
        m_url: gateway endpoint URL.
        m_headers: HTTP headers (carries the rate-limited token).
        m_json: JSON request body.
        m_timeout: per-request timeout in seconds.

    Returns:
        The successful response object from ``probe_func``.

    When ``probe_func`` returns ``None`` (traffic limit hit), the
    calling thread sleeps and retries, doubling the sleep each round
    up to a 60-second cap, so a throttled token backs off instead of
    hammering the gateway.  NOTE: this loops forever if the gateway
    never recovers — there is deliberately no retry limit.
    """
    sleep_time_s = 1
    sleep_time_max = 60
    res = probe_func(m_url, m_headers, m_json, m_timeout)
    while res is None:
        logging.info("探测失败,线程睡眠" + str(sleep_time_s) + "秒")
        time.sleep(sleep_time_s)
        # Exponential backoff, capped at sleep_time_max.
        sleep_time_s = min(sleep_time_s * 2, sleep_time_max)
        logging.info("睡眠结束,线程重新探测")
        res = probe_func(m_url, m_headers, m_json, m_timeout)
    return res