Preface:
Reference: Windows上搭建nginx-rtsp流媒体服务器,实现FFmpeg推流、录像转rtsp推流 - WayWayWayne - 博客园, plus a few articles on pulling streams from a WebRTC front end.
The goal here is mainly to sort out the overall flow and what each piece of code does, since the highly similar file names made things easy to mix up.
Result:
Code:
Push an H.264 video stream with FFmpeg in a loop
import time
import subprocess

# Push (publish) command; "your address" is a placeholder for your own paths
command = "your address\\ffmpeg-master-latest-win64-gpl\\bin\\ffmpeg.exe -re -i your address/natural.mp4 -rtsp_transport tcp -vcodec h264 -b:v 2000k -f rtsp rtsp://192.168.233.169:8556/live/stream"

count = 0
while count <= 20:
    # Run the command with subprocess
    process = subprocess.Popen(command, shell=True)
    # Wait for the command to finish
    process.wait()
    # Check the exit status
    if process.returncode == 0:
        print("Command succeeded")
    else:
        print("Command failed")
    count += 1
    print('count:%d' % count)
    time.sleep(5)
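As a quick sanity check (my addition, not from the referenced post), the published stream can be probed from Python with ffprobe, which ships in the same bin folder as ffmpeg.exe; the path below simply reuses the "your address" placeholder from the command above.

import subprocess

# Hypothetical check: probe the RTSP stream and report whether it is reachable
probe_cmd = "your address\\ffmpeg-master-latest-win64-gpl\\bin\\ffprobe.exe -v error -rtsp_transport tcp -show_streams rtsp://192.168.233.169:8556/live/stream"
result = subprocess.run(probe_cmd, shell=True)
print("Stream reachable" if result.returncode == 0 else "Stream not reachable")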
Pull the stream in Python, process it with OpenCV, and display the processed video directly
import cv2

rtsp_url = 'rtsp://192.168.233.169:8556/live/stream'
cap = cv2.VideoCapture(rtsp_url)
if not cap.isOpened():
    print("Could not open RTSP stream.")
    exit(-1)

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

while True:
    ret, frame = cap.read()
    if not ret:
        print("Failed to read frame.")
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    # Collect the detected face boxes
    person_boxes = [(x, y, x + w, y + h) for (x, y, w, h) in faces]
    # Draw the boxes on the original frame
    for box in person_boxes:
        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
    # Show the annotated frame (optional, local debugging only)
    cv2.imshow('RTSP Video Stream with Person Detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
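One optional tweak, assuming OpenCV is using its FFmpeg backend for RTSP (this is an addition of mine, not from the referenced post): force TCP for the pull so it matches the -rtsp_transport tcp option used on the push side. The environment variable has to be set before the VideoCapture is created.

import os
import cv2

# Assumption: OpenCV reads RTSP via its FFmpeg backend; this asks it to use TCP transport
os.environ['OPENCV_FFMPEG_CAPTURE_OPTIONS'] = 'rtsp_transport;tcp'

cap = cv2.VideoCapture('rtsp://192.168.233.169:8556/live/stream', cv2.CAP_FFMPEG)
print("opened:", cap.isOpened())
cap.release()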
Building on the second snippet: process the frames and then push the result back out as an RTSP stream (using the ffmpeg-python bindings)
import cv2
import ffmpeg

# RTSP push (publish) URL
rtsp_output_url = 'rtsp://192.168.233.169:8556/live/cv'

# Assumed camera resolution and frame rate
frame_width = 640
frame_height = 480
fps = 20

# Open the camera
cap = cv2.VideoCapture(0)

# Initialize the Haar cascade classifier for face detection
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Start the ffmpeg process: raw BGR frames in through a stdin pipe, H.264 RTSP out
ffmpeg_process = (
    ffmpeg
    .input('pipe:', format='rawvideo', pix_fmt='bgr24',
           s='{}x{}'.format(frame_width, frame_height), r=fps)
    .output(rtsp_output_url, vcodec='libx264', pix_fmt='yuv420p',
            r=fps, format='rtsp', rtsp_transport='tcp')
    .global_args('-hide_banner', '-loglevel', 'warning')
    .run_async(pipe_stdin=True)
)

while True:
    ret, frame = cap.read()
    if not ret:
        print("Failed to read frame.")
        break
    # Make sure the frame matches the size declared to ffmpeg
    frame = cv2.resize(frame, (frame_width, frame_height))
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    person_boxes = [(x, y, x + w, y + h) for (x, y, w, h) in faces]
    for box in person_boxes:
        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
    # The pipe was declared as bgr24, so the BGR frame bytes can be written as-is
    ffmpeg_process.stdin.write(frame.tobytes())
    # Show the annotated frame (optional, local debugging only)
    cv2.imshow('RTSP Video Stream with Person Detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Close the input pipe and wait for the ffmpeg process to exit
ffmpeg_process.stdin.close()
ffmpeg_process.wait()

# Release the camera
cap.release()
cv2.destroyAllWindows()
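If the pipeline above fails to start, a handy debugging step (my addition, not from the original post) is to print the exact command line that ffmpeg-python builds with compile() and compare it against the hand-written ffmpeg command used in the first script.

import ffmpeg

# Sketch: print the argv that ffmpeg-python would run for the pipeline above
stream = (
    ffmpeg
    .input('pipe:', format='rawvideo', pix_fmt='bgr24', s='640x480', r=20)
    .output('rtsp://192.168.233.169:8556/live/cv', vcodec='libx264',
            pix_fmt='yuv420p', r=20, format='rtsp', rtsp_transport='tcp')
)
print(' '.join(stream.compile()))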
Note: when pulling the video stream, you can also use VLC's "Open Network Stream" dialog for a quick pull test.