修改一下配置文件就可以运行了
配置文件
config.py
video_path = 'xxxx/dataset/data/huaping/BXDQ05-花屏-1.mp4'# path of the source video to process
frame_path = 'xxxx/dataset/frame'# directory where extracted frames are saved
flow_path = 'xxxx/dataset/flow'# directory where computed optical-flow images are saved
save_video_path = 'xxxx/fetch-flow/output/result.mp4'# path of the final video assembled from the flow frames
fps = 30
2.将视频拆分为帧
fetchFrame.py
from __future__ import print_function
import sys
import numpy as np
import os
import imageio#用于读取和写入图像和视频的库
import cv2
import config
Height = 512
Width = 512


def get_frame(file_dir):
    """Split the video at *file_dir* into frames.

    Each frame is resized to (Width, Height) with bicubic interpolation and
    saved as ``<i>.jpg`` under ``config.frame_path``.  The index of the last
    processed frame is stored once in ``nframes.npy``.

    Parameters:
        file_dir: path of the video file to read (decoded via ffmpeg).
    """
    with imageio.get_reader(file_dir, 'ffmpeg') as vid:
        # NOTE(review): the original read vid.get_meta_data()['nframes'] here,
        # but never used it; that metadata key is unreliable (often inf) and
        # absent in modern imageio, so it was removed.
        n_frames = 0  # initialized so an empty video still saves a count
        for i, frame in enumerate(vid):
            n_frames = i
            # INTER_CUBIC gives better quality than the default bilinear.
            frame = cv2.resize(frame, (Width, Height),
                               interpolation=cv2.INTER_CUBIC)
            imageio.imwrite(config.frame_path + '/' + str(i) + '.jpg', frame)
    # Save once after the loop instead of rewriting the file per frame.
    np.save('nframes.npy', n_frames)
3.计算光流
fetchFlow.py
import os

import cv2
import numpy as np
from glob import glob

# Intended working resolution for flow images.
# NOTE(review): this constant is not referenced by the visible code — confirm
# whether it is still needed.
_IMAGE_SIZE = 256
from glob import globdef cal_for_frames(video_path):# 获取视频路径下的所有jpg图片路径print(video_path)# 对图片路径进行排序frames = [f for f in os.listdir(video_path)]frames = sorted(frames, key=lambda x:int(x[:-4]))#字符串排序顺序会是1,10,11这样,所以要进行关键字排序 frames = [os.path.join(video_path, f) for f in frames]print(frames)flow = []prev = cv2.imread(frames[0]) # 读取第一帧图像prev = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY) # 将第一帧图像转换为灰度图像for i, frame_curr in enumerate(frames):print(frame_curr)curr = cv2.imread(frame_curr) # 读取当前帧图像curr = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY) # 将当前帧图像转换为灰度图像tmp_flow = compute_TVL1(prev, curr) # 计算当前帧和前一帧之间的光流flow.append(tmp_flow) # 将光流添加到列表中prev = curr # 更新前一帧图像print(i)return flow # 返回光流列表def compute_TVL1(prev, curr, bound=15):"""计算TV-L1光流。参数:prev: numpy.ndarray, 输入图像序列中的前一帧图像。curr: numpy.ndarray, 输入图像序列中的当前帧图像。bound: int, 光流值的边界限制,默认为15。返回值:flow: numpy.ndarray, 计算得到的光流图像。"""# 创建TV-L1光流估计算法对象TVL1 = cv2.DualTVL1OpticalFlow_create()# 使用TV-L1算法计算光流flow = TVL1.calc(prev, curr, None)assert flow.dtype == np.float32# 将光流值进行缩放和取整操作flow = (flow + bound) * (255.0 / (2*bound))flow = np.round(flow).astype(int)# 将光流值限制在0-255范围内flow[flow >= 255] = 255flow[flow <= 0] = 0return flowdef save_flow(video_flows, flow_path):"""保存视频的光流图参数:video_flows:视频的光流图(三维数组)flow_path:保存光流图的路径(字符串)返回值:无"""for i, flow in enumerate(video_flows):# 保存u分量的光流图cv2.imwrite(os.path.join(flow_path.format('u'), "{:06d}.jpg".format(i)),flow[:, :, 0])# 保存v分量的光流图cv2.imwrite(os.path.join(flow_path.format('v'), "{:06d}.jpg".format(i)),flow[:, :, 1])#u代表水平分量,v代表垂直分量def extract_flow(video_path,flow_path):flow = cal_for_frames(video_path)print("finish1")save_flow(flow, flow_path)print('finish2')print('complete:' + flow_path)return
4.将帧写入视频
main.py
import os

import imageio
import numpy as np
import PIL.Image as Image

import config
from fetchFlow import extract_flow
from fetchFrame import get_frame

video_path = config.video_path
fps = config.fps
flow_path = config.flow_path
frame_path = config.frame_path
save_video_path = config.save_video_path

# Step 1: split the source video into frames (uncomment on the first run).
# get_frame(video_path)

# Step 2: compute optical flow for every consecutive frame pair.
extract_flow(frame_path, flow_path)

# BUGFIX: 'os' was used below but never imported, and os.listdir() returns
# files in arbitrary order — sort so frames are assembled in sequence.
flow_files = sorted(os.listdir(flow_path))
flow_list = [Image.open(os.path.join(flow_path, f)) for f in flow_files]

# Step 3: write the flow images back out as a video.
with imageio.get_writer(save_video_path, fps=fps) as video:
    for image in flow_list:
        image = image.convert('RGB')  # JPEGs may be single-channel
        video.append_data(np.array(image))
参考:提取光流