# zerogpu-upscaler-interpolation / video_processing.py
import os
import cv2
import torch
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
import warnings
import _thread
import time
import skvideo.io
from queue import Queue
from model.pytorch_msssim import ssim_matlab
warnings.filterwarnings("ignore")
def transferAudio(sourceVideo, targetVideo):
    """Copy the audio track from sourceVideo onto targetVideo via ffmpeg."""
    import shutil

    tempAudioFileName = "./temp/audio.mkv"

    # split audio from the original video file and store it in a fresh "temp" directory
    if os.path.isdir("temp"):
        # clear old "temp" directory if it exists
        shutil.rmtree("temp")
    os.makedirs("temp")
    # extract audio from the video
    os.system('ffmpeg -y -i "{}" -c:a copy -vn {}'.format(sourceVideo, tempAudioFileName))

    targetNoAudio = os.path.splitext(targetVideo)[0] + "_noaudio" + os.path.splitext(targetVideo)[1]
    os.rename(targetVideo, targetNoAudio)
    # combine the audio file and the new video file
    os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))

    if os.path.getsize(targetVideo) == 0:
        # ffmpeg failed to merge the video and audio; try transcoding the audio to AAC
        tempAudioFileName = "./temp/audio.m4a"
        os.system('ffmpeg -y -i "{}" -c:a aac -b:a 160k -vn {}'.format(sourceVideo, tempAudioFileName))
        os.system('ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(targetNoAudio, tempAudioFileName, targetVideo))
        if os.path.getsize(targetVideo) == 0:
            # AAC is not supported by the selected container either; keep the silent video
            os.rename(targetNoAudio, targetVideo)
            print("Audio transfer failed. Interpolated video will have no audio.")
        else:
            print("Lossless audio transfer failed. Audio was transcoded to AAC (M4A) instead.")
            # remove the audio-less video
            os.remove(targetNoAudio)
    else:
        os.remove(targetNoAudio)

    # remove the temp directory
    shutil.rmtree("temp")
def process_video(video, output, modelDir, fp16, UHD, scale, skip, fps, png, ext, exp, multi):
    if exp != 1:
        multi = 2 ** exp
    assert video is not None
    if skip:
        print("The skip flag has been abandoned; please refer to issue #207.")
    if UHD and scale == 1.0:
        scale = 0.5
    assert scale in [0.25, 0.5, 1.0, 2.0, 4.0]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.set_grad_enabled(False)
    if torch.cuda.is_available():
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
        if fp16:
            torch.set_default_tensor_type(torch.cuda.HalfTensor)

    from rife.train_log.RIFE_HDv3 import Model
    model = Model()
    if not hasattr(model, 'version'):
        model.version = 0
    model.load_model(modelDir, -1)
    print("Loaded 3.x/4.x HD model.")
    model.eval()
    model.device()
    videoCapture = cv2.VideoCapture(video)
    fps_in = videoCapture.get(cv2.CAP_PROP_FPS)
    tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
    videoCapture.release()
    if fps is None:
        fpsNotAssigned = True
        fps_out = fps_in * multi
    else:
        fpsNotAssigned = False
        fps_out = fps

    videogen = skvideo.io.vreader(video)
    lastframe = next(videogen)
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video_path_wo_ext, video_ext = os.path.splitext(video)
    print('{}.{}, {} frames in total, {}FPS to {}FPS'.format(video_path_wo_ext, ext, tot_frame, fps_in, fps_out))
    if not png and fpsNotAssigned:
        print("The audio will be merged after the interpolation process.")
    else:
        print("Will not merge audio because the png or fps flag is in use.")

    h, w, _ = lastframe.shape
    vid_out_name = None
    vid_out = None
    if png:
        if not os.path.exists('vid_out'):
            os.mkdir('vid_out')
    else:
        if output is not None:
            vid_out_name = output
        else:
            vid_out_name = '{}_{}X_{}fps.{}'.format(video_path_wo_ext, multi, int(np.round(fps_out)), ext)
        vid_out = cv2.VideoWriter(vid_out_name, fourcc, fps_out, (w, h))
    def clear_write_buffer(write_buffer):
        # writer thread: drain frames from the queue into PNGs or the video file;
        # the original passed an empty tuple as user_args, so user_args.png raised
        # AttributeError -- read the enclosing png flag instead
        cnt = 0
        while True:
            item = write_buffer.get()
            if item is None:
                break
            if png:
                cv2.imwrite('vid_out/{:0>7d}.png'.format(cnt), item[:, :, ::-1])
                cnt += 1
            else:
                vid_out.write(item[:, :, ::-1])

    def build_read_buffer(read_buffer, videogen):
        # reader thread: decode frames ahead of time; None signals end of stream
        try:
            for frame in videogen:
                read_buffer.put(frame)
        except Exception:
            pass
        read_buffer.put(None)
    def make_inference(I0, I1, n):
        if model.version >= 3.9:
            # RIFE >= 3.9 supports arbitrary timesteps directly
            res = []
            for i in range(n):
                res.append(model.inference(I0, I1, (i + 1) * 1. / (n + 1), scale))
            return res
        else:
            # older models only predict the midpoint, so recurse to fill n frames
            middle = model.inference(I0, I1, scale)
            if n == 1:
                return [middle]
            first_half = make_inference(I0, middle, n=n // 2)
            second_half = make_inference(middle, I1, n=n // 2)
            if n % 2:
                return [*first_half, middle, *second_half]
            else:
                return [*first_half, *second_half]
    def pad_image(img):
        if fp16:
            return F.pad(img, padding).half()
        else:
            return F.pad(img, padding)

    # pad frames so height and width are multiples of the model's window size
    tmp = max(128, int(128 / scale))
    ph = ((h - 1) // tmp + 1) * tmp
    pw = ((w - 1) // tmp + 1) * tmp
    padding = (0, pw - w, 0, ph - h)

    pbar = tqdm(total=tot_frame)
    write_buffer = Queue(maxsize=500)
    read_buffer = Queue(maxsize=500)
    _thread.start_new_thread(build_read_buffer, (read_buffer, videogen))
    _thread.start_new_thread(clear_write_buffer, (write_buffer,))

    I1 = torch.from_numpy(np.transpose(lastframe, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
    I1 = pad_image(I1)
    temp = None  # holds the look-ahead frame when a static frame is detected
    while True:
        if temp is not None:
            frame = temp
            temp = None
        else:
            frame = read_buffer.get()
        if frame is None:
            break
        I0 = I1
        I1 = torch.from_numpy(np.transpose(frame, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
        I1 = pad_image(I1)
        I0_small = F.interpolate(I0, (32, 32), mode='bilinear', align_corners=False)
        I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
        ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])

        break_flag = False
        if ssim > 0.996:
            # nearly identical frames: look ahead one frame and synthesize a midpoint
            frame = read_buffer.get()  # read a new frame
            if frame is None:
                break_flag = True
                frame = lastframe
            else:
                temp = frame
            I1 = torch.from_numpy(np.transpose(frame, (2, 0, 1))).to(device, non_blocking=True).unsqueeze(0).float() / 255.
            I1 = pad_image(I1)
            I1 = model.inference(I0, I1, scale=scale)
            I1_small = F.interpolate(I1, (32, 32), mode='bilinear', align_corners=False)
            ssim = ssim_matlab(I0_small[:, :3], I1_small[:, :3])
            frame = (I1[0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w]

        if ssim < 0.2:
            # scene change: duplicate I0 rather than interpolating across the cut
            # (renamed from "output" to avoid shadowing the output-path parameter)
            out_frames = []
            for i in range(multi - 1):
                out_frames.append(I0)
        else:
            out_frames = make_inference(I0, I1, multi - 1)

        write_buffer.put(lastframe)
        for mid in out_frames:
            mid = (mid[0] * 255.).byte().cpu().numpy().transpose(1, 2, 0)
            write_buffer.put(mid[:h, :w])
        pbar.update(1)
        lastframe = frame
        if break_flag:
            break

    write_buffer.put(lastframe)
    write_buffer.put(None)
    # wait for the writer thread to drain the queue before closing the output
    while not write_buffer.empty():
        time.sleep(0.1)
    pbar.close()
    if vid_out is not None:
        vid_out.release()

    # merge audio back in when we produced a video at the source frame-rate multiple
    if not png and fpsNotAssigned and video is not None:
        try:
            transferAudio(video, vid_out_name)
        except Exception:
            print("Audio transfer failed. Interpolated video will have no audio.")
            targetNoAudio = os.path.splitext(vid_out_name)[0] + "_noaudio" + os.path.splitext(vid_out_name)[1]
            os.rename(targetNoAudio, vid_out_name)
    return vid_out_name
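

# Minimal usage sketch, assuming this module is run directly and the RIFE
# weights live under "rife/train_log". The CLI flags below are illustrative
# assumptions, not the Space's actual entry point.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="RIFE video frame interpolation")
    parser.add_argument("--video", required=True, help="path to the input video")
    parser.add_argument("--output", default=None, help="path for the interpolated video")
    parser.add_argument("--multi", type=int, default=2, help="frame-rate multiplier")
    args = parser.parse_args()

    # hypothetical invocation with default flags; modelDir is an assumption
    process_video(video=args.video, output=args.output, modelDir="rife/train_log",
                  fp16=False, UHD=False, scale=1.0, skip=False, fps=None,
                  png=False, ext="mp4", exp=1, multi=args.multi)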