import cv2
import numpy as np
from moviepy.editor import VideoFileClip
# ───── Effect Functions ───── #
def blur_effect(frame, steps=30, max_blur=15):
    """
    Applies a gradually increasing blur.
    """
    blurred_frames = []
    for i in range(steps):
        blur_strength = int((i / (steps - 1)) * max_blur)
        if blur_strength % 2 == 0:  # Blur kernel must be odd
            blur_strength += 1
        blurred = cv2.GaussianBlur(frame, (blur_strength, blur_strength), 0)
        blurred_frames.append(blurred)
    return blurred_frames
def brightness_pulse_effect(frame, steps=30, max_change=50):
    """
    Pulses brightness up and down.
    """
    frame = frame.astype(np.float32)
    output_frames = []
    for i in range(steps):
        factor = 1 + (np.sin(i / steps * 2 * np.pi) * (max_change / 255))
        bright = np.clip(frame * factor, 0, 255).astype(np.uint8)
        output_frames.append(bright)
    return output_frames
def fade_in_effect(frame, steps=30):
    """
    Fades in the frame from black.
    """
    output_frames = []
    black = np.zeros_like(frame)
    for i in range(steps):
        alpha = i / (steps - 1)
        blended = cv2.addWeighted(frame, alpha, black, 1 - alpha, 0)
        output_frames.append(blended)
    return output_frames
def slide_in_left_effect(frame, steps=30):
    """
    Slides the frame in from the left.
    """
    h, w = frame.shape[:2]
    output_frames = []
    for i in range(steps):
        x_offset = int(w * (1 - i / (steps - 1)))
        canvas = np.zeros_like(frame)
        canvas[:, max(0, x_offset):] = frame[:, :w - max(0, x_offset)]
        output_frames.append(canvas)
    return output_frames
def rotate_effect(frame, steps=30, max_angle=15):
    """
    Rotates the frame gradually.
    """
    h, w = frame.shape[:2]
    center = (w // 2, h // 2)
    output_frames = []
    for i in range(steps):
        angle = max_angle * (i / (steps - 1))
        matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated = cv2.warpAffine(frame, matrix, (w, h), borderMode=cv2.BORDER_REFLECT)
        output_frames.append(rotated)
    return output_frames
def zoom_center_effect(frame, steps=30, zoom_factor=1.2):
    """
    Gradually zooms into the center of the frame.
    """
    h, w = frame.shape[:2]
    frames = []
    for i in range(steps):
        scale = 1 + (zoom_factor - 1) * (i / (steps - 1))
        new_w, new_h = int(w / scale), int(h / scale)
        x1, y1 = (w - new_w) // 2, (h - new_h) // 2
        crop = frame[y1:y1 + new_h, x1:x1 + new_w]
        resized = cv2.resize(crop, (w, h), interpolation=cv2.INTER_LINEAR)
        frames.append(resized)
    return frames
def grayscale_effect(frame, steps=30):
    """
    Holds a grayscale version of the frame for the full step count.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_colored = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    return [gray_colored] * steps
def none_effect(frame, steps=30):
    """
    Repeats the unmodified frame (no effect applied).
    """
    return [frame] * steps
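# A new effect only needs to follow the same contract as the functions above:
# take one BGR frame plus a `steps` count and return a list of `steps` frames.
# The sepia_effect below is a hypothetical sketch (not part of the original
# effect set) showing how such a plug-in effect could look.
def sepia_effect(frame, steps=30):
    """
    Gradually blends the frame toward a sepia-toned version (illustrative sketch).
    """
    # Classic sepia mixing matrix, with rows/columns ordered for BGR channels
    kernel = np.array([[0.131, 0.534, 0.272],
                       [0.168, 0.686, 0.349],
                       [0.189, 0.769, 0.393]])
    sepia = cv2.transform(frame.astype(np.float32), kernel)
    sepia = np.clip(sepia, 0, 255).astype(np.uint8)
    output_frames = []
    for i in range(steps):
        alpha = i / max(steps - 1, 1)  # guard against steps == 1
        output_frames.append(cv2.addWeighted(sepia, alpha, frame, 1 - alpha, 0))
    return output_frames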
# ───── Frame Extractor ───── #
def extract_frames_one_per_second(video_path):
    """
    Grabs one frame per second of video by seeking to each whole-second mark.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Error opening video: {video_path}")
    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration_sec = int(total_frames / fps)
    frames = []
    for sec in range(duration_sec):
        cap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
        success, frame = cap.read()
        if success:
            frames.append(frame)
        else:
            print(f"⚠️ Skipped second {sec}")
    cap.release()
    return frames
def extract_frames_by_interval(video_path, interval_sec=1.0):
    """
    Grabs one frame every `interval_sec` seconds and also returns the video duration.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Error opening video: {video_path}")
    input_fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration_sec = total_frames / input_fps
    timestamps = np.arange(0, duration_sec, interval_sec)
    frames = []
    for t in timestamps:
        cap.set(cv2.CAP_PROP_POS_MSEC, t * 1000)
        success, frame = cap.read()
        if success:
            frames.append(frame)
        else:
            print(f"⚠️ Skipped timestamp {t:.2f}s")
    cap.release()
    return frames, duration_sec
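# Optional sanity check (hypothetical helper, not used by the pipeline below):
# dump the sampled frames to disk so the chosen interval can be inspected
# before rendering a full video.
def preview_extracted_frames(video_path, interval_sec=1.0, out_dir="preview_frames"):
    import os
    os.makedirs(out_dir, exist_ok=True)
    frames, duration_sec = extract_frames_by_interval(video_path, interval_sec=interval_sec)
    for idx, frame in enumerate(frames):
        cv2.imwrite(os.path.join(out_dir, f"frame_{idx:03d}.jpg"), frame)
    print(f"Saved {len(frames)} frames covering {duration_sec:.2f}s to {out_dir}/")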
# ───── Video Creator ───── #
def create_effect_video(frames, output_path, duration_sec, fps=30, effect_fn=None, **kwargs):
    """
    Renders the extracted frames to a video, applying the chosen effect to each
    frame and padding so the output matches the original duration.
    """
    if not frames:
        raise ValueError("No frames provided.")
    h, w = frames[0].shape[:2]
    total_output_frames = int(duration_sec * fps)
    frames_per_input = total_output_frames // len(frames)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (w, h))
    for frame in frames:
        if effect_fn is not None:
            effect_frames = effect_fn(frame, steps=frames_per_input, **kwargs)
        else:
            effect_frames = [frame] * frames_per_input
        for f in effect_frames:
            out.write(f)
    # Pad with the last frame so the output length matches the target duration
    actual_written = len(frames) * frames_per_input
    remaining = total_output_frames - actual_written
    if remaining > 0:
        last_frame = frames[-1]
        for _ in range(remaining):
            out.write(last_frame)
    out.release()
    print(f"✅ Video saved to: {output_path} ({duration_sec:.2f} sec @ {fps} fps)")
# ───── MAIN ───── #
def main(video_file="your_video.mp4", output_file="final_output.mp4", effect_name="Zoom Center", interval=5, fps=30):
    effect_fn, effect_kwargs = EFFECTS[effect_name]
    frames, duration = extract_frames_by_interval(video_file, interval_sec=interval)
    # Temporary video without audio
    temp_video_path = "temp_no_audio.mp4"
    create_effect_video(frames, temp_video_path, duration_sec=duration, fps=fps, effect_fn=effect_fn, **effect_kwargs)
    # Add original audio back using moviepy
    original_clip = VideoFileClip(video_file)
    processed_clip = VideoFileClip(temp_video_path)
    final_clip = processed_clip.set_audio(original_clip.audio)
    final_clip.write_videofile(output_file, codec="libx264", audio_codec="aac")
    return output_file
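# Example invocation (assumes "your_video.mp4" exists next to this script and
# that ffmpeg is available for moviepy):
# main(video_file="your_video.mp4", output_file="final_output.mp4",
#      effect_name="Fade In", interval=5, fps=30)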
# ───── Usage Example ───── #
EFFECTS = {
    "Zoom Center": (zoom_center_effect, {"zoom_factor": 1.2}),
    "Grayscale": (grayscale_effect, {}),
    "Blur": (blur_effect, {"max_blur": 15}),
    "None": (none_effect, {}),
    "Brightness Pulse": (brightness_pulse_effect, {"max_change": 50}),
    "Fade In": (fade_in_effect, {}),
    "Slide In Left": (slide_in_left_effect, {}),
    "Rotate": (rotate_effect, {"max_angle": 15}),
}
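# To expose an additional effect in the lookup table, register a (function, kwargs)
# pair, e.g. for the hypothetical sepia_effect sketched earlier:
# EFFECTS["Sepia"] = (sepia_effect, {})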
if __name__ == "__main__":
    video_file = "your_video.mp4"
    output_file = "final_output.mp4"
    fps = 30
    interval = 5
    zoom_factor = 1.2
    # Available effects: zoom_center_effect, grayscale_effect, none_effect, or custom
    # chosen_effect = rotate_effect
    effect_kwargs = {}
    chosen_effect = zoom_center_effect
    # effect_kwargs = {'zoom_factor': zoom_factor}  # Optional arguments for the effect
    # Alternative: sample one frame per second (duration then equals the frame count)
    # frames = extract_frames_one_per_second(video_file)
    # create_effect_video(frames, output_file, duration_sec=len(frames), fps=fps, effect_fn=chosen_effect, **effect_kwargs)
    frames, duration = extract_frames_by_interval(video_file, interval_sec=interval)
    create_effect_video(frames, output_file, duration_sec=duration, fps=fps, effect_fn=chosen_effect, **effect_kwargs)