# KeyframesAI / app.py

from main import run_app, run_train, run_inference, run_generate_frame, run_interpolate_frames, resize_images
from evaluate import run_evaluate, get_score
import spaces  # Hugging Face Spaces SDK (ZeroGPU); not referenced directly in this file
from PIL import Image
import cv2
import os
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
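        # Input column: character images, motion-capture video, pose data, and generation settings.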
        with gr.Column():
            char_imgs = gr.Gallery(type="pil", label="Images of the Character")
            mocap = gr.Video(label="Motion-Capture Video")
            frame_imgs = gr.Gallery(type="pil", label="Reference Images of Each Frame")
            poses = gr.JSON(label="Pose Coordinates")
            tr_steps = gr.Number(label="Training steps", value=10)
            inf_steps = gr.Number(label="Inference steps", value=10)
            fps = gr.Number(label="Output frame rate", value=12)
            modelId = gr.Text(label="Model Id", value="fine_tuned_pcdms")
            remove_bg = gr.Checkbox(label="Remove background", value=False)
            resize_inputs = gr.Checkbox(label="Resize images to match video", value=True)
            img_width = gr.Number(label="Output width", value=1920)
            img_height = gr.Number(label="Output height", value=1080)
            interp_frame1 = gr.Image(type="pil", label="Interpolation Start Frame")
            interp_frame2 = gr.Image(type="pil", label="Interpolation End Frame")
            times_to_interp = gr.Number(label="Times to Interpolate", value=1)
            name = gr.Text(label="Name", value="")
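            # Buttons that trigger the training, generation, interpolation, evaluation, and resize handlers wired up below.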
            train_btn = gr.Button(value="Train")
            inference_btn = gr.Button(value="Inference")
            generate_frame_btn = gr.Button(value="Generate Frame")
            submit_btn = gr.Button(value="Generate")
            interp_btn = gr.Button(value="Interpolate Frames")
            eval_btn = gr.Button(value="Evaluate All")
            eval_btn2 = gr.Button(value="Evaluate")
            resize_btn = gr.Button(value="Resize")
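        # Output column: generated animation, individual frames, pose data, and evaluation scores.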
        with gr.Column():
            animation = gr.Video(label="Result")
            frames = gr.Gallery(type="pil", label="Frames", format="png")
            frames_thumb = gr.Gallery(type="pil", label="Thumbnails", format="png")
            pose_coords = gr.JSON(label="Pose Coordinates")
            reference = gr.Gallery(type="pil", label="Reference Images", format="png")
            eval_scores = gr.JSON(label="Evaluation Scores")
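
    # Event wiring: each button calls the matching entry point in main.py or evaluate.py.
    # "Generate" runs the full pipeline (character images + mocap video -> animation and frames).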
    submit_btn.click(
        run_app, inputs=[char_imgs, mocap, tr_steps, inf_steps, fps, remove_bg, resize_inputs], outputs=[animation, frames]
    )
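    # "Train": training step only; returns nothing to the UI.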
    train_btn.click(
        run_train, inputs=[char_imgs, tr_steps, modelId, remove_bg, resize_inputs], outputs=[]
    )
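    # "Inference": render the animation with the model named in modelId, returning frames, thumbnails, pose coordinates, and reference images.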
    inference_btn.click(
        run_inference, inputs=[char_imgs, mocap, frame_imgs, tr_steps, inf_steps, fps, modelId, img_width, img_height, remove_bg, resize_inputs], outputs=[animation, frames, frames_thumb, pose_coords, reference]
    )
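    # "Generate Frame": generate frames from explicit pose coordinates instead of a mocap video.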
    generate_frame_btn.click(
        run_generate_frame, inputs=[char_imgs, poses, tr_steps, inf_steps, modelId, img_width, img_height, remove_bg, resize_inputs], outputs=[frames, frames_thumb]
    )
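    # "Interpolate Frames": synthesize in-between frames for the supplied start and end frames.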
    interp_btn.click(
        run_interpolate_frames, inputs=[interp_frame1, interp_frame2, times_to_interp, remove_bg], outputs=[frames, frames_thumb]
    )
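    # "Evaluate All": compute evaluation scores without any per-run inputs.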
    eval_btn.click(
        run_evaluate, inputs=[], outputs=[eval_scores]
    )
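    # "Evaluate": score a single named run from the provided inputs.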
    eval_btn2.click(
        get_score, inputs=[name, char_imgs, mocap, tr_steps, inf_steps, fps, remove_bg], outputs=[eval_scores]
    )
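    # "Resize": resize the reference frame images to the requested output dimensions.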
    resize_btn.click(
        resize_images, inputs=[frame_imgs, img_width, img_height], outputs=[frames]
    )

demo.launch(share=True, show_error=True)