File size: 1,107 Bytes
3af5f64 536d2b0 3af5f64 b2ad090 3af5f64 7473c3f 536d2b0 7473c3f 536d2b0 7473c3f 536d2b0 7473c3f 3af5f64 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
import gradio as gr
import subprocess
# Function to run Wav2Lip model
def run_wav2lip(video, audio):
    """Run the Wav2Lip inference script on an uploaded video/audio pair.

    Parameters
    ----------
    video : str
        Filesystem path to the input face video (as provided by gr.Video).
    audio : str
        Filesystem path to the driving audio file.
        NOTE(review): gr.Audio returns a (sample_rate, ndarray) tuple unless
        constructed with type="filepath" — confirm the component config.

    Returns
    -------
    str
        Path of the generated lip-synced video, "output_result.mp4".

    Raises
    ------
    subprocess.CalledProcessError
        If the inference script exits with a non-zero status (check=True).
    """
    # Build the command as an argument list and run it WITHOUT a shell.
    # The previous f-string + shell=True version was vulnerable to shell
    # injection via crafted upload file names; single quotes in the
    # template do not protect against a quote embedded in the path.
    command = [
        "python", "inference.py",
        "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
        "--face", str(video),
        "--audio", str(audio),
        "--outfile", "output_result.mp4",
    ]
    subprocess.run(command, check=True)
    # The inference script writes its result to this fixed path.
    return "output_result.mp4"
# Gradio Interface
# Assemble the Gradio demo: two upload inputs, one video output, and a
# button that triggers the Wav2Lip run.
with gr.Blocks() as interface:
    gr.Markdown("# Wav2Lip Model")
    gr.Markdown("Upload a video and an audio file to generate a lip-synced video.")

    # Inputs: the source face video and the driving audio track.
    video_input = gr.Video(label="Input Video")
    audio_input = gr.Audio(label="Input Audio")

    # Output: where the generated lip-synced video is displayed.
    output_video = gr.Video(label="Output Video")

    # Wire the button click to the model runner.
    run_button = gr.Button("Run Wav2Lip")
    run_button.click(
        fn=run_wav2lip,
        inputs=[video_input, audio_input],
        outputs=output_video,
    )

# Start the web server only when executed as a script (not on import).
if __name__ == "__main__":
    interface.launch()
|