import gradio as gr
import subprocess

# Run the Wav2Lip model on a face video and a driving audio track
def run_wav2lip(video, audio):
    # Build the inference command as an argument list; this avoids the
    # shell-quoting pitfalls of shell=True with user-supplied file paths
    command = [
        "python", "inference.py",
        "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
        "--face", video,
        "--audio", audio,
        "--outfile", "output_result.mp4",
    ]
    # check=True raises CalledProcessError if inference fails
    subprocess.run(command, check=True)
    # Return the output video file path for Gradio to display
    return "output_result.mp4"
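
# A hypothetical pre-flight check, not part of the original script: inference.py
# fails with an opaque traceback when the checkpoint file is missing, so
# verifying it up front gives a clearer error. The default path matches the one
# hard-coded in run_wav2lip above; call it at startup or inside run_wav2lip.
def check_checkpoint(path="checkpoints/wav2lip_gan.pth"):
    import os  # local import keeps this sketch self-contained
    if not os.path.isfile(path):
        raise FileNotFoundError(f"Wav2Lip checkpoint not found at '{path}'")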

# Gradio interface
with gr.Blocks() as interface:
    gr.Markdown("# Wav2Lip Model")
    gr.Markdown("Upload a video and an audio file to generate a lip-synced video.")

    # Input components; type="filepath" makes gr.Audio hand run_wav2lip a file
    # path (its default is a (sample_rate, numpy array) tuple, which the
    # inference command cannot use)
    video_input = gr.Video(label="Input Video")
    audio_input = gr.Audio(label="Input Audio", type="filepath")

    # Output component
    output_video = gr.Video(label="Output Video")

    # Button to trigger Wav2Lip processing
    run_button = gr.Button("Run Wav2Lip")
    run_button.click(run_wav2lip, inputs=[video_input, audio_input], outputs=output_video)
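
# Optional, and an assumption rather than part of the original app: Wav2Lip
# inference can take minutes, so enabling Gradio's request queue (a standard
# gr.Blocks method) keeps long runs from hitting HTTP timeouts.
interface.queue()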

# Launch the Gradio app
if __name__ == "__main__":
    interface.launch()