|
import subprocess
import sys

import gradio as gr
|
|
|
|
|
def run_wav2lip(video, audio):
    """Run the Wav2Lip inference script on an input video and audio file.

    Args:
        video: Filesystem path to the face video (provided by the Gradio
            Video component).
        audio: Filesystem path to the driving audio track.
            NOTE(review): assumes the Audio component is configured to hand
            back a file path — confirm `type="filepath"` on the component.

    Returns:
        Path to the generated lip-synced video file.

    Raises:
        subprocess.CalledProcessError: if the inference script exits non-zero.
    """
    output_path = "output_result.mp4"
    # Pass arguments as a list with shell=False (the default): interpolating
    # user-controlled paths into a shell string was a shell-injection vector
    # and broke on filenames containing quotes. sys.executable guarantees the
    # subprocess uses the same interpreter/virtualenv as this app.
    command = [
        sys.executable,
        "inference.py",
        "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
        "--face", video,
        "--audio", audio,
        "--outfile", output_path,
    ]
    subprocess.run(command, check=True)
    return output_path
|
|
|
|
|
# Assemble the Gradio UI: two inputs, one output, and a button wired to the
# Wav2Lip runner. `interface` is launched from the __main__ guard below.
with gr.Blocks() as interface:
    # Page title and a short usage hint.
    gr.Markdown("# Wav2Lip Model")
    gr.Markdown("Upload a video and an audio file to generate a lip-synced video.")

    # Upload widgets: the face video to re-animate and the driving audio.
    face_video = gr.Video(label="Input Video")
    driving_audio = gr.Audio(label="Input Audio")

    # Player for the generated lip-synced result.
    result_video = gr.Video(label="Output Video")

    # Clicking the button runs inference and streams the output path back
    # into the result player.
    launch_button = gr.Button("Run Wav2Lip")
    launch_button.click(
        run_wav2lip,
        inputs=[face_video, driving_audio],
        outputs=result_video,
    )
|
|
|
|
|
# Start the Gradio web server only when this file is executed as a script;
# importing the module elsewhere builds the UI without serving it.
if __name__ == "__main__":

    interface.launch()
|
|