import tempfile
import os
import shutil
import librosa
import json
import subprocess
import gc
import requests
import time
import re
from googletrans import Translator
import asyncio
from flask import Flask, request, jsonify, send_from_directory
from omegaconf import OmegaConf
import torch
from diffusers import AutoencoderKL, DDIMScheduler
from latentsync.models.unet import UNet3DConditionModel
from latentsync.pipelines.lipsync_pipeline import LipsyncPipeline
from diffusers.utils.import_utils import is_xformers_available
from accelerate.utils import set_seed
from latentsync.whisper.audio2feature import Audio2Feature
from openai import OpenAI
from elevenlabs import set_api_key, generate, Voice, VoiceSettings
from torch.cuda.amp import autocast
 
# Initialize the Flask app
app = Flask(__name__)
TEMP_DIR = None
VIDEO_DIRECTORY = os.path.abspath("videos")
os.makedirs(VIDEO_DIRECTORY, exist_ok=True)

# HeyGen API configuration (read the key from the environment; never commit secrets to source control)
HEYGEN_API_KEY = os.environ.get("HEYGEN_API_KEY", "")
HEYGEN_GENERATE_URL = "https://api.heygen.com/v2/video/generate"
HEYGEN_STATUS_URL = "https://api.heygen.com/v1/video_status.get"

# Initialize the OpenAI client (the key comes from the environment, not source control)
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

def clear_cuda_memory():
    """Release cached GPU memory and trigger Python garbage collection."""
    torch.cuda.empty_cache()
    gc.collect()

def openai_chat_avatar(text_prompt):
    """Summarize text using OpenAI GPT-4o-mini"""
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "Summarize the following paragraph into a complete and accurate single sentence with no more than 30 words. The summary should capture the gist of the paragraph and make sense and remove the citation and document name from the end."},
            {"role": "user", "content": f"Please summarize the following paragraph into one sentence with 30 words or fewer, ensuring it makes sense and captures the gist and remove the citation from the end: {text_prompt}"},
        ],
        max_tokens=100,  # budget for a ~30-word summary; the input's character count is not a token count
    )
    return response
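
# Usage note: the function returns the raw ChatCompletion object; the /run route
# below extracts the summary text with:
#   openai_response.choices[0].message.content.strip()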

def ryzedb_chat_avatar(question, app_id):
    """Query RyzeDB API for response"""
    url = "https://inference.dev.ryzeai.ai/v2/chat/stream"
    print("ryze db question", question)
    
    payload = {
        "input": {
            "app_id": app_id,
            "query": question,
            "chat_history": []
        },
        "config": {
            "thread_id": "123456"
        }
    }
    
    headers = {
        'Content-Type': 'application/json'
    }
    
    response = requests.post(url, json=payload, headers=headers, stream=True, timeout=60)
    
    try:
        raw_text = response.text.strip()
    
        if raw_text.startswith("data:"):
            raw_text = raw_text[len("data:"):].strip()
    
        json_data = json.loads(raw_text)
    
        response_content = json_data.get("content", "")
        return response_content

    except Exception as e:
        print("Error parsing response:", e)
        return ""

def run_inference(video_path, audio_path, video_out_path,
                  inference_ckpt_path, unet_config_path="configs/unet/second_stage.yaml",
                  inference_steps=20, guidance_scale=1.0, seed=1247):
    clear_cuda_memory()
    
    # Load configuration
    config = OmegaConf.load(unet_config_path)
 
    # Determine proper dtype based on GPU capabilities
    is_fp16_supported = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] > 7
    dtype = torch.float16 if is_fp16_supported else torch.float32
 
    # Setup scheduler
    scheduler = DDIMScheduler.from_pretrained("configs")
 
    # Choose whisper model based on config settings
    if config.model.cross_attention_dim == 768:
        whisper_model_path = "checkpoints/whisper/small.pt"
    elif config.model.cross_attention_dim == 384:
        whisper_model_path = "checkpoints/whisper/tiny.pt"
    else:
        raise NotImplementedError("cross_attention_dim must be 768 or 384")
 
    # Initialize the audio encoder
    audio_encoder = Audio2Feature(model_path=whisper_model_path,
                                  device="cuda", num_frames=config.data.num_frames)
 
    # Load VAE
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=dtype)
    vae.config.scaling_factor = 0.18215
    vae.config.shift_factor = 0
 
    # Load UNet model from the checkpoint
    unet, _ = UNet3DConditionModel.from_pretrained(
        OmegaConf.to_container(config.model),
        inference_ckpt_path,  # load checkpoint
        device="cpu",
    )
    unet = unet.to(dtype=dtype)
 
    # Optionally enable memory-efficient attention if available
    if is_xformers_available():
        unet.enable_xformers_memory_efficient_attention()
 
    # Initialize the pipeline and move to GPU
    pipeline = LipsyncPipeline(
        vae=vae,
        audio_encoder=audio_encoder,
        unet=unet,
        scheduler=scheduler,
    ).to("cuda")
 
    # Set seed
    if seed != -1:
        set_seed(seed)
    else:
        torch.seed()
 
    with autocast():
        try:
            pipeline(
                video_path=video_path,
                audio_path=audio_path,
                video_out_path=video_out_path,
                video_mask_path=video_out_path.replace(".mp4", "_mask.mp4"),
                num_frames=config.data.num_frames,
                num_inference_steps=inference_steps,
                guidance_scale=guidance_scale,
                weight_dtype=dtype,
                width=config.data.resolution,
                height=config.data.resolution,
            )
        finally:
            clear_cuda_memory()
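
# A minimal sketch of invoking run_inference outside the Flask route; the input
# and output file names are illustrative (only the checkpoint path mirrors the
# route's default below):
#
#   run_inference(
#       video_path="input_face.mp4",
#       audio_path="speech.mp3",
#       video_out_path="lipsynced.mp4",
#       inference_ckpt_path="checkpoints/latentsync_unet.pt",
#   )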

def create_temp_dir():
    return tempfile.TemporaryDirectory()

def generate_audio(voice_cloning, text_prompt):
    """Generate speech for text_prompt with ElevenLabs, optionally using a cloned voice."""
    set_api_key(os.environ.get("ELEVENLABS_API_KEY", ""))  # keep the key out of source control

    if voice_cloning == 'yes':
        print('Generating cloned-voice audio with ElevenLabs')
        voice = Voice(
            voice_id="wu4wgNArkao4Vy9SnHzL",
            name="alex costa cacau",
            settings=VoiceSettings(stability=0.71, similarity_boost=0.9, style=0.0, use_speaker_boost=True),
        )
        prefix = "cloned_audio_"
    else:
        print('Generating default-voice audio with ElevenLabs')
        voice = "Daniel"
        prefix = "default_audio_"

    audio = generate(text=text_prompt, voice=voice, model="eleven_multilingual_v2", stream=True, latency=4)
    with tempfile.NamedTemporaryFile(suffix=".mp3", prefix=prefix, dir=TEMP_DIR.name, delete=False) as temp_file:
        for chunk in audio:
            temp_file.write(chunk)
        driven_audio_path = temp_file.name
    print('driven_audio_path:', driven_audio_path)
    return driven_audio_path

def get_video_duration(video_path):
    """Extracts video duration dynamically using ffprobe."""
    cmd = [
        "ffprobe", "-v", "error", "-show_entries", "format=duration", 
        "-of", "json", video_path
    ]
    result = subprocess.run(cmd, capture_output=True, text=True, check=True)
    duration = json.loads(result.stdout)["format"]["duration"]
    return float(duration)
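
# With "-of json", ffprobe prints e.g. {"format": {"duration": "12.345678"}},
# so the duration arrives as a string and is cast to float above.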

def extend_video_simple(video_path, audio_path, output_path):
    """Extends video duration by appending a reversed version if audio is longer."""
    audio_duration = librosa.get_duration(path=audio_path)
    video_duration = get_video_duration(video_path)

    print(f"Video Duration: {video_duration:.2f} sec")
    print(f"Audio Duration: {audio_duration:.2f} sec")

    if audio_duration > video_duration:
        print("Extending video by adding reversed version.")

        # Create a reversed version of the full video
        reversed_clip = tempfile.NamedTemporaryFile(dir=TEMP_DIR.name, delete=False, suffix=".mp4").name
        
        # Argument lists (no shell=True) avoid quoting problems with paths that contain spaces
        subprocess.run(
            ["ffmpeg", "-y", "-i", video_path, "-vf", "reverse", "-an", reversed_clip],
            check=True,
        )

        # Merge original + reversed
        subprocess.run(
            ["ffmpeg", "-y", "-i", video_path, "-i", reversed_clip,
             "-filter_complex", "[0:v:0][1:v:0]concat=n=2:v=1[outv]",
             "-map", "[outv]", "-an", output_path],
            check=True,
        )
    else:
        print("Audio is not longer than video. No extension needed.")
        shutil.copy(video_path, output_path)

def extend_video_loop(video_path, audio_path, output_path):
    """Extends video duration by repeating original and reversed video until it meets/exceeds audio duration."""
    audio_duration = librosa.get_duration(path=audio_path)
    video_duration = get_video_duration(video_path)

    print(f"Video Duration: {video_duration:.2f} sec")
    print(f"Audio Duration: {audio_duration:.2f} sec")

    if audio_duration > video_duration:
        print("Extending video by repeating original and reversed versions.")

        # Create reversed video
        reversed_clip = tempfile.NamedTemporaryFile(dir=TEMP_DIR.name, delete=False, suffix=".mp4").name
        subprocess.run(
            ["ffmpeg", "-y", "-i", video_path, "-vf", "reverse", "-an", reversed_clip],
            check=True,
        )

        # Generate a list of clips to reach/exceed audio duration
        video_clips = [video_path, reversed_clip]
        total_duration = video_duration * 2  # Original + reversed

        while total_duration < audio_duration:
            video_clips.append(video_path)
            video_clips.append(reversed_clip)
            total_duration += video_duration * 2

        print(f"Total Clips: {len(video_clips)}")

        # Use FFmpeg filter_complex concat for seamless merging
        concat_filter = "".join(f"[{i}:v:0]" for i in range(len(video_clips))) + f"concat=n={len(video_clips)}:v=1[outv]"
        input_args = []
        for clip in video_clips:
            input_args += ["-i", clip]

        subprocess.run(
            ["ffmpeg", "-y", *input_args, "-filter_complex", concat_filter,
             "-map", "[outv]", "-an", output_path],
            check=True,
        )

        print(f"Extended video saved to {output_path}")

    else:
        print("Audio is not longer than video. No extension needed.")
        shutil.copy(video_path, output_path)

def translate_text(text, target_language):
    if not text or text.strip() == "":
        return ""
    LANGUAGE_CODES = {"english": "en", "hindi": "hi"}   
    try:
        # Convert the language name to a code; bail out early if unsupported
        target_language_code = LANGUAGE_CODES.get(target_language.lower())
        if target_language_code is None:
            print(f"Unsupported target language: {target_language}")
            return text
        
        # Use Google Translate with proper coroutine handling
        async def perform_translation():
            translator = Translator()
            result = await translator.translate(text, dest=target_language_code)
            return result.text if hasattr(result, 'text') else text
        
        # Run the coroutine to completion (the googletrans build used here exposes an async API)
        result = asyncio.run(perform_translation())
        
        return result
    except Exception as e:
        print(f"Error translating text: {e}")
        # Return original text if translation fails
        return text
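
# Usage sketch (return value illustrative, not a recorded output):
#   translate_text("Hello, how are you?", "hindi")  # -> the Hindi rendering of the prompt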

def generate_heygen_video(text_prompt, avatar_id=None, voice_id=None, background_color="#f6f6fc"):
    """Generate video using HeyGen API"""
    print("Generating video using HeyGen API...")
    
    # Default avatar and voice IDs if not provided
    default_avatar_id = "b2fe0a1b3393465db58ec15d92f69ef5"
    default_voice_id = "0503bcc31c3f43d895be188940fae86b"
    
    payload = {
        "caption": True,
        "title": "AI Avatar Video",
        "callback_id": f"avatar_{int(time.time())}",
        "dimension": {
            "width": 1280,
            "height": 720
        },
        "video_inputs": [
            {
                "character": {
                    "type": "avatar",
                    "avatar_id": avatar_id or default_avatar_id,
                    "talking_photo_id": "",
                    "scale": 1,
                    "avatar_style": "normal",
                    "offset": {
                        "x": 0,
                        "y": 0
                    },
                    "matting": True,
                    "circle_background_color": "#ffffff",
                    "talking_photo_style": "",
                    "talking_style": "stable",
                    "expression": "default",
                    "super_resolution": False
                },
                "voice": {
                    "type": "text",
                    "voice_id": voice_id or default_voice_id,
                    "input_text": text_prompt,
                    "speed": 1,
                    "pitch": 0,
                    "emotion": "Excited",
                    "locale": "en-US"
                },
                "background": {
                    "type": "color",
                    "value": background_color
                }
            }
        ]
    }

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "x-api-key": HEYGEN_API_KEY
    }

    # Step 1: Send the video generation request
    response = requests.post(HEYGEN_GENERATE_URL, json=payload, headers=headers, timeout=60)
    
    try:
        response_data = response.json()
    except ValueError:
        raise Exception("Invalid response from HeyGen API")

    # Extract video_id from the response
    video_id = None
    if "data" in response_data:
        video_id = response_data["data"].get("video_id")
    else:
        video_id = response_data.get("video_id")

    if not video_id:
        raise Exception(f"No video_id returned from HeyGen API. Response: {response_data}")

    print(f"HeyGen video generation started with video_id: {video_id}")

    # Step 2: Poll for video status
    return poll_heygen_video_status(video_id, headers)

def poll_heygen_video_status(video_id, headers, poll_interval=10, max_attempts=60):
    """Poll the HeyGen API until the video completes (defaults allow 60 attempts at 10 s intervals, ~10 minutes)."""
    attempts = 0
    
    while attempts < max_attempts:
        try:
            params = {"video_id": video_id}
            status_response = requests.get(HEYGEN_STATUS_URL, headers=headers, params=params, timeout=30)
            
            if status_response.status_code == 200:
                status_data = status_response.json()
                status = status_data.get("data", {}).get("status")
                print(f"HeyGen polling attempt {attempts+1}: Video status is '{status}'")
                
                if status == "completed":
                    video_url = status_data.get("data", {}).get("video_url")
                    if video_url:
                        return download_heygen_video(video_url)
                    else:
                        raise Exception("Video completed but no video URL found")
                elif status == "failed":
                    error_msg = status_data.get("data", {}).get("error", "Unknown error")
                    raise Exception(f"HeyGen video generation failed: {error_msg}")
            else:
                print(f"Error polling HeyGen status: {status_response.status_code} - {status_response.text}")
                break
                
        except Exception as e:
            print(f"Exception during HeyGen polling: {e}")
            break
        
        attempts += 1
        time.sleep(poll_interval)
    
    raise Exception("HeyGen video generation timed out")

def download_heygen_video(video_url):
    """Download the completed video from HeyGen"""
    print(f"Downloading video from: {video_url}")
    
    try:
        response = requests.get(video_url, stream=True, timeout=120)
        response.raise_for_status()

        # Stream the download straight into a temporary file
        with tempfile.NamedTemporaryFile(dir=TEMP_DIR.name, delete=False, suffix="_heygen.mp4") as temp_video:
            for chunk in response.iter_content(chunk_size=8192):
                temp_video.write(chunk)

        print(f"HeyGen video downloaded to: {temp_video.name}")
        return temp_video.name
        
    except Exception as e:
        raise Exception(f"Failed to download HeyGen video: {e}")

@app.route('/run', methods=['POST'])
def generate_video():
    global TEMP_DIR
    TEMP_DIR = create_temp_dir()  # note: a module-level temp dir is not safe under concurrent requests
    start_time = time.time()

    # Get form parameters
    text_prompt = request.form.get('text_prompt', '').strip()
    if not text_prompt:
        return jsonify({'error': 'Input text prompt cannot be blank'}), 400
    
    print('Input text prompt:', text_prompt)
    
    # Get processing parameters
    use_heygen = request.form.get('use_heygen', 'no').lower() == 'yes'
    voice_cloning = request.form.get('voice_cloning', 'no')
    target_language = request.form.get('target_language', 'original_text')
    chat_model_used = request.form.get('chat_model_used', 'ryzedb')
    app_id = request.form.get('app_id', '')
    
    # Validate app_id if using RyzeDB
    if chat_model_used == 'ryzedb' and not app_id:
        return jsonify({'error': 'App ID cannot be blank when using RyzeDB'}), 400

    try:
        # Process text prompt based on chat model selection
        if chat_model_used == 'ryzedb':
            start_time_ryze = time.time()
            print("Processing text with RyzeDB...")
            
            # Get response from RyzeDB
            ryze_response = ryzedb_chat_avatar(text_prompt, app_id)
            print("Response from RyzeDB inference:", ryze_response)
            
            # Clean up response if needed
            if "No information available" in ryze_response:
                ryze_response = re.sub(r'\\+', '', ryze_response)
            
            # Summarize with OpenAI
            openai_response = openai_chat_avatar(ryze_response)
            text_prompt = openai_response.choices[0].message.content.strip()
            
            end_time_ryze = time.time()
            ryze_processing_time = end_time_ryze - start_time_ryze
            print(f'Final processed text prompt using RyzeDB + OpenAI: {text_prompt}')
            print(f'Time to process with RyzeDB + OpenAI: {ryze_processing_time:.2f} seconds')
            
        elif chat_model_used == 'self':
            print("Using original text prompt without processing...")
            text_prompt = text_prompt.strip()
        else:
            print("Unknown chat model specified, using original text...")
            text_prompt = text_prompt.strip()
    
        # Translate text if needed
        if target_language != 'original_text':
            translated_text = translate_text(text_prompt, target_language)
            text_prompt = translated_text.strip()
            print('Translated input text prompt:', text_prompt)

        if use_heygen:
            print("Using HeyGen API for video generation...")
            
            # Get HeyGen-specific parameters
            avatar_id = request.form.get('heygen_avatar_id')
            voice_id = request.form.get('heygen_voice_id')
            background_color = request.form.get('background_color', '#f6f6fc')
            
            # Generate video using HeyGen
            final_output_video = generate_heygen_video(
                text_prompt=text_prompt,
                avatar_id=avatar_id,
                voice_id=voice_id,
                background_color=background_color
            )
            
        else:
            print("Using local AI avatar for video generation...")
            
            # Check if video file is provided for local processing
            if 'video' not in request.files:
                return jsonify({'error': 'Video file is required for local AI avatar processing.'}), 400
            
            video_file = request.files['video']
            
            # Generate audio using ElevenLabs
            temp_audio_path = generate_audio(voice_cloning, text_prompt)
            
            # Save uploaded video to temporary file
            with tempfile.NamedTemporaryFile(suffix=".mp4", prefix="input_", dir=TEMP_DIR.name, delete=False) as temp_file:
                temp_video_path = temp_file.name
                video_file.save(temp_video_path)
                print('temp_video_path:', temp_video_path)

            # Get inference parameters
            inference_ckpt_path = request.form.get('inference_ckpt_path', 'checkpoints/latentsync_unet.pt')
            unet_config_path = request.form.get('unet_config_path', 'configs/unet/second_stage.yaml')

            # Extend video to match audio duration
            output_video = tempfile.NamedTemporaryFile(dir=TEMP_DIR.name, delete=False, suffix=".mp4").name
            extend_video_loop(temp_video_path, temp_audio_path, output_video)
            
            # Generate final lip-sync video
            final_output_video = tempfile.NamedTemporaryFile(dir=TEMP_DIR.name, delete=False, suffix="_final_extended.mp4").name
            
            run_inference(
                video_path=output_video,
                audio_path=temp_audio_path,
                video_out_path=final_output_video,
                inference_ckpt_path=inference_ckpt_path,
                unet_config_path=unet_config_path,
                inference_steps=int(request.form.get('inference_steps', 20)),
                guidance_scale=float(request.form.get('guidance_scale', 1.0)),
                seed=int(request.form.get('seed', 1247))
            )

        # Save the final video to the videos directory
        if final_output_video and final_output_video.endswith('.mp4'):
            filename = f"avatar_video_{int(time.time())}.mp4"
            destination_path = os.path.join(VIDEO_DIRECTORY, filename)
            shutil.copy(final_output_video, destination_path)
            video_url = f"/videos/{filename}"
            
            processing_method = "HeyGen API" if use_heygen else "Local AI Avatar"
            
            # Calculate total processing time
            end_time = time.time()
            total_time = end_time - start_time
            
            return jsonify({
                "message": f"Video processed successfully using {processing_method}.",
                "output_video": video_url,
                "processing_method": processing_method,
                "text_prompt": text_prompt,
                "chat_model_used": chat_model_used,
                "time_taken": round(total_time, 2),
                "status": "success"
            }), 200
        else:
            return jsonify({'error': 'Failed to generate video'}), 500
            
    except Exception as e:
        print(f"Error generating video: {e}")
        return jsonify({'error': str(e)}), 500
    finally:
        # Remove per-request temporary files; the final video was already copied to VIDEO_DIRECTORY
        TEMP_DIR.cleanup()

@app.route("/videos/<string:filename>", methods=['GET'])
def serve_video(filename):
    return send_from_directory(VIDEO_DIRECTORY, filename, as_attachment=False)
 
@app.route("/health", methods=["GET"])
def health_status():
    response = {"online": "true"}
    return jsonify(response)
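
# Example request against /run (a sketch: host/port assume the Flask dev server
# started below, and the form values and file path are illustrative):
#
#   curl -X POST http://127.0.0.1:5000/run \
#        -F "text_prompt=What is the refund policy?" \
#        -F "chat_model_used=ryzedb" \
#        -F "app_id=YOUR_APP_ID" \
#        -F "use_heygen=no" \
#        -F "voice_cloning=no" \
#        -F "video=@avatar_input.mp4"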

if __name__ == '__main__':
    # Flask's built-in server is for development only; use a WSGI server (e.g. gunicorn) in production
    app.run(debug=True)