Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -5,13 +5,14 @@ import gradio as gr
 from dia.model import Dia
 from huggingface_hub import InferenceClient
 import numpy as np
+from transformers import set_seed
 
 # Hardcoded podcast subject
 PODCAST_SUBJECT = "The future of AI and its impact on society"
 
 # Initialize the inference client
 client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", provider="cerebras", token=os.getenv("HF_TOKEN"))
-model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16"
+model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")
 
 # Queue for audio streaming
 audio_queue = queue.Queue()
@@ -49,7 +50,7 @@ def process_audio_chunks(podcast_text):
     for chunk in chunks:
         if stop_signal.is_set():
             break
-
+        set_seed(42)
         raw_audio = model.generate(chunk, use_torch_compile=True, verbose=False)
         audio_chunk = np.array(raw_audio, dtype=np.float32)
         audio_queue.put((sample_rate, audio_chunk))
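For context on the change: transformers.set_seed seeds Python's random, NumPy, and PyTorch in a single call, so re-seeding immediately before each model.generate call pins the RNG state per chunk and makes each chunk's sampling reproducible across runs. A minimal toy sketch of that effect; the categorical draw() below is a hypothetical stand-in for the sampling inside Dia's generate, not its actual code:

    import torch
    from transformers import set_seed

    # Toy stand-in for the stochastic sampling step inside model.generate():
    # draw one token id from a fixed categorical distribution.
    probs = torch.tensor([0.1, 0.2, 0.3, 0.4])

    def draw() -> int:
        return int(torch.multinomial(probs, num_samples=1))

    # Re-seeding before each draw resets the RNG state, mirroring what the
    # commit does before each model.generate(chunk, ...) call.
    seeded = []
    for _ in range(3):
        set_seed(42)  # seeds Python's random, NumPy, and PyTorch together
        seeded.append(draw())

    assert len(set(seeded)) == 1  # identical draw every time

Seeding inside the loop, rather than once at start-up, means regenerating any single chunk yields the same audio regardless of how many chunks were generated before it.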
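The diff context also shows the app's streaming architecture: the generation loop puts (sample_rate, audio_chunk) tuples onto audio_queue, and a consumer not visible in this diff presumably drains the queue to stream audio to the UI. A minimal sketch of that producer/consumer pattern, where producer, consume, and the None end-of-stream sentinel are all hypothetical and not claimed to match the actual app.py:

    import queue
    import threading

    import numpy as np

    audio_queue = queue.Queue()

    def producer() -> None:
        # Stand-in for the generation loop in process_audio_chunks():
        # each item mirrors audio_queue.put((sample_rate, audio_chunk)).
        for _ in range(3):
            silence = np.zeros(44100, dtype=np.float32)  # 1 s placeholder chunk
            audio_queue.put((44100, silence))
        audio_queue.put(None)  # hypothetical sentinel marking end of stream

    def consume():
        # Stand-in for the streaming side that would feed the audio output.
        while (item := audio_queue.get()) is not None:
            yield item

    threading.Thread(target=producer, daemon=True).start()
    for sample_rate, chunk in consume():
        print(sample_rate, chunk.shape)

Decoupling generation from playback this way lets the slow model.generate calls run on a worker thread while earlier chunks are already streaming.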