m-ric HF Staff committed on
Commit
1a6d10d
·
verified ·
1 Parent(s): 7998fe3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -0
app.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import queue
import threading

import gradio as gr
from dia.model import Dia
from huggingface_hub import InferenceClient

# Hardcoded podcast subject
PODCAST_SUBJECT = "The future of AI and its impact on society"

# Initialize the inference client
# - `client` writes the podcast *script* via a hosted chat model.
#   NOTE(review): this is a Coder-tuned model being used for creative
#   dialogue writing — confirm this model choice is intentional.
# - `model` is the local Dia TTS model that turns script chunks into audio.
client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct", provider="together")
model = Dia.from_pretrained("nari-labs/Dia-1.6B", compute_dtype="float16")

# Queue for audio streaming: the generation worker thread pushes audio
# chunks here and the Gradio streaming generator consumes them.
# `stop_signal` lets the UI's Stop button interrupt the worker between chunks.
# NOTE(review): both are module-level singletons, so two concurrent UI
# sessions would share one queue/flag — confirm single-user usage is assumed.
audio_queue = queue.Queue()
stop_signal = threading.Event()
18
+
19
+
20
def generate_podcast_text(subject):
    """Ask the hosted chat model for a two-host podcast script about *subject*.

    The prompt instructs the model to tag each speaker turn with [S1]/[S2],
    which is the dialogue format the Dia TTS model expects downstream.

    Returns the raw script text from the model's first completion choice.
    """
    prompt = f"""Generate a podcast told by 2 hosts about {subject}.
The podcast should be an insightful discussion, with some amount of playful banter.
Separate dialog as follows using [S1] for the male host and [S2] for the female host, for instance:
[S1] Hello, how are you?
[S2] I'm good, thank you. How are you?
[S1] I'm good, thank you. (laughs)
[S2] Great.
Now go on, make 2 minutes of podcast.
"""
    messages = [{"role": "user", "content": prompt}]
    completion = client.chat_completion(messages, max_tokens=1000)
    first_choice = completion.choices[0]
    return first_choice.message.content
32
+
33
+
34
def split_podcast_into_chunks(podcast_text, chunk_size=10):
    """Split a podcast script into chunks of at most *chunk_size* lines.

    The script is stripped of surrounding whitespace and split on newlines;
    consecutive groups of lines are re-joined with "\n". The final chunk may
    be shorter than *chunk_size*. An empty/whitespace-only script yields a
    single empty chunk ([""]), matching str.split semantics.

    Args:
        podcast_text: full script text, one speaker turn per line.
        chunk_size: maximum number of lines per chunk.

    Returns:
        list[str]: the script partitioned into newline-joined chunks.
    """
    lines = podcast_text.strip().split("\n")
    # Idiomatic grouping: one slice per chunk instead of a manual append loop.
    return ["\n".join(lines[i : i + chunk_size]) for i in range(0, len(lines), chunk_size)]
43
+
44
+
45
def process_audio_chunks(podcast_text):
    """Worker-thread target: synthesize audio chunk-by-chunk onto audio_queue.

    Splits the script into chunks and runs Dia TTS on each, pushing every
    generated audio chunk onto the shared `audio_queue`. Honors `stop_signal`
    between chunks so the Stop button can interrupt generation.

    Always enqueues a trailing ``None`` sentinel — even if synthesis raises —
    so the consumer in `stream_audio_generator` never blocks forever on
    ``queue.get()``.
    """
    try:
        for chunk in split_podcast_into_chunks(podcast_text):
            # Checked between chunks only; an in-flight model.generate call
            # still runs to completion before the stop takes effect.
            if stop_signal.is_set():
                break

            audio_chunk = model.generate(chunk, use_torch_compile=True, verbose=False)
            audio_queue.put(audio_chunk)
    finally:
        # End-of-stream sentinel: guaranteed even on error, otherwise the
        # consumer would hang waiting for a chunk that never arrives.
        audio_queue.put(None)
56
+
57
+
58
def stream_audio_generator(podcast_text):
    """Creates a generator that yields (sample_rate, chunk) tuples for streaming.

    Starts `process_audio_chunks` in a background thread and yields each
    synthesized chunk as it becomes available, until the ``None`` sentinel
    marks the end of generation.
    """
    stop_signal.clear()

    # Drain anything left over from a previous (stopped) run so stale audio
    # chunks — including a leftover None sentinel — don't leak into this
    # stream and truncate or corrupt it.
    while not audio_queue.empty():
        try:
            audio_queue.get_nowait()
        except queue.Empty:
            break

    # Start audio generation in a separate thread; daemon so an abandoned
    # generation can't keep the process alive at shutdown.
    gen_thread = threading.Thread(target=process_audio_chunks, args=(podcast_text,), daemon=True)
    gen_thread.start()

    # NOTE(review): Dia reportedly outputs 44.1 kHz audio — confirm 22050
    # doesn't play the result at half speed / lowered pitch.
    sample_rate = 22050

    try:
        while True:
            # Get next chunk from queue; None signals end of generation.
            chunk = audio_queue.get()
            if chunk is None:
                break

            # Yield the audio chunk with sample rate (Gradio streaming format).
            yield (sample_rate, chunk)

    except Exception as e:
        print(f"Error in streaming: {e}")
    finally:
        # Whether we finished, errored, or the consumer abandoned this
        # generator mid-stream: tell the worker to stop and reap the thread.
        stop_signal.set()
        gen_thread.join()
82
+
83
+
84
def stop_generation():
    """Signal the generation worker to stop; returns a status message for the UI.

    Sets the module-level `stop_signal` event, which `process_audio_chunks`
    checks between chunks — so the stop takes effect after the chunk
    currently being synthesized finishes.
    """
    stop_signal.set()
    return "Generation stopped"
87
+
88
+
89
def generate_podcast():
    """Generate a podcast script for the hardcoded PODCAST_SUBJECT."""
    return generate_podcast_text(PODCAST_SUBJECT)
92
+
93
+
94
# Gradio UI: script generation on top, streaming audio preview below.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# NotebookLM Podcast Generator")

    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown(f"## Current Topic: {PODCAST_SUBJECT}")
            gr.Markdown("This app generates a podcast discussion between two hosts about the specified topic.")

            generate_btn = gr.Button("Generate Podcast Script", variant="primary")
            podcast_output = gr.Textbox(label="Generated Podcast Script", lines=15)

            gr.Markdown("## Audio Preview")
            gr.Markdown("Click below to hear the podcast with realistic voices:")

            with gr.Row():
                start_audio_btn = gr.Button("▶️ Generate Podcast", variant="secondary")
                stop_btn = gr.Button("⏹️ Stop", variant="stop")

            # streaming=True: accepts the (sample_rate, chunk) generator output.
            audio_output = gr.Audio(label="Podcast Audio", streaming=True)
            status_text = gr.Textbox(label="Status", visible=True)

    # Wire buttons to handlers: script generation fills the textbox, which
    # in turn feeds the streaming audio generator.
    generate_btn.click(fn=generate_podcast, outputs=podcast_output)

    start_audio_btn.click(fn=stream_audio_generator, inputs=podcast_output, outputs=audio_output)
    stop_btn.click(fn=stop_generation, outputs=status_text)

if __name__ == "__main__":
    # queue() is required for generator-based (streaming) event handlers.
    demo.queue().launch()