
import os
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from bark import SAMPLE_RATE, generate_audio, preload_models
from scipy.io.wavfile import write as write_wav

# Load local models for LLM and TTS at import time.
# NOTE(review): preload_models() downloads/loads the Bark TTS weights; the
# GPT-Neo 2.7B checkpoint below is a multi-GB download on first run — both
# make importing this module slow and side-effecting. Confirm that is intended.
preload_models()

# Load the Hugging Face model locally and wrap it in a text-generation
# pipeline used by generate_script().
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def generate_script(system_prompt: str, input_text: str, output_model=None):
    """Generate a dialogue script from the prompt using the local GPT-Neo model.

    Args:
        system_prompt: Instruction text prepended to the user input.
        input_text: The source text to turn into a dialogue script.
        output_model: Unused; kept for backward compatibility with callers.

    Returns:
        The generated dialogue text (without the echoed prompt), or a string
        starting with "Error generating script:" if generation fails.
    """
    try:
        # max_new_tokens bounds only the generated continuation; the previous
        # max_length=1000 also counted prompt tokens, so long prompts could be
        # truncated or fail outright. return_full_text=False stops the
        # pipeline from echoing the prompt back in the returned dialogue.
        response = generator(
            system_prompt + input_text,
            max_new_tokens=1000,
            do_sample=True,
            return_full_text=False,
        )
        return response[0]["generated_text"]
    except Exception as e:
        # Best-effort API: callers get an error string rather than an
        # exception, matching generate_podcast_audio's error handling.
        return f"Error generating script: {str(e)}"

def generate_podcast_audio(text: str, output_file="podcast_audio.wav"):
    """Synthesize *text* to speech with Bark and save it as a WAV file.

    Args:
        text: The text to speak.
        output_file: Destination WAV path (default "podcast_audio.wav").

    Returns:
        The path of the written file on success, or a string starting with
        "Error generating audio:" if synthesis or writing fails.
    """
    try:
        waveform = generate_audio(text)
        write_wav(output_file, SAMPLE_RATE, waveform)
    except Exception as err:
        return f"Error generating audio: {str(err)}"
    return output_file