import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Using a robust and stable model for the free tier
MODEL = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL)
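
# Optional (an assumption, not in the original file): flan-t5-large runs on
# CPU in a free Space, but moving it to a GPU, when one is available, makes
# generation noticeably faster. Inputs are moved to model.device below either way.
import torch

if torch.cuda.is_available():
    model = model.to("cuda")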

def generate_content(topic, lang_choice, content_type):
    # Build the prompt in the language the user picked.
    if lang_choice == "Arabic":
        if content_type == "YouTube Script":
            # "Write a professional YouTube script in Arabic about: {topic}.
            # Make it one minute long with an engaging intro, key points, and a conclusion."
            prompt = f"اكتب سكربت يوتيوب احترافي باللغة العربية عن: {topic}. اجعله دقيقة واحدة مع مقدمة جذابة ونقاط أساسية وخاتمة."
        elif content_type == "YouTube Title":
            # "Create a catchy, short YouTube title, no longer than 60 characters, about: {topic}."
            prompt = f"أنشئ عنوان يوتيوب جذاب قصير لا يتجاوز 60 حرفاً عن: {topic}."
        elif content_type == "Description + SEO":
            # "Write an SEO-optimized YouTube description about: {topic} with suggested hashtags."
            prompt = f"اكتب وصف يوتيوب مُحسَّن للسيو عن: {topic} مع هاشتاغات مقترحة."
        else:  # LinkedIn Post
            # "Write a short, professional LinkedIn post in Arabic about: {topic}."
            prompt = f"اكتب منشور لينكدإن احترافي قصير بالعربية عن: {topic}."
    else: # English
        if content_type == "YouTube Script":
            prompt = f"Write a professional YouTube script in English about: {topic}. Make it one minute long with an engaging intro, key points, and a conclusion."
        elif content_type == "YouTube Title":
            prompt = f"Create a catchy YouTube title in English, under 60 characters, about: {topic}."
        elif content_type == "Description + SEO":
            prompt = f"Write an SEO-friendly YouTube description about: {topic} with suggested hashtags."
        else: # LinkedIn Post
            prompt = f"Write a professional LinkedIn post in English about: {topic}."

    # Guard inference so tokenizer/model errors surface as a friendly message.
    try:
        # Truncate overly long inputs to the model's limit and keep the
        # tensors on the same device as the model (CPU by default).
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(model.device)
        outs = model.generate(**inputs, max_length=300)
        return tokenizer.decode(outs[0], skip_special_tokens=True)
    except Exception as e:
        # Arabic error message: "An error occurred: {e}. Try again or change the topic."
        return f"حدث خطأ: {str(e)}. حاول مرة أخرى أو غيّر الموضوع."

iface = gr.Interface(
    fn=generate_content,
    inputs=[
        gr.Textbox(label="Topic"),
        gr.Radio(["Arabic", "English"], label="Language"),
        gr.Radio(["YouTube Script", "YouTube Title", "Description + SEO", "LinkedIn Post"], label="Content Type")
    ],
    outputs=gr.Textbox(label="Result")
)
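
# Note (an assumption, not in the original file): on Hugging Face Spaces,
# queuing requests helps when each generation takes several seconds; recent
# Gradio versions queue by default, older ones need an explicit call:
# iface.queue()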

if __name__ == "__main__":
    iface.launch()