24Sureshkumar committed
Commit 240a8d3 · verified
1 Parent(s): 2ee977a

Update app.py

Files changed (1):
  1. app.py +19 -3
app.py CHANGED

@@ -18,12 +18,20 @@ translator_tokenizer = MarianTokenizer.from_pretrained(translator_model)
 generator_model = "EleutherAI/gpt-neo-1.3B"
 generator = AutoModelForCausalLM.from_pretrained(generator_model).to(device)
 generator_tokenizer = AutoTokenizer.from_pretrained(generator_model)
+
+# Ensure tokenizer has a padding token
 if generator_tokenizer.pad_token is None:
     generator_tokenizer.pad_token = generator_tokenizer.eos_token
 
-# Hugging Face API for Image Generation
+# Set Hugging Face API Key
 HF_API_KEY = os.getenv("HF_API_KEY") # Use environment variable
-IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
+if not HF_API_KEY:
+    print("⚠️ Hugging Face API key is missing! Set HF_API_KEY in your environment.")
+else:
+    print("✅ Hugging Face API key detected.")
+
+# Use Stable Diffusion Model for Image Generation
+IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2"
 HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}
 
 def translate_tamil_to_english(text):

@@ -41,15 +49,22 @@ def generate_text(prompt):
 def generate_image(prompt):
     """Sends request to API for image generation."""
     response = requests.post(IMAGE_GEN_URL, headers=HEADERS, json={"inputs": prompt})
+
+    print("🔄 Image Generation Request Sent!")
+    print(f"Status Code: {response.status_code}")
+
     if response.status_code == 200:
+        print("✅ Image successfully generated!")
         return Image.open(io.BytesIO(response.content))
+
+    print("❌ Error in image generation:", response.text)
     return Image.new("RGB", (300, 300), "red") # Error placeholder image
 
 def process_input(tamil_text):
     """Complete pipeline: Translation, Text Generation, and Image Generation."""
     english_text = translate_tamil_to_english(tamil_text)
     creative_text = generate_text(english_text)
-    image = generate_image(english_text)
+    image = generate_image(creative_text)
     return english_text, creative_text, image
 
 # Create Gradio Interface

@@ -67,3 +82,4 @@ interface = gr.Interface(
 
 # Launch the Gradio app
 interface.launch()
+
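
For quick verification outside the Space, the request/response shape used by the updated generate_image can be exercised on its own. The sketch below is illustrative and not part of this commit; it reuses the endpoint, header, and payload from the diff, assumes HF_API_KEY is exported in the environment, and the prompt string and check_image_endpoint helper are made up for the example.

# Standalone check of the image-generation endpoint referenced in the diff.
# Assumes HF_API_KEY is set in the environment; the prompt is a placeholder.
import io
import os

import requests
from PIL import Image

IMAGE_GEN_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2"
HEADERS = {"Authorization": f"Bearer {os.getenv('HF_API_KEY')}"}

def check_image_endpoint(prompt):
    """Send one request and report whether image bytes came back."""
    response = requests.post(IMAGE_GEN_URL, headers=HEADERS, json={"inputs": prompt})
    print(f"Status Code: {response.status_code}")
    if response.status_code == 200:
        image = Image.open(io.BytesIO(response.content))
        print("Received image of size:", image.size)
    else:
        # Non-200 responses (e.g. 503 while the model is loading) carry a text body.
        print("Error:", response.text)

if __name__ == "__main__":
    check_image_endpoint("A watercolor lighthouse at dawn")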