Create app/app.py
app/app.py  (ADDED, +53 -0)
@@ -0,0 +1,53 @@
# This Gradio app allows users to interact with a chatbot that can generate text and images based on user prompts.
import gradio as gr
import numpy as np
from transformers_js import pipeline  # Corrected import to use transformers_js instead of transformers_js_py

# Define the available models
AVAILABLE_MODELS = {
    "GPT-2": "gpt2",
    "DALL-E": "dalle-mini/dalle-mini-1.3B"
}

# Initialize the text generation pipeline
text_generator = pipeline("text-generation", model=AVAILABLE_MODELS["GPT-2"])

# Initialize the image generation pipeline
image_generator = pipeline("image-generation", model=AVAILABLE_MODELS["DALL-E"])

# Function to generate text
def generate_text(prompt, model):
    np.random.seed(42)  # Set a seed for reproducibility
    if model == "GPT-2":
        return text_generator(prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
    else:
        return "Model not supported for text generation"

# Function to generate images
def generate_image(prompt, model):
    if model == "DALL-E":
        image = image_generator(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
        return image
    else:
        return "Model not supported for image generation"

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Chatbot with Text and Image Generation")

    with gr.Tab("Text Generation"):
        text_prompt = gr.Textbox(label="Enter your text prompt")
        text_model = gr.Radio(choices=list(AVAILABLE_MODELS.keys()), label="Choose a model", value="GPT-2")
        text_output = gr.Textbox(label="Generated Text")
        text_button = gr.Button("Generate Text")
        text_button.click(generate_text, inputs=[text_prompt, text_model], outputs=text_output)

    with gr.Tab("Image Generation"):
        image_prompt = gr.Textbox(label="Enter your image prompt")
        image_model = gr.Radio(choices=list(AVAILABLE_MODELS.keys()), label="Choose a model", value="DALL-E")
        image_output = gr.Image(label="Generated Image")
        image_button = gr.Button("Generate Image")
        image_button.click(generate_image, inputs=[image_prompt, image_model], outputs=image_output)

# Launch the interface
demo.launch(show_error=True)
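
For a quick sanity check outside the Gradio UI, the two callbacks can be exercised as plain Python functions. The snippet below is a minimal, hypothetical sketch (not part of the committed file) and assumes the "text-generation" pipeline above initialized without errors.

# Hypothetical smoke test: call the callbacks directly, bypassing the Blocks UI.
sample = generate_text("Once upon a time", "GPT-2")
print(sample)  # generated continuation, capped at max_length=50 tokens

fallback = generate_text("Once upon a time", "DALL-E")
print(fallback)  # prints "Model not supported for text generation"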