import gradio as gr
from huggingface_hub import InferenceClient
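# Generator handler for the left-hand chatbot: streams a response to the
# user's prompt (and optional image) from Gemma 3 27B, served through the
# Nebius provider of huggingface_hub's InferenceClient.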
def chat_1(history, prompt, image, oauth_token: gr.OAuthToken | None, request: gr.Request):
    if oauth_token is None:
        raise ValueError("Please log in to use this Space.")
    if history is None:
        history = []
    client = InferenceClient(
        provider="nebius",
        api_key=oauth_token.token,
    )
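    # Build the message shown in the Chatbot, plus the multimodal `content`
    # list that will be sent to the model.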
    user_messages = [{"role": "user", "content": prompt}]
    content = [
        {
            "type": "text",
            "text": prompt,
        }
    ]
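    # If an image was uploaded, expose it through the Space's public
    # /gradio_api/file= route so the remote inference provider can fetch it.
    # Note: this hard-codes the deployed Space's hostname.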
    if image is not None:
        file_url = "https://abidlabs-smol-arena.hf.space/gradio_api/file=" + image
        content.append({
            "type": "image_url",
            "image_url": {
                "url": file_url,
            },
        })
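    # Echo the user's message immediately so the UI updates before any tokens arrive.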
    yield history + user_messages
    messages = [
        {
            "role": "user",
            "content": content,
        }
    ]
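    # Stream the completion, yielding the growing assistant message so the
    # Chatbot component renders tokens as they arrive.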
    stream = client.chat.completions.create(
        model="google/gemma-3-27b-it",
        messages=messages,
        max_tokens=500,
        stream=True,
    )
    response = ""
    for chunk in stream:
        if chunk.choices:
            # delta.content can be None on some chunks (e.g. the final one).
            response += chunk.choices[0].delta.content or ""
            yield history + user_messages + [{"role": "assistant", "content": response}]
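# Same flow as chat_1, but routed through the Together provider to
# Llama 4 Scout for the right-hand chatbot.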
def chat_2(history, prompt, image, oauth_token: gr.OAuthToken | None, request: gr.Request):
    if oauth_token is None:
        raise ValueError("Please log in to use this Space.")
    if history is None:
        history = []
    client = InferenceClient(
        provider="together",
        api_key=oauth_token.token,
    )
    user_messages = [{"role": "user", "content": prompt}]
    content = [
        {
            "type": "text",
            "text": prompt,
        }
    ]
    if image is not None:
        file_url = "https://abidlabs-smol-arena.hf.space/gradio_api/file=" + image
        content.append({
            "type": "image_url",
            "image_url": {
                "url": file_url,
            },
        })
    yield history + user_messages
    messages = [
        {
            "role": "user",
            "content": content,
        }
    ]
    stream = client.chat.completions.create(
        model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
        messages=messages,
        max_tokens=500,
        stream=True,
    )
    response = ""
    for chunk in stream:
        if chunk.choices:
            response += chunk.choices[0].delta.content or ""
            yield history + user_messages + [{"role": "assistant", "content": response}]
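# Relabel the two Chatbot components to match the models currently selected
# in the dropdown, falling back to generic names.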
def chat_labels(models):
    models = models or []
    names = []
    if len(models) > 0:
        names.append(models[0])
    else:
        names.append("Chatbot 1")
    if len(models) > 1:
        names.append(models[1])
    else:
        names.append("Chatbot 2")
    return gr.Chatbot(label=names[0], type="messages"), gr.Chatbot(label=names[1], type="messages")
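# UI: a sidebar with login and a (fixed) model selection, an optional image
# input, two chatbots compared side by side, and a shared prompt box.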
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.Markdown("## Smol Arena")
        gr.Markdown("Welcome to Smol Arena! This Space lets you test LLMs / VLMs with fewer than 30B active parameters.\n\nInference is provided by the [Hugging Face Inference API](https://huggingface.co/inference-api), so please log in to use this Space.")
        gr.LoginButton()
        dropdown = gr.Dropdown(
            multiselect=True,
            choices=["Gemma 3 (27b)", "Llama 4 (scout)"],
            value=["Gemma 3 (27b)", "Llama 4 (scout)"],
            max_choices=2,
            label="Select 2 models to compare.",
            interactive=False,
        )
    with gr.Row():
        with gr.Column():
            gr.Markdown("Optional image to ask the model about")
            image = gr.Image(type="filepath", label="Optional Image")
        chatbot_1 = gr.Chatbot(type="messages")
        chatbot_2 = gr.Chatbot(type="messages")
    textbox = gr.Textbox(label="Prompt")
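    # Refresh the chatbot labels whenever the dropdown changes and once on page load.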
    gr.on(
        [dropdown.change, demo.load],
        fn=chat_labels,
        inputs=[dropdown],
        outputs=[chatbot_1, chatbot_2],
    )
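    # Both handlers run on every submit. The gr.OAuthToken and gr.Request
    # parameters are injected automatically by Gradio from their type
    # annotations, so they are not listed in `inputs`.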
    textbox.submit(fn=chat_1, inputs=[chatbot_1, textbox, image], outputs=[chatbot_1])
    textbox.submit(fn=chat_2, inputs=[chatbot_2, textbox, image], outputs=[chatbot_2])
    textbox.submit(lambda: "", inputs=[], outputs=[textbox])

demo.launch()