import gradio as gr
import requests
import json
import os
import time
import threading
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime
import asyncio
import subprocess

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
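
# This app assumes an Ollama server is already listening on localhost:11434.
# If the Space's container does not start one (e.g. from its Dockerfile), the
# helper below is a minimal optional sketch -- an addition, not part of the
# original app -- of launching it from Python, assuming the `ollama` binary is
# installed and on PATH. It could be called once before constructing AIAssistant.
def start_ollama_server() -> subprocess.Popen:
    """Launch `ollama serve` in the background and return the process handle."""
    # `ollama serve` blocks, so run it as a background subprocess; its output is
    # silenced to keep the Space logs readable.
    return subprocess.Popen(
        ["ollama", "serve"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )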


class OllamaManager:
    def __init__(self, base_url: str = "http://localhost:11434"):
        self.base_url = base_url
        self.available_models = []
        self.current_model = None

    def wait_for_ollama(self, timeout: int = 60) -> bool:
        """Wait for the Ollama service to be ready."""
        start_time = time.time()
        while time.time() - start_time < timeout:
            try:
                response = requests.get(f"{self.base_url}/api/tags", timeout=5)
                if response.status_code == 200:
                    logger.info("Ollama service is ready")
                    return True
            except requests.RequestException:
                pass
            # Poll every 2 seconds until the service responds or the timeout expires
            time.sleep(2)
        return False

    def list_models(self) -> List[str]:
        """Get list of available models"""
        try:
            response = requests.get(f"{self.base_url}/api/tags")
            if response.status_code == 200:
                data = response.json()
                self.available_models = [model["name"] for model in data.get("models", [])]
                return self.available_models
            return []
        except Exception as e:
            logger.error(f"Error listing models: {e}")
            return []

    def pull_model(self, model_name: str) -> bool:
        """Pull a model from the Ollama registry."""
        try:
            logger.info(f"Pulling model: {model_name}")
            response = requests.post(
                f"{self.base_url}/api/pull",
                json={"name": model_name},
                stream=True
            )
            # The pull endpoint streams newline-delimited JSON progress updates
            for line in response.iter_lines():
                if line:
                    data = json.loads(line.decode('utf-8'))
                    if data.get("status") == "success":
                        logger.info(f"Successfully pulled model: {model_name}")
                        return True
                    elif "error" in data:
                        logger.error(f"Error pulling model: {data['error']}")
                        return False
            return True
        except Exception as e:
            logger.error(f"Error pulling model {model_name}: {e}")
            return False

    def chat_with_model(self, model_name: str, messages: List[Dict], temperature: float = 0.7) -> str:
        """Chat with an Ollama model."""
        try:
            # Flatten the conversation into a single prompt for /api/generate
            prompt = self._format_messages(messages)
            response = requests.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": model_name,
                    "prompt": prompt,
                    # Sampling parameters belong in the "options" object of the Ollama API
                    "options": {"temperature": temperature},
                    "stream": False
                },
                timeout=120
            )
            if response.status_code == 200:
                data = response.json()
                return data.get("response", "No response received")
            return f"Error: HTTP {response.status_code}"
        except Exception as e:
            logger.error(f"Error chatting with model: {e}")
            return f"Error: {str(e)}"

    def _format_messages(self, messages: List[Dict]) -> str:
        """Format conversation messages into a single prompt for Ollama."""
        formatted = ""
        for msg in messages:
            role = msg.get("role", "user")
            content = msg.get("content", "")
            if role == "user":
                formatted += f"User: {content}\n"
            elif role == "assistant":
                formatted += f"Assistant: {content}\n"
        formatted += "Assistant: "
        return formatted
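

# Note: flattening the history into a plain prompt works, but Ollama also exposes a
# native /api/chat endpoint that accepts the role/content message list directly.
# The function below is an optional alternative sketch -- an addition, not part of
# the original app -- that keeps the same return contract as chat_with_model.
def chat_via_chat_api(base_url: str, model_name: str, messages: List[Dict],
                      temperature: float = 0.7) -> str:
    """Call Ollama's /api/chat with a list of {"role", "content"} messages."""
    try:
        response = requests.post(
            f"{base_url}/api/chat",
            json={
                "model": model_name,
                "messages": messages,
                "options": {"temperature": temperature},
                "stream": False
            },
            timeout=120
        )
        if response.status_code == 200:
            # Non-streaming /api/chat returns {"message": {"role": ..., "content": ...}, ...}
            return response.json().get("message", {}).get("content", "No response received")
        return f"Error: HTTP {response.status_code}"
    except Exception as e:
        return f"Error: {e}"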


class AIAssistant:
    def __init__(self):
        self.ollama = OllamaManager()
        self.conversation_history = []
        self.current_model = "llama3.1:8b"  # Default model

        # Wait for Ollama and set up models
        self._initialize_models()

    def _initialize_models(self):
        """Initialize Ollama and pull default models."""
        if self.ollama.wait_for_ollama():
            # Try to pull some popular models; the first one that pulls
            # successfully becomes the current model
            models_to_pull = [
                "llama3.1:8b",
                "codellama:7b",
                "mistral:7b"
            ]
            for model in models_to_pull:
                if self.ollama.pull_model(model):
                    self.current_model = model
                    break

    def get_available_models(self):
        """Get list of available models"""
        return self.ollama.list_models()

    def chat(self, message: str, history: List, model: Optional[str] = None, temperature: float = 0.7):
        """Main chat function"""
        if not message.strip():
            return history, ""

        model = model or self.current_model
        if not model:
            return history + [[message, "No model available. Please wait for a model to load."]], ""

        # Add the user message to the history
        history.append([message, ""])

        # Prepare conversation context from the last 10 exchanges
        context_messages = []
        for h in history[-10:]:
            if h[0]:  # User message
                context_messages.append({"role": "user", "content": h[0]})
            if h[1]:  # Assistant message
                context_messages.append({"role": "assistant", "content": h[1]})

        # Get the AI response
        try:
            response = self.ollama.chat_with_model(model, context_messages, temperature)
            history[-1][1] = response
        except Exception as e:
            history[-1][1] = f"Error: {str(e)}"

        return history, ""

    def clear_chat(self):
        """Clear conversation history"""
        self.conversation_history = []
        return []

    def get_model_info(self, model_name: str):
        """Get information about a model"""
        try:
            response = requests.post(
                f"{self.ollama.base_url}/api/show",
                json={"name": model_name}
            )
            if response.status_code == 200:
                return response.json()
            return {"error": "Model not found"}
        except Exception as e:
            return {"error": str(e)}


# Initialize the AI assistant
assistant = AIAssistant()


def create_interface():
    """Create the Gradio interface."""
    with gr.Blocks(title="X - AI Assistant", theme=gr.themes.Soft()) as app:
        gr.Markdown("""
        # 🤖 X - AI Assistant Space

        Welcome to the X AI Assistant! This space provides access to various AI models through Ollama.

        **Features:**
        - Chat with different AI models
        - Adjustable temperature settings
        - Model management
        - Conversation history
        """)

        with gr.Tab("💬 Chat"):
            with gr.Row():
                with gr.Column(scale=3):
                    chatbot = gr.Chatbot(
                        height=500,
                        show_label=False,
                        container=True,
                        bubble_full_width=False
                    )
                    with gr.Row():
                        msg = gr.Textbox(
                            placeholder="Type your message here...",
                            show_label=False,
                            container=False,
                            scale=4
                        )
                        send_btn = gr.Button("Send", variant="primary", scale=1)
                    with gr.Row():
                        clear_btn = gr.Button("Clear Chat", variant="secondary")

                with gr.Column(scale=1):
                    gr.Markdown("### Settings")
                    model_dropdown = gr.Dropdown(
                        choices=assistant.get_available_models(),
                        value=assistant.current_model,
                        label="Model",
                        interactive=True
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=2.0,
                        value=0.7,
                        step=0.1,
                        label="Temperature"
                    )
                    refresh_models_btn = gr.Button("Refresh Models")

        with gr.Tab("🔧 Model Management"):
            with gr.Column():
                gr.Markdown("### Available Models")
                # Note: this table is not populated by any event handler below
                model_list = gr.DataFrame(
                    headers=["Model Name", "Status"],
                    wrap=True
                )
                with gr.Row():
                    pull_model_input = gr.Textbox(
                        placeholder="Enter model name to pull (e.g., llama3.1:8b)",
                        label="Pull New Model"
                    )
                    pull_btn = gr.Button("Pull Model", variant="primary")
                pull_status = gr.Textbox(label="Status", interactive=False)

        with gr.Tab("ℹ️ Info"):
            gr.Markdown("""
            ### About This Space

            This Hugging Face Space runs Ollama with various AI models. You can:

            1. **Chat** with AI models in real-time
            2. **Adjust settings** like temperature for different response styles
            3. **Manage models** by pulling new ones or viewing available models
            4. **Switch between models** for different capabilities

            ### Popular Models to Try:
            - `llama3.1:8b` - General purpose, good balance of speed and quality
            - `codellama:7b` - Specialized for coding tasks
            - `mistral:7b` - Fast and efficient
            - `deepseek-coder:6.7b` - Advanced coding capabilities

            ### Built for: https://huggingface.co/spaces/likhonsheikh/X
            """)

        # Event handlers
        def submit_message(message, history, model, temp):
            return assistant.chat(message, history, model, temp)

        def refresh_models():
            models = assistant.get_available_models()
            return gr.Dropdown(choices=models)

        def pull_new_model(model_name):
            if not model_name.strip():
                return "Please enter a model name"
            if assistant.ollama.pull_model(model_name):
                return f"Successfully pulled model: {model_name}"
            return f"Failed to pull model: {model_name}"

        # Connect events
        msg.submit(
            submit_message,
            inputs=[msg, chatbot, model_dropdown, temperature],
            outputs=[chatbot, msg]
        )
        send_btn.click(
            submit_message,
            inputs=[msg, chatbot, model_dropdown, temperature],
            outputs=[chatbot, msg]
        )
        clear_btn.click(
            assistant.clear_chat,
            outputs=[chatbot]
        )
        refresh_models_btn.click(
            refresh_models,
            outputs=[model_dropdown]
        )
        pull_btn.click(
            pull_new_model,
            inputs=[pull_model_input],
            outputs=[pull_status]
        )

    return app


if __name__ == "__main__":
    # Create and launch the app
    app = create_interface()
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )
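
# Deployment note (an assumption, not part of the original file): to run as a
# Hugging Face Space, the third-party packages imported above would need to be
# declared, e.g. a requirements.txt along the lines of the two entries below,
# and Ollama itself must be installed in the Space's container (typically a
# Docker Space) before this app starts:
#
#     gradio
#     requests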