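"""AICodepen: a CodePen-style Gradio Space that pairs HTML/CSS/JS/Python editors
with free OpenRouter models for AI chat, code generation, and live previews."""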
import gradio as gr
import requests
import os
from typing import List, Dict
import io
import contextlib
class OpenRouterCodingAgent:
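"""Thin client for the OpenRouter chat-completions API, restricted to free-tier models."""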
def __init__(self):
# Get API key from environment variable or Hugging Face Secrets
self.api_key = os.getenv("OPENROUTER_API_KEY")
self.base_url = "https://openrouter.ai/api/v1/chat/completions"
# Free coding models (good for code generation)
self.free_models = [
"agentica-org/deepcoder-14b-preview:free",
"arliai/qwq-32b-arliai-rpr-v1:free",
"cognitivecomputations/dolphin3.0-mistral-24b:free",
"cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
"deepseek/deepseek-chat-v3-0324:free",
"deepseek/deepseek-chat:free",
"deepseek/deepseek-r1-0528-qwen3-8b:free",
"deepseek/deepseek-r1-0528:free",
"deepseek/deepseek-r1-distill-llama-70b:free",
"deepseek/deepseek-r1-distill-qwen-14b:free",
"deepseek/deepseek-r1:free",
"deepseek/deepseek-v3-base:free",
"featherless/qwerky-72b:free",
"google/gemini-2.0-flash-exp:free",
"google/gemini-2.5-pro-exp-03-25",
"google/gemma-2-9b-it:free",
"google/gemma-3-12b-it:free",
"google/gemma-3-27b-it:free",
"google/gemma-3-4b-it:free",
"google/gemma-3n-e4b-it:free",
"meta-llama/llama-3.1-8b-instruct:free",
"meta-llama/llama-3.2-11b-vision-instruct:free",
"meta-llama/llama-3.2-1b-instruct:free",
"meta-llama/llama-3.3-70b-instruct:free",
"meta-llama/llama-4-maverick:free",
"meta-llama/llama-4-scout:free",
"microsoft/mai-ds-r1:free",
"mistralai/devstral-small:free",
"mistralai/mistral-7b-instruct:free",
"mistralai/mistral-nemo:free",
"mistralai/mistral-small-24b-instruct-2501:free",
"mistralai/mistral-small-3.1-24b-instruct:free",
"mistralai/mistral-small-3.2-24b-instruct:free",
"moonshotai/kimi-dev-72b:free",
"moonshotai/kimi-vl-a3b-thinking:free",
"nousresearch/deephermes-3-llama-3-8b-preview:free",
"nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
"nvidia/llama-3.3-nemotron-super-49b-v1:free",
"qwen/qwen-2.5-72b-instruct:free",
"qwen/qwen-2.5-coder-32b-instruct:free",
"qwen/qwen2.5-vl-32b-instruct:free",
"qwen/qwen2.5-vl-72b-instruct:free",
"qwen/qwen3-14b:free",
"qwen/qwen3-235b-a22b:free",
"qwen/qwen3-30b-a3b:free",
"qwen/qwen3-32b:free",
"qwen/qwen3-8b:free",
"qwen/qwq-32b:free",
"rekaai/reka-flash-3:free",
"sarvamai/sarvam-m:free",
"shisa-ai/shisa-v2-llama3.3-70b:free",
"thudm/glm-4-32b:free",
"thudm/glm-z1-32b:free",
"tngtech/deepseek-r1t-chimera:free",
]
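# The ":free" suffix selects OpenRouter's free-tier variant of each model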
# Set default model (DeepSeek is excellent for coding)
self.default_model = "deepseek/deepseek-r1:free"
print(f"βœ… Coding Agent ready with {len(self.free_models)} free models")
def get_clean_model_choices(self):
"""Get model choices with clean names for display"""
choices = []
for model in self.free_models:
clean_name = model.replace(":free", "")
choices.append((clean_name, model))
return choices
def make_request(self, messages: List[Dict], model: str, temperature: float = 0.7) -> str:
"""Make a request to OpenRouter API"""
if not self.api_key:
return "❌ Error: OpenRouter API key not configured."
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
"HTTP-Referer": "https://huggingface.co/spaces",
"X-Title": "Free Coding Agent via OpenRouter"
}
data = {
"model": model,
"messages": messages,
"temperature": temperature,
"max_tokens": 2000,
"stream": False
}
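# OpenRouter returns an OpenAI-compatible payload, roughly:
# {"choices": [{"message": {"role": "assistant", "content": "..."}}], ...}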
try:
response = requests.post(self.base_url, headers=headers, json=data, timeout=60)
response.raise_for_status()
result = response.json()
if 'choices' in result and len(result['choices']) > 0:
return result['choices'][0]['message']['content']
else:
return "❌ Error: No response generated"
except requests.exceptions.Timeout:
return "❌ Error: Request timed out after 60 seconds"
except Exception as e:
return f"❌ Error: {str(e)}"
# Initialize the coding agent
coding_agent = OpenRouterCodingAgent()
def execute_python_code(code: str) -> str:
"""Execute Python code in-process and return its captured output."""
if not code.strip():
return ""
# String buffer that collects everything the snippet prints
output = io.StringIO()
try:
# Capture both stdout and stderr from the executed snippet
with contextlib.redirect_stdout(output), contextlib.redirect_stderr(output):
# Run in a fresh globals dict so the snippet behaves like a standalone script
exec(code, {"__name__": "__main__"})
result = output.getvalue()
return result if result else "✅ Code executed successfully (no output)"
except Exception as e:
return f"❌ Error: {str(e)}"
def create_web_preview(html: str, css: str, js: str) -> str:
"""Create a complete HTML page with CSS and JS"""
return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Live Preview</title>
<style>
body {{
margin: 0;
padding: 20px;
font-family: Arial, sans-serif;
}}
{css}
</style>
</head>
<body>
{html}
<script>
{js}
</script>
</body>
</html>
"""
def generate_code(prompt: str, model: str, code_type: str, current_code: str = "") -> str:
"""Generate code using OpenRouter API"""
system_prompts = {
"html": "You are an expert HTML developer. Generate clean, semantic HTML code based on the user's request. Only return the HTML code, no explanations.",
"css": "You are an expert CSS developer. Generate modern, responsive CSS code based on the user's request. Only return the CSS code, no explanations.",
"javascript": "You are an expert JavaScript developer. Generate clean, modern JavaScript code based on the user's request. Only return the JavaScript code, no explanations.",
"python": "You are an expert Python developer. Generate clean, working Python code based on the user's request. Only return the Python code, no explanations."
}
context = f"\nCurrent {code_type} code:\n```{code_type}\n{current_code}\n```\n" if current_code.strip() else ""
messages = [
{"role": "system", "content": system_prompts.get(code_type, "You are a helpful coding assistant.")},
{"role": "user", "content": f"{prompt}{context}"}
]
response = coding_agent.make_request(messages, model)
# Clean up the response to extract just the code
if "```" in response:
# Extract code from markdown code blocks
parts = response.split("```")
for i, part in enumerate(parts):
if i % 2 == 1: # Odd indices are code blocks
# Remove language identifier if present
lines = part.strip().split('\n')
if lines[0].strip() in ['html', 'css', 'javascript', 'js', 'python', 'py']:
return '\n'.join(lines[1:])
return part.strip()
return response.strip()
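# Example: a response of "```python\nprint('hi')\n```" is reduced to "print('hi')"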
def chat_with_agent(message: str, history: List, model: str) -> tuple:
"""Chat with the coding agent"""
if not message.strip():
return history, ""
messages = [
{"role": "system", "content": "You are a helpful coding assistant. Help users with programming questions, debugging, and code explanations."}
]
# Add conversation history - convert from messages format
for msg in history:
if isinstance(msg, dict):
messages.append(msg)
else:
# Handle old tuple format if any
user_msg, assistant_msg = msg
if user_msg:
messages.append({"role": "user", "content": user_msg})
if assistant_msg:
messages.append({"role": "assistant", "content": assistant_msg})
# Add current message
messages.append({"role": "user", "content": message})
# Get response
response = coding_agent.make_request(messages, model, temperature=0.7)
# Add to history in messages format
history.append({"role": "user", "content": message})
history.append({"role": "assistant", "content": response})
return history, ""
# Create the Gradio interface
with gr.Blocks(
title="Free Coding Agent - CodePen Style",
theme=gr.themes.Soft(),
css="""
.code-container { font-family: 'Courier New', monospace; }
.preview-container { border: 1px solid #ddd; border-radius: 8px; }
.main-header { text-align: center; margin-bottom: 2em; }
""") as demo:
gr.HTML("""
<div class="main-header">
<h1>πŸš€ Free Coding Agent</h1>
<p>CodePen-style environment with AI assistance</p>
<p style="color: #22c55e; font-weight: bold;">✨ Generate, edit, and run code with AI help!</p>
</div>
""")
with gr.Row():
# Left Panel - Code Editors and AI
with gr.Column(scale=1):
gr.Markdown("### πŸ€– AI Coding Assistant")
model_dropdown = gr.Dropdown(
choices=coding_agent.get_clean_model_choices(),
value=coding_agent.default_model,
label="AI Model",
info="Choose your coding AI"
)
# AI Chat Interface
with gr.Accordion("💬 Chat with AI", open=True):
chatbot = gr.Chatbot(height=300, show_label=False, type='messages')
chat_input = gr.Textbox(
placeholder="Ask the AI about coding, debugging, or request code generation...",
show_label=False
)
chat_btn = gr.Button("Send", variant="primary")
# Code Generation Tools
gr.Markdown("### πŸ› οΈ Code Generation")
code_type = gr.Radio(
choices=["html", "css", "javascript", "python"],
value="html",
label="Code Type"
)
code_prompt = gr.Textbox(
placeholder="Describe what you want to generate...",
label="Code Request",
lines=2
)
generate_btn = gr.Button("🎯 Generate Code", variant="secondary")
# Right Panel - Code Editors
with gr.Column(scale=2):
gr.Markdown("### πŸ“ Code Editors")
with gr.Tabs():
with gr.Tab("HTML"):
html_code = gr.Code(
value="<h1>Hello World!</h1>\n<p>Start coding here...</p>",
language="html",
label="HTML",
lines=15
)
with gr.Tab("CSS"):
css_code = gr.Code(
value="body {\n background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);\n color: white;\n text-align: center;\n padding: 50px;\n}",
language="css",
label="CSS",
lines=15
)
with gr.Tab("JavaScript"):
js_code = gr.Code(
value="console.log('Hello from JavaScript!');\n\n// Add your JavaScript here",
language="javascript",
label="JavaScript",
lines=15
)
with gr.Tab("Python"):
python_code = gr.Code(
value="print('Hello from Python!')\n\n# Add your Python code here",
language="python",
label="Python",
lines=15
)
with gr.Row():
# Preview Panel
with gr.Column():
gr.Markdown("### 🌐 Live Preview")
with gr.Tabs():
with gr.Tab("Web Preview"):
web_preview = gr.HTML(
value=create_web_preview(
"<h1>Hello World!</h1><p>Start coding here...</p>",
"body { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; text-align: center; padding: 50px; }",
"console.log('Hello from JavaScript!');"
),
label="Preview"
)
update_preview_btn = gr.Button("🔄 Update Preview", variant="primary")
with gr.Tab("Python Output"):
python_output = gr.Textbox(
value="",
label="Python Output",
lines=10,
max_lines=20,
interactive=False
)
run_python_btn = gr.Button("▶️ Run Python", variant="primary")
# Event Handlers
def update_preview(html, css, js):
return create_web_preview(html, css, js)
def run_python(code):
return execute_python_code(code)
def generate_code_handler(prompt, model, code_type, html, css, js, python):
current_code = {
"html": html,
"css": css,
"javascript": js,
"python": python
}.get(code_type, "")
generated = generate_code(prompt, model, code_type, current_code)
if code_type == "html":
return generated, css, js, python
elif code_type == "css":
return html, generated, js, python
elif code_type == "javascript":
return html, css, generated, python
elif code_type == "python":
return html, css, js, generated
return html, css, js, python
# Connect event handlers
chat_btn.click(
chat_with_agent,
inputs=[chat_input, chatbot, model_dropdown],
outputs=[chatbot, chat_input]
)
chat_input.submit(
chat_with_agent,
inputs=[chat_input, chatbot, model_dropdown],
outputs=[chatbot, chat_input]
)
update_preview_btn.click(
update_preview,
inputs=[html_code, css_code, js_code],
outputs=[web_preview]
)
run_python_btn.click(
run_python,
inputs=[python_code],
outputs=[python_output]
)
generate_btn.click(
generate_code_handler,
inputs=[code_prompt, model_dropdown, code_type, html_code, css_code, js_code, python_code],
outputs=[html_code, css_code, js_code, python_code]
)
# Auto-update preview when code changes
for code_input in [html_code, css_code, js_code]:
code_input.change(
update_preview,
inputs=[html_code, css_code, js_code],
outputs=[web_preview]
)
# Launch the app
if __name__ == "__main__":
demo.launch(
share=False,
server_name="0.0.0.0",
show_error=True
)