Spaces: add qwen
- app.py +117 -16
- requirements.txt +2 -1
app.py CHANGED
@@ -24,6 +24,7 @@ from huggingface_hub import InferenceClient
 from tavily import TavilyClient
 from huggingface_hub import HfApi
 import tempfile
+from openai import OpenAI
 
 # Gradio supported languages for syntax highlighting
 GRADIO_SUPPORTED_LANGUAGES = [
@@ -232,6 +233,11 @@ AVAILABLE_MODELS = [
         "name": "GLM-4.1V-9B-Thinking",
         "id": "THUDM/GLM-4.1V-9B-Thinking",
         "description": "GLM-4.1V-9B-Thinking model for multimodal code generation with image support"
+    },
+    {
+        "name": "Qwen3-235B-OpenRouter",
+        "id": "openrouter/qwen3-235b-a22b-07-25:free",
+        "description": "Qwen3-235B-A22B model via OpenRouter API (openrouter.ai)"
     }
 ]
 
@@ -303,7 +309,8 @@ def get_inference_client(model_id, provider="auto"):
     """Return an InferenceClient with provider based on model_id and user selection."""
     if model_id == "moonshotai/Kimi-K2-Instruct":
         provider = "groq"
-
+    if model_id == "openrouter/qwen3-235b-a22b-07-25:free":
+        return "openrouter"
     return InferenceClient(
         provider=provider,
         api_key=HF_TOKEN,
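Note: after this hunk, get_inference_client() no longer always returns an InferenceClient — for the OpenRouter model it returns the bare string "openrouter" as a sentinel. A minimal sketch of the check a call site needs (dispatch is a hypothetical helper; the real handling lives in the generation hunk further down):

    from huggingface_hub import InferenceClient

    def dispatch(client, messages):
        # get_inference_client() returns the string "openrouter" as a
        # sentinel for the Qwen3 OpenRouter model; anything else is a
        # regular InferenceClient.
        if client == "openrouter":
            raise NotImplementedError("handled by the OpenAI-SDK path below")
        return client.chat_completion(messages=messages, stream=True)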
@@ -1266,6 +1273,72 @@ This will help me create a better design for you."""
             history_output: history_to_chatbot_messages(_history),
         }
 
+    # OpenRouter (OpenAI) logic
+    if client == "openrouter":
+        import os
+        from openai import OpenAI
+        openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
+        openrouter_site_url = os.getenv("OPENROUTER_SITE_URL", "https://huggingface.co/spaces/akhaliq/anycoder")
+        openrouter_site_title = os.getenv("OPENROUTER_SITE_TITLE", "AnyCoder")
+        if not openrouter_api_key:
+            error_message = "Error: OPENROUTER_API_KEY environment variable is not set."
+            yield {
+                code_output: error_message,
+                history_output: history_to_chatbot_messages(_history),
+            }
+            return
+        openai_client = OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=openrouter_api_key,
+        )
+        # Prepare OpenAI message format
+        openai_messages = []
+        for m in messages:
+            if m["role"] == "system":
+                openai_messages.append({"role": "system", "content": m["content"]})
+            elif m["role"] == "user":
+                openai_messages.append({"role": "user", "content": m["content"]})
+            elif m["role"] == "assistant":
+                openai_messages.append({"role": "assistant", "content": m["content"]})
+        openai_messages.append({"role": "user", "content": enhanced_query})
+        try:
+            completion = openai_client.chat.completions.create(
+                model="qwen/qwen3-235b-a22b-07-25:free",
+                messages=openai_messages,
+                extra_headers={
+                    "HTTP-Referer": openrouter_site_url,
+                    "X-Title": openrouter_site_title,
+                },
+                extra_body={},
+                stream=True,
+                max_tokens=10000
+            )
+            content = ""
+            for chunk in completion:
+                if hasattr(chunk, "choices") and chunk.choices and hasattr(chunk.choices[0], "delta") and hasattr(chunk.choices[0].delta, "content") and chunk.choices[0].delta.content is not None:
+                    content += chunk.choices[0].delta.content
+                    clean_code = remove_code_block(content)
+                    yield {
+                        code_output: gr.update(value=clean_code, language=get_gradio_language(language)),
+                        history_output: history_to_chatbot_messages(_history),
+                        sandbox: send_to_sandbox(clean_code) if language == "html" else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
+                    }
+            # After streaming, update history
+            _history.append([query, content])
+            yield {
+                code_output: remove_code_block(content),
+                history: _history,
+                sandbox: send_to_sandbox(remove_code_block(content)),
+                history_output: history_to_chatbot_messages(_history),
+            }
+        except Exception as e:
+            error_message = f"Error (OpenRouter): {str(e)}"
+            yield {
+                code_output: error_message,
+                history_output: history_to_chatbot_messages(_history),
+            }
+            return
+
 # Deploy to Spaces logic
 
 def wrap_html_in_gradio_app(html_code):
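For a quick check of the new path outside the Space, a minimal sketch that hits the same endpoint and model slug with the OpenAI SDK (assumes OPENROUTER_API_KEY is exported; the prompt is arbitrary):

    import os
    from openai import OpenAI

    client = OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.environ["OPENROUTER_API_KEY"],
    )
    stream = client.chat.completions.create(
        model="qwen/qwen3-235b-a22b-07-25:free",
        messages=[{"role": "user", "content": "Write a hello-world HTML page."}],
        stream=True,
    )
    # Print tokens as they arrive, mirroring the streaming loop in the diff.
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)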
@@ -1662,24 +1735,52 @@ with gr.Blocks(
             return gr.update(value=f"Error duplicating Transformers.js space: {e}. If this is a RepoUrl object error, ensure you are not accessing a .url attribute and use str(duplicated_repo) for the URL.", visible=True)
         # Other SDKs (existing logic)
         if sdk == "static":
+            import time
             file_name = "index.html"
+            # Wait and retry logic after repo creation
+            max_attempts = 3
+            for attempt in range(max_attempts):
+                import tempfile
+                with tempfile.NamedTemporaryFile("w", suffix=".html", delete=False) as f:
+                    f.write(code)
+                    temp_path = f.name
+                try:
+                    api.upload_file(
+                        path_or_fileobj=temp_path,
+                        path_in_repo=file_name,
+                        repo_id=repo_id,
+                        repo_type="space"
+                    )
+                    space_url = f"https://huggingface.co/spaces/{repo_id}"
+                    return gr.update(value=f"✅ Deployed! [Open your Space here]({space_url})", visible=True)
+                except Exception as e:
+                    if attempt < max_attempts - 1:
+                        time.sleep(2)  # Wait before retrying
+                    else:
+                        return gr.update(value=f"Error uploading file after {max_attempts} attempts: {e}. The Space was created, but the file could not be uploaded. Please try again in a few seconds from the Hugging Face UI.", visible=True)
+                finally:
+                    import os
+                    os.unlink(temp_path)
         else:
             file_name = "app.py"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            import tempfile
+            with tempfile.NamedTemporaryFile("w", suffix=f".{file_name.split('.')[-1]}", delete=False) as f:
+                f.write(code)
+                temp_path = f.name
+            try:
+                api.upload_file(
+                    path_or_fileobj=temp_path,
+                    path_in_repo=file_name,
+                    repo_id=repo_id,
+                    repo_type="space"
+                )
+                space_url = f"https://huggingface.co/spaces/{repo_id}"
+                return gr.update(value=f"✅ Deployed! [Open your Space here]({space_url})", visible=True)
+            except Exception as e:
+                return gr.update(value=f"Error uploading file: {e}", visible=True)
+            finally:
+                import os
+                os.unlink(temp_path)
 
 # Connect the deploy button to the new function
 deploy_btn.click(
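The static and non-static branches above duplicate the same write/upload/cleanup sequence. A sketch of one possible shared helper (upload_with_retry is a hypothetical name; api is an HfApi instance as in the diff):

    import os
    import tempfile
    import time
    from huggingface_hub import HfApi

    def upload_with_retry(api: HfApi, code: str, file_name: str, repo_id: str,
                          max_attempts: int = 3) -> str:
        # Write the generated code to a temp file and upload it to the Space,
        # retrying because a freshly created repo may not be ready immediately.
        suffix = "." + file_name.rsplit(".", 1)[-1]
        for attempt in range(max_attempts):
            with tempfile.NamedTemporaryFile("w", suffix=suffix, delete=False) as f:
                f.write(code)
                temp_path = f.name
            try:
                api.upload_file(path_or_fileobj=temp_path, path_in_repo=file_name,
                                repo_id=repo_id, repo_type="space")
                return f"https://huggingface.co/spaces/{repo_id}"
            except Exception:
                if attempt == max_attempts - 1:
                    raise
                time.sleep(2)  # wait before retrying
            finally:
                os.unlink(temp_path)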
requirements.txt CHANGED
@@ -8,4 +8,5 @@ Pillow
 opencv-python
 requests
 beautifulsoup4
-html2text
+html2text
+openai