anycoder #60 by bngnt - opened

This view is limited to 50 files because it contains too many changes. See the raw diff here.
- .gitattributes +35 -1
- .gitignore +101 -20
- .pre-commit-config.yaml +32 -0
- .python-version +1 -0
- .vscode/extensions.json +8 -0
- .vscode/settings.json +16 -0
- README.md +2 -119
- app.py +0 -0
- app_allenai.py +67 -0
- app_cerebras.py +19 -0
- app_claude.py +21 -0
- app_cohere.py +21 -0
- app_compare.py +210 -0
- app_crew.py +8 -0
- app_deepseek.py +23 -0
- app_experimental.py +300 -0
- app_fal.py +16 -0
- app_fireworks.py +19 -0
- app_gemini.py +22 -0
- app_gemini_camera.py +23 -0
- app_gemini_coder.py +23 -0
- app_gemini_voice.py +23 -0
- app_groq.py +21 -0
- app_groq_coder.py +23 -0
- app_hf.py +17 -0
- app_huggingface.py +22 -0
- app_hyperbolic.py +19 -0
- app_hyperbolic_coder.py +20 -0
- app_langchain.py +23 -0
- app_lumaai.py +7 -0
- app_marco_o1.py +12 -0
- app_meta.py +6 -0
- app_mindsearch.py +12 -0
- app_minimax.py +22 -0
- app_minimax_coder.py +23 -0
- app_mistral.py +23 -0
- app_moondream.py +13 -0
- app_nvidia.py +22 -0
- app_nvidia_coder.py +23 -0
- app_omini.py +10 -0
- app_openai.py +21 -0
- app_openai_coder.py +22 -0
- app_openai_voice.py +23 -0
- app_openrouter.py +22 -0
- app_paligemma.py +78 -0
- app_perplexity.py +23 -0
- app_playai.py +10 -0
- app_qwen.py +19 -0
- app_qwen_coder.py +20 -0
- app_replicate.py +18 -0
.gitattributes
CHANGED
@@ -1 +1,35 @@
-
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
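This matches the standard Hugging Face Spaces template: archives, serialized models, and other binary artifact types are routed through Git LFS rather than committed directly to the repository.
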
.gitignore
CHANGED
@@ -1,3 +1,5 @@
+.gradio/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -19,18 +21,16 @@ lib64/
 parts/
 sdist/
 var/
+wheels/
+share/python-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
 MANIFEST
 
-# Virtual environments
-venv/
-env/
-ENV/
-.venv/
-
 # PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
 *.manifest
 *.spec
 
@@ -48,34 +48,115 @@ htmlcov/
 nosetests.xml
 coverage.xml
 *.cover
+*.py,cover
 .hypothesis/
 .pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
 
 # Jupyter Notebook
 .ipynb_checkpoints
 
+# IPython
+profile_default/
+ipython_config.py
+
 # pyenv
-
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
 
 # mypy
 .mypy_cache/
 .dmypy.json
+dmypy.json
 
 # Pyre type checker
 .pyre/
 
-#
-
-logs/
-
-# System files
-.DS_Store
-Thumbs.db
+# pytype static type analyzer
+.pytype/
 
-#
-
-poetry.lock
-Pipfile.lock
+# Cython debug symbols
+cython_debug/
 
-#
-.
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,32 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-executables-have-shebangs
      - id: check-json
      - id: check-merge-conflict
      - id: check-shebang-scripts-are-executable
      - id: check-toml
      - id: check-yaml
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: ["--fix=lf"]
      - id: requirements-txt-fixer
      - id: trailing-whitespace
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.8.6
    hooks:
      - id: ruff
        args: ["--fix"]
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.14.1
    hooks:
      - id: mypy
        args: ["--ignore-missing-imports"]
        additional_dependencies:
          [
            "types-python-slugify",
            "types-requests",
            "types-PyYAML",
            "types-pytz",
          ]
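With this config committed, the hooks are enabled once per clone with pre-commit install and can be run across the whole tree with pre-commit run --all-files; ruff then auto-fixes lint issues and mypy type-checks against the four listed stub packages on every commit.
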
.python-version
ADDED
@@ -0,0 +1 @@
3.10
.vscode/extensions.json
ADDED
@@ -0,0 +1,8 @@
{
  "recommendations": [
    "ms-python.python",
    "charliermarsh.ruff",
    "streetsidesoftware.code-spell-checker",
    "tamasfe.even-better-toml"
  ]
}
.vscode/settings.json
ADDED
@@ -0,0 +1,16 @@
{
  "editor.formatOnSave": true,
  "files.insertFinalNewline": false,
  "[python]": {
    "editor.defaultFormatter": "charliermarsh.ruff",
    "editor.formatOnType": true,
    "editor.codeActionsOnSave": {
      "source.fixAll.ruff": "explicit"
    }
  },
  "[jupyter]": {
    "files.insertFinalNewline": false
  },
  "notebook.output.scrolling": true,
  "notebook.formatOnSave.enabled": true
}
README.md
CHANGED
@@ -4,127 +4,10 @@ emoji: 🏢
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.23.3
 app_file: app.py
 pinned: false
 disable_embedding: true
-hf_oauth: true
-hf_oauth_scopes:
-  - manage-repos
 ---
 
-
-
-AnyCoder is an AI-powered code generator that helps you create applications by describing them in plain English. It supports multiple AI models, multimodal input, website redesign, and one-click deployment to Hugging Face Spaces. The UI is built with pure Gradio theming for a minimal, modern experience.
-
-## Features
-
-- **Multi-Model Support**: Choose from Moonshot Kimi-K2, DeepSeek V3, DeepSeek R1, ERNIE-4.5-VL, MiniMax M1, Qwen3-235B-A22B, SmolLM3-3B, and GLM-4.1V-9B-Thinking
-- **Flexible Input**: Describe your app in text, upload a UI design image (for multimodal models), provide a reference file (PDF, TXT, MD, CSV, DOCX, or image), or enter a website URL for redesign
-- **Web Search Integration**: Enable real-time web search (Tavily, with advanced search depth) to enhance code generation with up-to-date information and best practices
-- **Code Generation**: Generate code in HTML, Python, JS, and more. Special support for transformers.js apps (outputs index.html, index.js, style.css)
-- **Live Preview**: Instantly preview generated HTML in a sandboxed iframe
-- **Modify Existing Code**: Use search/replace block format to update generated HTML
-- **One-Click Deployment**: Deploy your app to Hugging Face Spaces (Gradio, Streamlit, Static HTML, or Transformers.js) with OAuth login
-- **History & Examples**: Chat-like history of all interactions and quick example prompts for fast prototyping
-- **Minimal, Modern UI**: Built with Gradio 5.x, using only built-in theming and styling (no custom CSS)
-
-## Installation
-
-1. Clone the repository:
-```bash
-git clone <repository-url>
-cd anycoder
-```
-2. Install dependencies:
-```bash
-pip install -r requirements.txt
-```
-3. Set up environment variables:
-```bash
-export HF_TOKEN="your_huggingface_token"
-export TAVILY_API_KEY="your_tavily_api_key" # Optional, for web search feature
-```
-
-## Usage
-
-1. Run the application:
-```bash
-python app.py
-```
-2. Open your browser and navigate to the provided URL
-3. Describe your application in the text input field, or:
-   - Upload a UI design image (for ERNIE-4.5-VL or GLM-4.1V-9B-Thinking)
-   - Upload a reference file (PDF, TXT, MD, CSV, DOCX, or image)
-   - Enter a website URL for redesign (the app will extract and analyze the HTML and content)
-   - Enable web search for up-to-date information
-   - Choose a different AI model or code language
-4. Click "Generate" to create your code
-5. View the generated code in the Code tab or see it in action in the Preview tab
-6. Use the History tab to review previous generations
-7. **Deploy to Space**: Enter a title, select SDK, and click "🚀 Deploy App" to publish your application (OAuth login required)
-
-## Supported Models
-
-- Moonshot Kimi-K2
-- DeepSeek V3
-- DeepSeek R1
-- ERNIE-4.5-VL (multimodal)
-- MiniMax M1
-- Qwen3-235B-A22B
-- SmolLM3-3B
-- GLM-4.1V-9B-Thinking (multimodal)
-
-## Input Options
-
-- **Text Prompt**: Describe your app or code requirements
-- **Image Upload**: For multimodal models, upload a UI design image to generate code from visuals
-- **File Upload**: Provide a reference file (PDF, TXT, MD, CSV, DOCX, or image) for code generation or text extraction (OCR for images)
-- **Website URL**: Enter a URL to extract and redesign the website (HTML and content are analyzed and modernized)
-
-## Web Search Feature
-
-- Enable the "Web search" toggle to use Tavily for real-time information (requires TAVILY_API_KEY)
-- Uses advanced search depth for best results
-
-## Code Generation & Modification
-
-- Generates code in HTML, Python, JS, and more (selectable via dropdown)
-- Special support for transformers.js apps (outputs index.html, index.js, style.css)
-- Svelte apps
-- For HTML, provides a live preview in a sandboxed iframe
-- For modification requests, uses a search/replace block format to update existing HTML
-
-## Deployment
-
-- Deploy generated apps to Hugging Face Spaces directly from the UI
-- Supported SDKs: Gradio (Python), Streamlit (Python), Static (HTML), Transformers.js
-- OAuth login with Hugging Face is required for deployment to user-owned Spaces
-
-## History & Examples
-
-- Maintains a chat-like history of user/assistant interactions
-- Quick example prompts are available in the sidebar for fast prototyping
-
-## UI/UX
-
-- Built with Gradio 5.x, using only Gradio's built-in theming and styling (no custom CSS)
-- Minimal, uncluttered sidebar and interface
-
-## Environment Variables
-
-- `HF_TOKEN`: Your Hugging Face API token (required)
-- `TAVILY_API_KEY`: Your Tavily API key (optional, for web search)
-
-## Project Structure
-
-```
-anycoder/
-├── app.py # Main application (all logic and UI)
-├── requirements.txt
-├── README.md # This file
-```
-
-## License
-
-[Add your license information here]
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
The diff for this file is too large to render. See raw diff.
app_allenai.py
ADDED
@@ -0,0 +1,67 @@
import gradio as gr
from gradio_client import Client

MODELS = {"OLMo-2-1124-13B-Instruct": "akhaliq/olmo-anychat", "Llama-3.1-Tulu-3-8B": "akhaliq/allen-test"}


def create_chat_fn(client):
    def chat(message, history):
        response = client.predict(
            message=message,
            system_prompt="You are a helpful AI assistant.",
            temperature=0.7,
            max_new_tokens=1024,
            top_k=40,
            repetition_penalty=1.1,
            top_p=0.95,
            api_name="/chat",
        )
        return response

    return chat


def set_client_for_session(model_name, request: gr.Request):
    headers = {}
    if request and hasattr(request, "request") and hasattr(request.request, "headers"):
        x_ip_token = request.request.headers.get("x-ip-token")
        if x_ip_token:
            headers["X-IP-Token"] = x_ip_token

    return Client(MODELS[model_name], headers=headers)


def safe_chat_fn(message, history, client):
    if client is None:
        return "Error: Client not initialized. Please refresh the page."
    return create_chat_fn(client)(message, history)


with gr.Blocks() as demo:
    client = gr.State()

    model_dropdown = gr.Dropdown(
        choices=list(MODELS.keys()), value="OLMo-2-1124-13B-Instruct", label="Select Model", interactive=True
    )

    chat_interface = gr.ChatInterface(fn=safe_chat_fn, additional_inputs=[client])

    # Update client when model changes
    def update_model(model_name, request):
        return set_client_for_session(model_name, request)

    model_dropdown.change(
        fn=update_model,
        inputs=[model_dropdown],
        outputs=[client],
    )

    # Initialize client on page load
    demo.load(
        fn=set_client_for_session,
        inputs=gr.State("OLMo-2-1124-13B-Instruct"),
        outputs=client,
    )

if __name__ == "__main__":
    demo.launch()
app_cerebras.py
ADDED
@@ -0,0 +1,19 @@
import os

import cerebras_gradio

from utils import get_app

demo = get_app(
    models=[
        "llama3.1-8b",
        "llama3.1-70b",
        "llama3.1-405b",
    ],
    default_model="llama3.1-70b",
    src=cerebras_gradio.registry,
    accept_token=not os.getenv("CEREBRAS_API_KEY"),
)

if __name__ == "__main__":
    demo.launch()
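Several of these one-file apps import get_app from utils, which is not among the 50 files rendered in this view. Judging only from the call sites (models, default_model, src, accept_token), it plausibly wraps gr.load behind a model dropdown. A minimal sketch under that assumption; every name and detail below is hypothetical, not the PR's actual helper:

import gradio as gr


def get_app(models, default_model, src, accept_token=False, **load_kwargs):
    """Hypothetical reconstruction of utils.get_app (the real file is not shown)."""
    with gr.Blocks(fill_height=True) as demo:
        model = gr.Dropdown(choices=models, value=default_model, label="Select Model")

        # Re-render the provider interface whenever the dropdown selection changes.
        @gr.render(inputs=model)
        def show(name):
            gr.load(name=name, src=src, accept_token=accept_token, **load_kwargs)

    return demo
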
app_claude.py
ADDED
@@ -0,0 +1,21 @@
import os

import anthropic_gradio

from utils import get_app

demo = get_app(
    models=[
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307",
    ],
    default_model="claude-3-5-sonnet-20241022",
    src=anthropic_gradio.registry,
    accept_token=not os.getenv("ANTHROPIC_API_KEY"),
)

if __name__ == "__main__":
    demo.launch()
app_cohere.py
ADDED
@@ -0,0 +1,21 @@
import os

import cohere_gradio

from utils import get_app

demo = get_app(
    models=[
        "command-r",
        "command-r-08-2024",
        "command-r-plus",
        "command-r-plus-08-2024",
        "command-r7b-12-2024",
    ],
    default_model="command-r7b-12-2024",
    src=cohere_gradio.registry,
    accept_token=not os.getenv("COHERE_API_KEY"),
)

if __name__ == "__main__":
    demo.launch()
app_compare.py
ADDED
@@ -0,0 +1,210 @@
import os

import google.generativeai as genai
import gradio as gr
import openai
from anthropic import Anthropic
from openai import OpenAI  # Add explicit OpenAI import


def get_all_models():
    """Get all available models from the registries."""
    return [
        "SambaNova: Meta-Llama-3.2-1B-Instruct",
        "SambaNova: Meta-Llama-3.2-3B-Instruct",
        "SambaNova: Llama-3.2-11B-Vision-Instruct",
        "SambaNova: Llama-3.2-90B-Vision-Instruct",
        "SambaNova: Meta-Llama-3.1-8B-Instruct",
        "SambaNova: Meta-Llama-3.1-70B-Instruct",
        "SambaNova: Meta-Llama-3.1-405B-Instruct",
        "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
        "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
        "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
        "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
        "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
        "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
        "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
        "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
        "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
    ]


def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
    """Generate a prompt for models to discuss and build upon previous
    responses.
    """
    prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"

Previous responses from other AI models:
{chr(10).join(f"- {response}" for response in previous_responses)}

Please provide your perspective while:
1. Acknowledging key insights from previous responses
2. Adding any missing important points
3. Respectfully noting if you disagree with anything and explaining why
4. Building towards a complete answer

Keep your response focused and concise (max 3-4 paragraphs)."""
    return prompt


def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
    """Generate a prompt for final consensus building."""
    return f"""Review this multi-AI discussion about: "{original_question}"

Discussion history:
{chr(10).join(discussion_history)}

As a final synthesizer, please:
1. Identify the key points where all models agreed
2. Explain how any disagreements were resolved
3. Present a clear, unified answer that represents our collective best understanding
4. Note any remaining uncertainties or caveats

Keep the final consensus concise but complete."""


def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
    import openai

    client = openai.OpenAI(api_key=api_key)
    response = client.chat.completions.create(model=model, messages=messages)
    return response.choices[0].message.content


def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
    """Chat with Anthropic's Claude model."""
    client = Anthropic(api_key=api_key)
    response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
    return response.content[0].text


def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
    """Chat with Gemini Pro model."""
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel("gemini-pro")

    # Convert messages to Gemini format
    gemini_messages = []
    for msg in messages:
        role = "user" if msg["role"] == "user" else "model"
        gemini_messages.append({"role": role, "parts": [msg["content"]]})

    response = model.generate_content([m["parts"][0] for m in gemini_messages])
    return response.text


def chat_with_sambanova(
    messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
) -> str:
    """Chat with SambaNova's models using their OpenAI-compatible API."""
    client = openai.OpenAI(
        api_key=api_key,
        base_url="https://api.sambanova.ai/v1",
    )

    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=0.1,
        top_p=0.1,  # Use the specific model name passed in
    )
    return response.choices[0].message.content


def chat_with_hyperbolic(
    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
) -> str:
    """Chat with Hyperbolic's models using their OpenAI-compatible API."""
    client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")

    # Add system message to the start of the messages list
    full_messages = [
        {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
        *messages,
    ]

    response = client.chat.completions.create(
        model=model_name,  # Use the specific model name passed in
        messages=full_messages,
        temperature=0.7,
        max_tokens=1024,
    )
    return response.choices[0].message.content


def multi_model_consensus(
    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
) -> list[tuple[str, str]]:
    if not selected_models:
        raise gr.Error("Please select at least one model to chat with.")

    chat_history = []
    progress(0, desc="Getting responses from all models...")

    # Get responses from all models in parallel
    for i, model in enumerate(selected_models):
        provider, model_name = model.split(": ", 1)
        progress((i + 1) / len(selected_models), desc=f"Getting response from {model}...")

        try:
            if provider == "Anthropic":
                api_key = os.getenv("ANTHROPIC_API_KEY")
                response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
            elif provider == "SambaNova":
                api_key = os.getenv("SAMBANOVA_API_KEY")
                response = chat_with_sambanova(
                    messages=[
                        {"role": "system", "content": "You are a helpful assistant"},
                        {"role": "user", "content": question},
                    ],
                    api_key=api_key,
                    model_name=model_name,
                )
            elif provider == "Hyperbolic":
                api_key = os.getenv("HYPERBOLIC_API_KEY")
                response = chat_with_hyperbolic(
                    messages=[{"role": "user", "content": question}],
                    api_key=api_key,
                    model_name=model_name,
                )
            else:  # Gemini
                api_key = os.getenv("GEMINI_API_KEY")
                response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)

            chat_history.append((model, response))
        except Exception as e:
            chat_history.append((model, f"Error: {e!s}"))

    progress(1.0, desc="Done!")
    return chat_history


with gr.Blocks() as demo:
    gr.Markdown("# Model Response Comparison")
    gr.Markdown("""Select multiple models to compare their responses""")

    with gr.Row():
        with gr.Column():
            model_selector = gr.Dropdown(
                choices=get_all_models(),
                multiselect=True,
                label="Select Models",
                info="Choose models to compare",
                value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
            )

    chatbot = gr.Chatbot(height=600, label="Model Responses")
    msg = gr.Textbox(label="Prompt", placeholder="Ask a question to compare model responses...")

    def respond(message, selected_models):
        chat_history = multi_model_consensus(message, selected_models, rounds=1)
        return chat_history

    msg.submit(respond, [msg, model_selector], [chatbot])

    for fn in demo.fns.values():
        fn.api_name = False

if __name__ == "__main__":
    demo.launch()
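The provider helpers above are plain functions, so they can also be exercised without the UI. A small illustrative call, assuming HYPERBOLIC_API_KEY is set and using one of the registry names from get_all_models(); the prompt is illustrative only:

import os

from app_compare import chat_with_hyperbolic

# Direct use of one provider helper from app_compare.py above.
answer = chat_with_hyperbolic(
    messages=[{"role": "user", "content": "Summarize the trade-offs between REST and gRPC."}],
    api_key=os.getenv("HYPERBOLIC_API_KEY"),
    model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
)
print(answer)
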
app_crew.py
ADDED
@@ -0,0 +1,8 @@
import ai_gradio
import gradio as gr

demo = gr.load(
    name="crewai:gpt-4-turbo",
    crew_type="article",  # or 'support'
    src=ai_gradio.registry,
)
app_deepseek.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the hyperbolic models but keep their full names for loading
DEEPSEEK_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("deepseek:")]

# Create display names without the prefix
DEEPSEEK_MODELS_DISPLAY = [k.replace("deepseek:", "") for k in DEEPSEEK_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=DEEPSEEK_MODELS_FULL,  # Use the full names with prefix
    default_model=DEEPSEEK_MODELS_FULL[-1],
    dropdown_label="Select DeepSeek Model",
    choices=DEEPSEEK_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
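utils_ai_gradio.get_app is likewise outside this 50-file view; from the call sites it evidently resolves the prefix-less display names back to full "provider:model" registry keys before loading, a variant of the get_app sketch shown earlier. For comparison, ai_gradio's own entry point is the same gr.load pattern used elsewhere in this PR:

import ai_gradio
import gradio as gr

# Direct ai_gradio usage; the key below is an assumed example -- real keys
# come from ai_gradio.registry, and coder=True selects its code-gen UI.
demo = gr.load(
    name="deepseek:deepseek-chat",
    src=ai_gradio.registry,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
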
app_experimental.py
ADDED
@@ -0,0 +1,300 @@
import os
import random

import google.generativeai as genai
import gradio as gr
import openai
from anthropic import Anthropic
from openai import OpenAI  # Add explicit OpenAI import


def get_all_models():
    """Get all available models from the registries."""
    return [
        "SambaNova: Meta-Llama-3.2-1B-Instruct",
        "SambaNova: Meta-Llama-3.2-3B-Instruct",
        "SambaNova: Llama-3.2-11B-Vision-Instruct",
        "SambaNova: Llama-3.2-90B-Vision-Instruct",
        "SambaNova: Meta-Llama-3.1-8B-Instruct",
        "SambaNova: Meta-Llama-3.1-70B-Instruct",
        "SambaNova: Meta-Llama-3.1-405B-Instruct",
        "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
        "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
        "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
        "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
        "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
        "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
        "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
        "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
        "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
    ]


def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
    """Generate a prompt for models to discuss and build upon previous
    responses.
    """
    prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"

Previous responses from other AI models:
{chr(10).join(f"- {response}" for response in previous_responses)}

Please provide your perspective while:
1. Acknowledging key insights from previous responses
2. Adding any missing important points
3. Respectfully noting if you disagree with anything and explaining why
4. Building towards a complete answer

Keep your response focused and concise (max 3-4 paragraphs)."""
    return prompt


def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
    """Generate a prompt for final consensus building."""
    return f"""Review this multi-AI discussion about: "{original_question}"

Discussion history:
{chr(10).join(discussion_history)}

As a final synthesizer, please:
1. Identify the key points where all models agreed
2. Explain how any disagreements were resolved
3. Present a clear, unified answer that represents our collective best understanding
4. Note any remaining uncertainties or caveats

Keep the final consensus concise but complete."""


def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
    import openai

    client = openai.OpenAI(api_key=api_key)
    response = client.chat.completions.create(model=model, messages=messages)
    return response.choices[0].message.content


def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
    """Chat with Anthropic's Claude model."""
    client = Anthropic(api_key=api_key)
    response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
    return response.content[0].text


def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
    """Chat with Gemini Pro model."""
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel("gemini-pro")

    # Convert messages to Gemini format
    gemini_messages = []
    for msg in messages:
        role = "user" if msg["role"] == "user" else "model"
        gemini_messages.append({"role": role, "parts": [msg["content"]]})

    response = model.generate_content([m["parts"][0] for m in gemini_messages])
    return response.text


def chat_with_sambanova(
    messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
) -> str:
    """Chat with SambaNova's models using their OpenAI-compatible API."""
    client = openai.OpenAI(
        api_key=api_key,
        base_url="https://api.sambanova.ai/v1",
    )

    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=0.1,
        top_p=0.1,  # Use the specific model name passed in
    )
    return response.choices[0].message.content


def chat_with_hyperbolic(
    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
) -> str:
    """Chat with Hyperbolic's models using their OpenAI-compatible API."""
    client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")

    # Add system message to the start of the messages list
    full_messages = [
        {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
        *messages,
    ]

    response = client.chat.completions.create(
        model=model_name,  # Use the specific model name passed in
        messages=full_messages,
        temperature=0.7,
        max_tokens=1024,
    )
    return response.choices[0].message.content


def multi_model_consensus(
    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
) -> list[tuple[str, str]]:
    if not selected_models:
        raise gr.Error("Please select at least one model to chat with.")

    chat_history = []
    discussion_history = []

    # Initial responses
    progress(0, desc="Getting initial responses...")
    initial_responses = []
    for i, model in enumerate(selected_models):
        provider, model_name = model.split(": ", 1)

        try:
            if provider == "Anthropic":
                api_key = os.getenv("ANTHROPIC_API_KEY")
                response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
            elif provider == "SambaNova":
                api_key = os.getenv("SAMBANOVA_API_KEY")
                response = chat_with_sambanova(
                    messages=[
                        {"role": "system", "content": "You are a helpful assistant"},
                        {"role": "user", "content": question},
                    ],
                    api_key=api_key,
                )
            elif provider == "Hyperbolic":  # Add Hyperbolic case
                api_key = os.getenv("HYPERBOLIC_API_KEY")
                response = chat_with_hyperbolic(messages=[{"role": "user", "content": question}], api_key=api_key)
            else:  # Gemini
                api_key = os.getenv("GEMINI_API_KEY")
                response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)

            initial_responses.append(f"{model}: {response}")
            discussion_history.append(f"Initial response from {model}:\n{response}")
            chat_history.append((f"Initial response from {model}", response))
        except Exception as e:
            chat_history.append((f"Error from {model}", str(e)))

    # Discussion rounds
    for round_num in range(rounds):
        progress((round_num + 1) / (rounds + 2), desc=f"Discussion round {round_num + 1}...")
        round_responses = []

        random.shuffle(selected_models)  # Randomize order each round
        for model in selected_models:
            provider, model_name = model.split(": ", 1)

            try:
                discussion_prompt = generate_discussion_prompt(question, discussion_history)
                if provider == "Anthropic":
                    api_key = os.getenv("ANTHROPIC_API_KEY")
                    response = chat_with_anthropic(
                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
                    )
                elif provider == "SambaNova":
                    api_key = os.getenv("SAMBANOVA_API_KEY")
                    response = chat_with_sambanova(
                        messages=[
                            {"role": "system", "content": "You are a helpful assistant"},
                            {"role": "user", "content": discussion_prompt},
                        ],
                        api_key=api_key,
                    )
                elif provider == "Hyperbolic":  # Add Hyperbolic case
                    api_key = os.getenv("HYPERBOLIC_API_KEY")
                    response = chat_with_hyperbolic(
                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
                    )
                else:  # Gemini
                    api_key = os.getenv("GEMINI_API_KEY")
                    response = chat_with_gemini(
                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
                    )

                round_responses.append(f"{model}: {response}")
                discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}")
                chat_history.append((f"Round {round_num + 1} - {model}", response))
            except Exception as e:
                chat_history.append((f"Error from {model} in round {round_num + 1}", str(e)))

    # Final consensus
    progress(0.9, desc="Building final consensus...")
    model = selected_models[0]
    provider, model_name = model.split(": ", 1)

    try:
        consensus_prompt = generate_consensus_prompt(question, discussion_history)
        if provider == "Anthropic":
            api_key = os.getenv("ANTHROPIC_API_KEY")
            final_consensus = chat_with_anthropic(
                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
            )
        elif provider == "SambaNova":
            api_key = os.getenv("SAMBANOVA_API_KEY")
            final_consensus = chat_with_sambanova(
                messages=[
                    {"role": "system", "content": "You are a helpful assistant"},
                    {"role": "user", "content": consensus_prompt},
                ],
                api_key=api_key,
            )
        elif provider == "Hyperbolic":  # Add Hyperbolic case
            api_key = os.getenv("HYPERBOLIC_API_KEY")
            final_consensus = chat_with_hyperbolic(
                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
            )
        else:  # Gemini
            api_key = os.getenv("GEMINI_API_KEY")
            final_consensus = chat_with_gemini(
                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
            )
    except Exception as e:
        final_consensus = f"Error getting consensus from {model}: {e!s}"

    chat_history.append(("Final Consensus", final_consensus))

    progress(1.0, desc="Done!")
    return chat_history


with gr.Blocks() as demo:
    gr.Markdown("# Experimental Multi-Model Consensus Chat")
    gr.Markdown(
        """Select multiple models to collaborate on answering your question.
        The models will discuss with each other and attempt to reach a consensus.
        Maximum 3 models can be selected at once."""
    )

    with gr.Row():
        with gr.Column():
            model_selector = gr.Dropdown(
                choices=get_all_models(),
                multiselect=True,
                label="Select Models (max 3)",
                info="Choose up to 3 models to participate in the discussion",
                value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
                max_choices=3,
            )
            rounds_slider = gr.Slider(
                minimum=1,
                maximum=2,
                value=1,
                step=1,
                label="Discussion Rounds",
                info="Number of rounds of discussion between models",
            )

    chatbot = gr.Chatbot(height=600, label="Multi-Model Discussion")
    msg = gr.Textbox(label="Your Question", placeholder="Ask a question for the models to discuss...")

    def respond(message, selected_models, rounds):
        chat_history = multi_model_consensus(message, selected_models, rounds)
        return chat_history

    msg.submit(respond, [msg, model_selector, rounds_slider], [chatbot], api_name="consensus_chat")

    for fn in demo.fns.values():
        fn.api_name = False

if __name__ == "__main__":
    demo.launch()
app_fal.py
ADDED
@@ -0,0 +1,16 @@
import fal_gradio

from utils import get_app

demo = get_app(
    models=[
        "fal-ai/ltx-video",
        "fal-ai/ltx-video/image-to-video",
        "fal-ai/luma-photon",
    ],
    default_model="fal-ai/luma-photon",
    src=fal_gradio.registry,
)

if __name__ == "__main__":
    demo.launch()
app_fireworks.py
ADDED
@@ -0,0 +1,19 @@
import os

import fireworks_gradio

from utils import get_app

demo = get_app(
    models=[
        "f1-preview",
        "f1-mini-preview",
        "llama-v3p3-70b-instruct",
    ],
    default_model="llama-v3p3-70b-instruct",
    src=fireworks_gradio.registry,
    accept_token=not os.getenv("FIREWORKS_API_KEY"),
)

if __name__ == "__main__":
    demo.launch()
app_gemini.py
ADDED
@@ -0,0 +1,22 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the Gemini models but keep their full names for loading
GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]

# Create display names without the prefix
GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
    default_model=GEMINI_MODELS_FULL[-1],
    dropdown_label="Select Gemini Model",
    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
    src=ai_gradio.registry,
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_gemini_camera.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the Gemini models but keep their full names for loading
GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]

# Create display names without the prefix
GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
    default_model=GEMINI_MODELS_FULL[-2],
    dropdown_label="Select Gemini Model",
    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
    src=ai_gradio.registry,
    camera=True,
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_gemini_coder.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the Gemini models but keep their full names for loading
GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]

# Create display names without the prefix
GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
    default_model=GEMINI_MODELS_FULL[0],
    dropdown_label="Select Gemini Model",
    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
    src=ai_gradio.registry,
    fill_height=True,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
app_gemini_voice.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the Gemini models but keep their full names for loading
GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]

# Create display names without the prefix
GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
    default_model=GEMINI_MODELS_FULL[-2],
    dropdown_label="Select Gemini Model",
    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
    src=ai_gradio.registry,
    enable_voice=True,
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_groq.py
ADDED
@@ -0,0 +1,21 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the Groq models from the registry
GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]

# Create display names without the prefix
GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]

demo = get_app(
    models=GROQ_MODELS_FULL,
    default_model=GROQ_MODELS_FULL[-2],
    src=ai_gradio.registry,
    dropdown_label="Select Groq Model",
    choices=GROQ_MODELS_DISPLAY,
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_groq_coder.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the Groq models but keep their full names for loading
GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]

# Create display names without the prefix
GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=GROQ_MODELS_FULL,  # Use the full names with prefix
    default_model=GROQ_MODELS_FULL[-1],
    dropdown_label="Select Groq Model",
    choices=GROQ_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)


if __name__ == "__main__":
    demo.launch()
app_hf.py
ADDED
@@ -0,0 +1,17 @@
from utils import get_app

demo = get_app(
    models=[
        "microsoft/Phi-3.5-mini-instruct",
        "HuggingFaceTB/SmolLM2-1.7B-Instruct",
        "google/gemma-2-2b-it",
        "openai-community/gpt2",
        "microsoft/phi-2",
        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    ],
    default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
    src="models",
)

if __name__ == "__main__":
    demo.launch()
app_huggingface.py
ADDED
@@ -0,0 +1,22 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the hyperbolic models but keep their full names for loading
HUGGINGFACE_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("huggingface:")]

# Create display names without the prefix
HUGGINGFACE_MODELS_DISPLAY = [k.replace("huggingface:", "") for k in HUGGINGFACE_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=HUGGINGFACE_MODELS_FULL,  # Use the full names with prefix
    default_model=HUGGINGFACE_MODELS_FULL[0],
    dropdown_label="Select Huggingface Model",
    choices=HUGGINGFACE_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
    provider="fireworks-ai",
    bill_to="huggingface"
)
app_hyperbolic.py
ADDED
@@ -0,0 +1,19 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the hyperbolic models but keep their full names for loading
HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]

# Create display names without the prefix
HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
    default_model=HYPERBOLIC_MODELS_FULL[-2],
    dropdown_label="Select Hyperbolic Model",
    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
)
app_hyperbolic_coder.py
ADDED
@@ -0,0 +1,20 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the hyperbolic models but keep their full names for loading
HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]

# Create display names without the prefix
HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
    default_model=HYPERBOLIC_MODELS_FULL[-2],
    dropdown_label="Select Hyperbolic Model",
    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)
app_langchain.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the langchain models but keep their full names for loading
LANGCHAIN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("langchain:")]

# Create display names without the prefix
LANGCHAIN_MODELS_DISPLAY = [k.replace("langchain:", "") for k in LANGCHAIN_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=LANGCHAIN_MODELS_FULL,  # Use the full names with prefix
    default_model=LANGCHAIN_MODELS_FULL[0],
    dropdown_label="Select Langchain Model",
    choices=LANGCHAIN_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_lumaai.py
ADDED
@@ -0,0 +1,7 @@
import gradio as gr
import lumaai_gradio

demo = gr.load(
    name="dream-machine",
    src=lumaai_gradio.registry,
)
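Several modules in this diff (this one, app_omini.py, app_playai.py, app_qwen.py, app_qwen_coder.py, among others) define `demo` but never call `demo.launch()`, which suggests they are imported and mounted by the top-level app.py rather than run standalone. A hypothetical aggregator illustrating that arrangement (the actual app.py is not visible in this view, so the tab names and imports here are assumptions):

import gradio as gr

from app_lumaai import demo as lumaai_demo
from app_qwen import demo as qwen_demo

# Mount each provider demo in its own tab; Blocks.render() embeds an
# existing Blocks/Interface inside the current layout context.
with gr.Blocks() as app:
    with gr.Tab("LumaAI"):
        lumaai_demo.render()
    with gr.Tab("Qwen"):
        qwen_demo.render()

if __name__ == "__main__":
    app.launch()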
app_marco_o1.py
ADDED
@@ -0,0 +1,12 @@
import gradio as gr
import spaces
import transformers_gradio

demo = gr.load(name="AIDC-AI/Marco-o1", src=transformers_gradio.registry)
demo.fn = spaces.GPU()(demo.fn)

for fn in demo.fns.values():
    fn.api_name = False

if __name__ == "__main__":
    demo.launch()
app_meta.py
ADDED
@@ -0,0 +1,6 @@
import gradio as gr

demo = gr.load("models/meta-llama/Llama-3.3-70B-Instruct")

if __name__ == "__main__":
    demo.launch()
app_mindsearch.py
ADDED
@@ -0,0 +1,12 @@
import gradio as gr

# Load the Gradio space
demo = gr.load(name="internlm/MindSearch", src="spaces")

# Disable API access for all functions
if hasattr(demo, "fns"):
    for fn in demo.fns.values():
        fn.api_name = False

if __name__ == "__main__":
    demo.launch()
app_minimax.py
ADDED
@@ -0,0 +1,22 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the minimax models but keep their full names for loading
MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]

# Create display names without the prefix
MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
    default_model=MINIMAX_MODELS_FULL[0],
    dropdown_label="Select Minimax Model",
    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_minimax_coder.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the minimax models but keep their full names for loading
MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]

# Create display names without the prefix
MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
    default_model=MINIMAX_MODELS_FULL[0],
    dropdown_label="Select Minimax Model",
    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
app_mistral.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the mistral models but keep their full names for loading
MISTRAL_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("mistral:")]

# Create display names without the prefix
MISTRAL_MODELS_DISPLAY = [k.replace("mistral:", "") for k in MISTRAL_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=MISTRAL_MODELS_FULL,  # Use the full names with prefix
    default_model=MISTRAL_MODELS_FULL[5],
    dropdown_label="Select Mistral Model",
    choices=MISTRAL_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
app_moondream.py
ADDED
@@ -0,0 +1,13 @@
import gradio as gr

# Load the Gradio space
demo = gr.load(name="akhaliq/moondream", src="spaces")


# Disable API access for all functions
if hasattr(demo, "fns"):
    for fn in demo.fns.values():
        fn.api_name = False

if __name__ == "__main__":
    demo.launch()
app_nvidia.py
ADDED
@@ -0,0 +1,22 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the nvidia models but keep their full names for loading
NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]

# Create display names without the prefix
NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
    default_model=NVIDIA_MODELS_FULL[0],
    dropdown_label="Select Nvidia Model",
    choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_nvidia_coder.py
ADDED
@@ -0,0 +1,23 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the nvidia models but keep their full names for loading
NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]

# Create display names without the prefix
NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
    default_model=NVIDIA_MODELS_FULL[-1],
    dropdown_label="Select Nvidia Model",
    choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
app_omini.py
ADDED
@@ -0,0 +1,10 @@
import gradio as gr

# Load the Gradio space
demo = gr.load(name="Yuanshi/OminiControl", src="spaces")


# Disable API access for all functions
if hasattr(demo, "fns"):
    for fn in demo.fns.values():
        fn.api_name = False
app_openai.py
ADDED
@@ -0,0 +1,21 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the OpenAI models but keep their full names for loading
OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]

# Create display names without the prefix
OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=OPENAI_MODELS_FULL,  # Use the full names with prefix
    default_model=OPENAI_MODELS_FULL[-1],
    dropdown_label="Select OpenAI Model",
    choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
)

if __name__ == "__main__":
    demo.launch()
app_openai_coder.py
ADDED
@@ -0,0 +1,22 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the OpenAI models but keep their full names for loading
OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]

# Create display names without the prefix
OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=OPENAI_MODELS_FULL,  # Use the full names with prefix
    default_model=OPENAI_MODELS_FULL[-1],
    dropdown_label="Select OpenAI Model",
    choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
app_openai_voice.py
ADDED
@@ -0,0 +1,23 @@
import os

import openai_gradio

from utils import get_app

demo = get_app(
    models=[
        "gpt-4o-realtime-preview",
        "gpt-4o-realtime-preview-2024-12-17",
        "gpt-4o-realtime-preview-2024-10-01",
        "gpt-4o-mini-realtime-preview",
        "gpt-4o-mini-realtime-preview-2024-12-17",
    ],
    default_model="gpt-4o-mini-realtime-preview-2024-12-17",
    src=openai_gradio.registry,
    accept_token=not os.getenv("OPENAI_API_KEY"),
    twilio_sid=os.getenv("TWILIO_SID_OPENAI"),
    twilio_token=os.getenv("TWILIO_AUTH_OPENAI"),
)

if __name__ == "__main__":
    demo.launch()
app_openrouter.py
ADDED
@@ -0,0 +1,22 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the OpenRouter models but keep their full names for loading
OPENROUTER_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openrouter:")]

# Create display names without the prefix
OPENROUTER_MODELS_DISPLAY = [k.replace("openrouter:", "") for k in OPENROUTER_MODELS_FULL]

# Create and launch the interface using get_app utility
demo = get_app(
    models=OPENROUTER_MODELS_FULL,  # Use the full names with prefix
    default_model=OPENROUTER_MODELS_FULL[-1],
    dropdown_label="Select OpenRouter Model",
    choices=OPENROUTER_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)

if __name__ == "__main__":
    demo.launch()
app_paligemma.py
ADDED
@@ -0,0 +1,78 @@
import gradio as gr
from gradio_client import Client, handle_file

MODELS = {"Paligemma-10B": "akhaliq/paligemma2-10b-ft-docci-448"}


def create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
    def chat(message, history):
        text = message.get("text", "")
        files = message.get("files", [])
        processed_files = [handle_file(f) for f in files]

        response = client.predict(
            message={"text": text, "files": processed_files},
            system_prompt=system_prompt,
            temperature=temperature,
            max_new_tokens=max_tokens,
            top_k=top_k,
            repetition_penalty=rep_penalty,
            top_p=top_p,
            api_name="/chat",
        )
        return response

    return chat


def set_client_for_session(model_name, request: gr.Request):
    headers = {}
    if request and hasattr(request, "headers"):
        x_ip_token = request.headers.get("x-ip-token")
        if x_ip_token:
            headers["X-IP-Token"] = x_ip_token

    return Client(MODELS[model_name], headers=headers)


def safe_chat_fn(message, history, client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
    if client is None:
        return "Error: Client not initialized. Please refresh the page."
    try:
        return create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p)(
            message, history
        )
    except Exception as e:
        print(f"Error during chat: {e!s}")
        return f"Error during chat: {e!s}"


with gr.Blocks() as demo:
    client = gr.State()

    with gr.Accordion("Advanced Settings", open=False):
        system_prompt = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt")
        with gr.Row():
            temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature")
            top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top P")
        with gr.Row():
            top_k = gr.Slider(minimum=1, maximum=100, value=40, step=1, label="Top K")
            rep_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, label="Repetition Penalty")
        max_tokens = gr.Slider(minimum=64, maximum=4096, value=1024, step=64, label="Max Tokens")

    chat_interface = gr.ChatInterface(
        fn=safe_chat_fn,
        additional_inputs=[client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p],
        multimodal=True,
    )

    # Initialize client on page load with default model
    demo.load(fn=set_client_for_session, inputs=[gr.State("Paligemma-10B")], outputs=[client])  # Using default model

# Move the API access check here, after demo is defined
if hasattr(demo, "fns"):
    for fn in demo.fns.values():
        fn.api_name = False

if __name__ == "__main__":
    demo.launch()
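The `X-IP-Token` header forwarded by `set_client_for_session` appears to let the upstream ZeroGPU Space attribute GPU quota to the end user rather than to this Space. For reference, the upstream Space can also be exercised directly with gradio_client using the same parameters the wrapper forwards (the image path below is a placeholder):

from gradio_client import Client, handle_file

client = Client("akhaliq/paligemma2-10b-ft-docci-448")
result = client.predict(
    message={"text": "Describe this image.", "files": [handle_file("example.jpg")]},  # placeholder file
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(result)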
app_perplexity.py
ADDED
@@ -0,0 +1,23 @@
import os

import perplexity_gradio

from utils import get_app

demo = get_app(
    models=[
        "llama-3.1-sonar-large-128k-online",
        "llama-3.1-sonar-small-128k-online",
        "llama-3.1-sonar-huge-128k-online",
        "llama-3.1-sonar-small-128k-chat",
        "llama-3.1-sonar-large-128k-chat",
        "llama-3.1-8b-instruct",
        "llama-3.1-70b-instruct",
    ],
    default_model="llama-3.1-sonar-huge-128k-online",
    src=perplexity_gradio.registry,
    accept_token=not os.getenv("PERPLEXITY_API_KEY"),
)

if __name__ == "__main__":
    demo.launch()
app_playai.py
ADDED
@@ -0,0 +1,10 @@
import gradio as gr
import playai_gradio

demo = gr.load(
    name="PlayDialog",
    src=playai_gradio.registry,
)

for fn in demo.fns.values():
    fn.api_name = False
app_qwen.py
ADDED
@@ -0,0 +1,19 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the qwen models but keep their full names for loading
QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]

# Create display names without the prefix
QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=QWEN_MODELS_FULL,  # Use the full names with prefix
    default_model=QWEN_MODELS_FULL[-1],
    dropdown_label="Select Qwen Model",
    choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
)
app_qwen_coder.py
ADDED
@@ -0,0 +1,20 @@
import ai_gradio

from utils_ai_gradio import get_app

# Get the qwen models but keep their full names for loading
QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]

# Create display names without the prefix
QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]


# Create and launch the interface using get_app utility
demo = get_app(
    models=QWEN_MODELS_FULL,  # Use the full names with prefix
    default_model=QWEN_MODELS_FULL[-1],
    dropdown_label="Select Qwen Model",
    choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
    fill_height=True,
    coder=True,
)
app_replicate.py
ADDED
@@ -0,0 +1,18 @@
import replicate_gradio

from utils import get_app

demo = get_app(
    models=[
        "black-forest-labs/flux-depth-pro",
        "black-forest-labs/flux-canny-pro",
        "black-forest-labs/flux-fill-pro",
        "black-forest-labs/flux-depth-dev",
        "tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1",
    ],
    default_model="tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1",
    src=replicate_gradio.registry,
)

if __name__ == "__main__":
    demo.launch()
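The hunyuan-video entry pins an exact model version via Replicate's `owner/model:version` syntax, while the unpinned flux entries track each model's latest version. A direct call against the same pinned id with the official replicate client would look like this (the prompt is a placeholder; `REPLICATE_API_TOKEN` must be set in the environment):

import replicate

# The pinned version hash keeps behavior reproducible across deploys.
output = replicate.run(
    "tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1",
    input={"prompt": "a red panda drinking tea, cinematic lighting"},
)
print(output)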