Upload folder using huggingface_hub
- .github/workflows/update_space.yml +28 -0
- .gitignore +168 -0
- LICENSE +24 -0
- README.md +97 -7
- app.py +269 -0
- example.png +0 -0
- favicon.ico +0 -0
- pyproject.toml +25 -0
- requirements.txt +2 -0
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+  push:
+    branches:
+      - master
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.12'
+
+      - name: Install Gradio
+        run: python -m pip install gradio
+
+      - name: Log in to Hugging Face
+        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+      - name: Deploy to Spaces
+        run: gradio deploy
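For anyone exercising this pipeline outside CI, the last two workflow steps (Hub login and `gradio deploy`) can be reproduced locally. A minimal sketch, assuming the token is supplied through an `HF_TOKEN` environment variable instead of the `hf_token` repository secret:

```python
import os
import subprocess

import huggingface_hub

# Authenticate against the Hugging Face Hub (same call the workflow makes).
huggingface_hub.login(token=os.environ["HF_TOKEN"])

# Push the Gradio app to the Space, mirroring the "Deploy to Spaces" step.
subprocess.run(["gradio", "deploy"], check=True)
```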
.gitignore
ADDED
@@ -0,0 +1,168 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# VSCode / Misc
+.vscode
+.ruff_cache
+flagged
+*.csv
LICENSE
ADDED
@@ -0,0 +1,24 @@
+BSD 2-Clause License
+
+Copyright (c) 2024, justin
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
README.md
CHANGED
@@ -1,12 +1,102 @@
 ---
 title: AnkiGen
-emoji:
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 4.44.1
+emoji: 📚
 app_file: app.py
-
+requirements: requirements.txt
+python: 3.12
+sdk: gradio
+sdk_version: 4.44.0
 ---

-
+# AnkiGen - Anki Card Generator
+
+AnkiGen is a Gradio-based web application that generates Anki-compatible CSV files using Large Language Models (LLMs) based on user-specified subjects and preferences.
+
+## Features
+
+- Generate Anki cards for various subjects
+- Customizable number of topics and cards per topic
+- User-friendly interface powered by Gradio
+- Exports to CSV format compatible with Anki import
+- Utilizes LLMs for high-quality content generation
+
+## Fork features
+
+- Instead of OpenAI, OpenRouter is supported as a free alternative :)
+
+## Screenshot
+
+
+
+## Installation for Local Use
+
+1. Clone this repository:
+
+```
+git clone https://github.com/Banishedone/ankigen.git
+cd ankigen
+```
+
+2. Install the required dependencies:
+
+```
+pip install -r requirements.txt
+```
+
+3. Set up your OpenRouter API key (required for LLM functionality).
+
+## Usage
+
+1. Run the application:
+
+```
+gradio app.py --demo-name ankigen
+```
+
+2. Open your web browser and navigate to the provided local URL (typically `http://127.0.0.1:7860`).
+
+3. In the application interface:
+   - Enter your OpenRouter API key
+   - Specify the subject you want to create cards for
+   - Adjust the number of topics and cards per topic
+   - (Optional) Add any preference prompts
+   - Click "Generate Cards"
+
+4. Review the generated cards in the interface.
+
+5. Click "Export to CSV" to download the Anki-compatible file.
+
+## CSV Format
+
+The generated CSV file includes the following fields:
+- Index
+- Topic
+- Question
+- Answer
+- Explanation
+- Example
+
+You can create a new note type in Anki with these fields to handle importing.
+
+## Development
+
+This project is built with:
+- Python 3.12
+- Gradio 4.44.0
+
+To contribute or modify:
+1. Make your changes in `app.py`
+2. Update `requirements.txt` if you add new dependencies
+3. Test thoroughly before submitting pull requests
+
+## License
+
+BSD 2-Clause
+
+## Acknowledgments
+
+- This project uses the Gradio library (https://gradio.app/) for the web interface
+- Card generation is powered by LLMs accessed through OpenRouter
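As a quick sanity check of the export described in the README's CSV Format section, the file can be inspected before importing it into Anki. A small sketch, assuming the default `anki_deck.csv` filename written by `app.py` and that pandas is available (it is installed as a Gradio dependency):

```python
import pandas as pd

# Load the deck written by the "Export to CSV" button.
df = pd.read_csv("anki_deck.csv")

# Expect the six columns listed in the README:
# Index, Topic, Question, Answer, Explanation, Example.
print(df.columns.tolist())
print(df.head())
```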
app.py
ADDED
@@ -0,0 +1,269 @@
+from openai import OpenAI
+from pydantic import BaseModel
+from typing import List, Optional
+import gradio as gr
+import json
+
+
+class Step(BaseModel):
+    explanation: str
+    output: str
+
+
+class Subtopics(BaseModel):
+    steps: List[Step]
+    result: List[str]
+
+
+class Topics(BaseModel):
+    result: List[Subtopics]
+
+
+class CardFront(BaseModel):
+    question: Optional[str] = None
+
+
+class CardBack(BaseModel):
+    answer: Optional[str] = None
+    explanation: str
+    example: str
+
+
+class Card(BaseModel):
+    front: CardFront
+    back: CardBack
+
+
+class CardList(BaseModel):
+    topic: str
+    cards: List[Card]
+
+
+def structured_output_completion(
+    client, model, system_prompt, user_prompt
+):
+    try:
+        completion = client.chat.completions.create(
+            model=model,
+            messages=[
+                {"role": "system", "content": system_prompt.strip()},
+                {"role": "user", "content": user_prompt.strip()},
+            ],
+        )
+
+        print("Raw API response:", completion)
+
+        response_content = completion.choices[0].message.content
+
+        try:
+            parsed_response = json.loads(response_content)
+            return parsed_response
+        except json.JSONDecodeError:
+            print("Invalid JSON response:", response_content)
+            return None
+
+    except Exception as e:
+        print(f"An error occurred during the API call: {e}")
+        return None
+
+
+def generate_cards(
+    api_key_input,
+    model_name,
+    subject,
+    topic_number=1,
+    cards_per_topic=2,
+    preference_prompt="assume I'm a beginner",
+):
+    gr.Info("Starting process")
+
+    if not api_key_input:
+        raise gr.Error("Error: OpenRouter API key is required.")
+
+    client = OpenAI(
+        base_url="https://openrouter.ai/api/v1",
+        api_key=api_key_input,
+    )
+    model = model_name
+
+    all_card_lists = []
+
+    system_prompt = f"""
+    You are an expert in {subject}, assisting the user to master the topic while
+    keeping in mind the user's preferences: {preference_prompt}.
+    Please provide your responses in valid JSON format.
+    """
+
+    topic_prompt = f"""
+    Generate the top {topic_number} important subjects to know on {subject} in
+    order of ascending difficulty. Return the result as a JSON array of objects,
+    each containing 'subject' and 'difficulty' keys.
+    """
+    try:
+        topics_response = structured_output_completion(
+            client, model, system_prompt, topic_prompt
+        )
+        if topics_response is None:
+            raise gr.Error("Failed to generate topics. Please try again.")
+
+        topic_list = [item["subject"] for item in topics_response[:topic_number]]
+
+    except Exception as e:
+        raise gr.Error(f"An error occurred: {str(e)}. Please try again or check your API key.")
+
+    for topic in topic_list:
+        card_prompt = f"""
+        Generate {cards_per_topic} cards on {subject}: "{topic}"
+        keeping in mind the user's preferences: {preference_prompt}.
+
+        Questions should cover both sample problems and concepts.
+
+        Use the explanation field to help the user understand the reason behind things
+        and maximize learning. Additionally, offer tips (performance, gotchas, etc.).
+
+        Return the result as a JSON object with the following structure:
+        {{
+            "topic": "string",
+            "cards": [
+                {{
+                    "front": {{ "question": "string" }},
+                    "back": {{
+                        "answer": "string",
+                        "explanation": "string",
+                        "example": "string"
+                    }}
+                }}
+            ]
+        }}
+        """
+
+        try:
+            cards = structured_output_completion(
+                client, model, system_prompt, card_prompt
+            )
+            if cards is None:
+                print(f"Failed to generate cards for topic '{topic}'.")
+                continue
+            if not isinstance(cards, dict) or 'topic' not in cards or 'cards' not in cards:
+                print(f"Invalid card response format for topic '{topic}'.")
+                continue
+            all_card_lists.append(cards)
+        except Exception as e:
+            print(f"An error occurred while generating cards for topic '{topic}': {e}")
+            continue
+
+    flattened_data = []
+
+    for card_list_index, card_list in enumerate(all_card_lists, start=1):
+        try:
+            topic = card_list['topic']
+            # Get the total number of cards in this list to determine padding
+            total_cards = len(card_list['cards'])
+            # Calculate the number of digits needed for padding
+            padding = len(str(total_cards))
+
+            for card_index, card in enumerate(card_list['cards'], start=1):
+                # Format the index with zero-padding
+                index = f"{card_list_index}.{card_index:0{padding}}"
+                question = card['front']['question']
+                answer = card['back']['answer']
+                explanation = card['back']['explanation']
+                example = card['back']['example']
+                row = [index, topic, question, answer, explanation, example]
+                flattened_data.append(row)
+        except Exception as e:
+            print(f"An error occurred while processing card list {card_list_index}: {e}")
+            continue
+
+    return flattened_data
+
+
+def export_csv(d):
+    MIN_ROWS = 2
+
+    if len(d) < MIN_ROWS:
+        gr.Warning(f"The dataframe has fewer than {MIN_ROWS} rows. Nothing to export.")
+        return None
+
+    gr.Info("Exporting...")
+    d.to_csv("anki_deck.csv", index=False)
+    return gr.File(value="anki_deck.csv", visible=True)
+
+
+with gr.Blocks(
+    gr.themes.Soft(), title="AnkiGen", css="footer{display:none !important}"
+) as ankigen:
+    gr.Markdown("# 📚 AnkiGen - Anki Card Generator")
+    gr.Markdown("#### Generate an LLM-generated, Anki-compatible CSV based on your subject and preferences.")
+
+    with gr.Row():
+        with gr.Column(scale=1):
+            gr.Markdown("### Configuration")
+
+            api_key_input = gr.Textbox(
+                label="OpenRouter API Key",
+                type="password",
+                placeholder="Enter your OpenRouter API key",
+            )
+            model_input = gr.Textbox(
+                label="Model Name",
+                placeholder="Enter the model name (e.g., nousresearch/hermes-3-llama-3.1-405b:free)",
+                value="nousresearch/hermes-3-llama-3.1-405b:free",
+            )
+            subject = gr.Textbox(
+                label="Subject",
+                placeholder="Enter the subject, e.g., 'Basic SQL Concepts'",
+            )
+            topic_number = gr.Slider(
+                label="Number of Topics", minimum=2, maximum=20, step=1, value=2
+            )
+            cards_per_topic = gr.Slider(
+                label="Cards per Topic", minimum=2, maximum=30, step=1, value=3
+            )
+            preference_prompt = gr.Textbox(
+                label="Preference Prompt",
+                placeholder="Any preferences? For example: Learning level, e.g., \"Assume I'm a beginner\" or \"Target an advanced audience\"; Content scope, e.g., \"Only cover up until subqueries in SQL\" or \"Focus on organic chemistry basics\"",
+            )
+            generate_button = gr.Button("Generate Cards")
+        with gr.Column(scale=2):
+            gr.Markdown("### Generated Cards")
+            gr.Markdown(
+                """
+                Subject to change: currently exports a .csv with the following fields; you can
+                create a new note type with these fields to handle importing:
+                <b>Index, Topic, Question, Answer, Explanation, Example</b>
+                """
+            )
+            output = gr.Dataframe(
+                headers=[
+                    "Index",
+                    "Topic",
+                    "Question",
+                    "Answer",
+                    "Explanation",
+                    "Example",
+                ],
+                interactive=False,
+                height=800,
+            )
+            export_button = gr.Button("Export to CSV")
+            download_link = gr.File(interactive=False, visible=False)
+
+    generate_button.click(
+        fn=generate_cards,
+        inputs=[
+            api_key_input,
+            model_input,
+            subject,
+            topic_number,
+            cards_per_topic,
+            preference_prompt,
+        ],
+        outputs=output,
+    )
+
+    export_button.click(fn=export_csv, inputs=output, outputs=download_link)
+
+if __name__ == "__main__":
+    ankigen.launch(share=False, favicon_path="./favicon.ico")
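To make the data flow in `generate_cards` concrete, the sketch below feeds a hypothetical parsed card response (shaped like the JSON requested in `card_prompt`) through the same flattening logic and prints the rows that would populate the Gradio dataframe; the sample values are made up:

```python
# Hypothetical parsed response for one topic, matching the structure
# requested in card_prompt.
cards = {
    "topic": "Basic SQL Concepts",
    "cards": [
        {
            "front": {"question": "What does SELECT do?"},
            "back": {
                "answer": "It retrieves rows from one or more tables.",
                "explanation": "SELECT is the core read operation in SQL.",
                "example": "SELECT name FROM users;",
            },
        },
    ],
}

# Same flattening as generate_cards: one row per card, indexed as
# "<list number>.<card number>" with zero-padding based on the list size.
padding = len(str(len(cards["cards"])))
rows = [
    [
        f"1.{i:0{padding}}",
        cards["topic"],
        card["front"]["question"],
        card["back"]["answer"],
        card["back"]["explanation"],
        card["back"]["example"],
    ]
    for i, card in enumerate(cards["cards"], start=1)
]
print(rows)  # column order matches the dataframe headers and the exported CSV
```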
example.png
ADDED
favicon.ico
ADDED
pyproject.toml
ADDED
@@ -0,0 +1,25 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "ankigen"
+version = "0.1.0"
+description = ""
+authors = [
+    {name = "Justin", email = "9146678+brickfrog@users.noreply.github.com"}
+]
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+    "openai>=1.35.10",
+    "gradio>=4.44.1",
+]
+
+[project.optional-dependencies]
+dev = [
+    "ipykernel>=6.29.5",
+]
+
+[tool.setuptools]
+py-modules = ["app"]
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+gradio
+openai