from flask import Flask, request, jsonify, render_template, send_from_directory
import cv2, json, base64, io, os, tempfile, logging, re
import numpy as np
from unstructured.partition.pdf import partition_pdf
from PIL import Image
# from imutils.perspective import four_point_transform
from dotenv import load_dotenv
import pytesseract
# from transformers import AutoProcessor, AutoModelForImageTextToText, AutoModelForVision2Seq
# from langchain_community.document_loaders.image_captions import ImageCaptionLoader
from werkzeug.utils import secure_filename
from langchain_groq import ChatGroq
from langgraph.prebuilt import create_react_agent
from pdf2image import convert_from_path
from concurrent.futures import ThreadPoolExecutor
from pdf2image.exceptions import PDFInfoNotInstalledError
from typing import Dict, TypedDict, Optional, Any
from langgraph.graph import StateGraph, END
import uuid
import shutil, time
from langchain_experimental.open_clip.open_clip import OpenCLIPEmbeddings
# from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from io import BytesIO
from pathlib import Path
# ============================== #
#    INITIALIZE CLIP EMBEDDER    #
# ============================== #
clip_embd = OpenCLIPEmbeddings()

# Configure logging (create the log directory first so the FileHandler does not fail on a fresh container)
os.makedirs("/app/logs", exist_ok=True)
logging.basicConfig(
    level=logging.DEBUG,  # Use INFO or ERROR in production
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler("/app/logs/app.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)
load_dotenv()
# os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
groq_api_key = os.getenv("GROQ_API_KEY")
llm = ChatGroq(
    model="meta-llama/llama-4-scout-17b-16e-instruct",
    temperature=0,
    max_tokens=None,
)
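# Note: ChatGroq picks up the GROQ_API_KEY environment variable on its own (loaded above
# via load_dotenv()); the `groq_api_key` variable is currently unused and kept only for reference.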
app = Flask(__name__)

# ============================== #
#    TESSERACT CONFIGURATION     #
# ============================== #
# Set the Tesseract executable path
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Set the TESSDATA_PREFIX environment variable to the directory containing the 'tessdata' folder.
# This is crucial for Tesseract to find its language data files (e.g., eng.traineddata).
os.environ['TESSDATA_PREFIX'] = r'C:\Program Files\Tesseract-OCR'
poppler_path = r"C:\poppler\Library\bin"
backdrop_images_path = r"blocks\Backdrops"
sprite_images_path = r"blocks\sprites"
count = 0
# BASE_DIR = Path(__file__).parent
BASE_DIR = Path(__file__).parent.resolve()
BLOCKS_DIR = BASE_DIR / "blocks"
STATIC_DIR = BASE_DIR / "static"
GEN_PROJECT_DIR = BASE_DIR / "generated_projects"
BACKDROP_DIR = BLOCKS_DIR / "Backdrops"
SPRITE_DIR = BLOCKS_DIR / "sprites"
OUTPUT_DIR = BASE_DIR / "OUTPUTS"
OUTPUT_DIR2 = BASE_DIR / "outputs"
DETECTED_IMAGE_DIR = OUTPUT_DIR / "DETECTED_IMAGE"
IMAGE_DIR = OUTPUT_DIR / "SCANNED_IMAGE"
JSON_DIR = OUTPUT_DIR / "EXTRACTED_JSON"
for d in (BLOCKS_DIR, STATIC_DIR, GEN_PROJECT_DIR, OUTPUT_DIR, OUTPUT_DIR2, BACKDROP_DIR,
          SPRITE_DIR, DETECTED_IMAGE_DIR, IMAGE_DIR, JSON_DIR):
    d.mkdir(parents=True, exist_ok=True)
def classify_image_type(description_or_name: str) -> str:
    desc = description_or_name.lower()
    sprite_keywords = ["sprite", "character", "animal", "person", "creature", "robot", "figure"]
    backdrop_keywords = ["background", "scene", "forest", "city", "room", "sky", "mountain", "village"]
    code_block_keywords = [
        "move", "turn", "wait", "repeat", "if", "else", "broadcast",
        "glide", "change", "forever", "when", "switch", "costume",
        "say", "think", "stop", "clone", "touching", "sensing",
        "scratch", "block", "code", "set", "variable"
    ]
    if any(kw in desc for kw in code_block_keywords):
        return "code-block"
    elif any(kw in desc for kw in sprite_keywords):
        return "sprite"
    elif any(kw in desc for kw in backdrop_keywords):
        return "backdrop"
    else:
        return "unknown"
class GameState(TypedDict):
    project_json: dict
    description: str
    project_id: str
    project_image: str
    action_plan: Optional[Dict]
    pseudo_code: Optional[Dict]
    temporary_node: Optional[Dict]
# Refined SYSTEM_PROMPT with more explicit Scratch JSON rules, especially for variables
SYSTEM_PROMPT = """
You are an expert AI assistant named GameScratchAgent, specialized in generating and modifying Scratch-VM 3.x game project JSON.
Your core task is to process game descriptions and existing Scratch JSON structures, then produce or update JSON segments accurately.
You possess deep knowledge of the Scratch 3.0 project schema, informed by comprehensive reference materials. When generating or modifying the `blocks` section, pay extremely close attention to the following:

**Scratch Project JSON Schema Rules:**

1. **Target Structure (`project.json`'s `targets` array):**
    * Each object in the `targets` array represents a Stage or a Sprite.
    * `isStage`: A boolean indicating if the target is the Stage (`true`) or a Sprite (`false`).
    * `name`: The name of the Stage (e.g., `"Stage"`) or the Sprite (e.g., `"Cat"`). This property replaces `objName` found in older Scratch versions.
    * `variables` dictionary: Maps unique variable IDs to arrays `[variable_name, initial_value, isCloudVariable?]`.
        * `variable_name`: The user-defined name of the variable.
        * `initial_value`: The variable's initial value, which can be a number or a string.
        * `isCloudVariable?`: (Optional) A boolean indicating whether it is a cloud variable (`true`); it is `false` or absent for regular variables.
        * Example: `"myVarId123": ["score", 0]`, `"cloudVarId456": ["☁ High Score", "54", true]`
    * `lists` dictionary: Maps unique list IDs to arrays `[list_name, [item1, item2, ...]]`.
        * Example: `"myListId789": ["my list", ["apple", "banana"]]`
    * `broadcasts` dictionary: Maps unique broadcast IDs to their names.
        * Example: `"myBroadcastId": "Game Over"`
    * `blocks` dictionary: Contains all the blocks belonging to this target. Keys are block IDs, values are block objects.

2. **Block Structure (within a `target`'s `blocks` dictionary):**
    * Every block object must have the following core properties:
        * `opcode`: A unique internal identifier for the block's specific functionality (e.g., `"motion_movesteps"`, `"event_whenflagclicked"`).
        * `parent`: The ID of the block directly above it in the script stack (or `null` for a top-level block).
        * `next`: The ID of the block directly below it in the script stack (or `null` for the end of a stack).
        * `inputs`: An object defining values or blocks plugged into the block's input slots. Values are **arrays**.
        * `fields`: An object defining dropdown menu selections or direct internal values within the block. Values are **arrays**.
        * `shadow`: `true` if it is a shadow block (e.g., a default number input that can be replaced by another block), `false` otherwise.
        * `topLevel`: `true` if it is a hat block or a standalone block (not connected to a parent), `false` otherwise.

3. **`inputs` Property Details (for blocks plugged into input slots):**
    * **Direct Block Connection (Reporter/Boolean block plugged in):**
        * Format: `"<INPUT_NAME>": [1, "<blockId_of_plugged_block>"]`
        * Example: `"CONDITION": [1, "someBooleanBlockId"]` (e.g., for an `if` block).
    * **Literal Value Input (Shadow block with a literal):**
        * Format: `"<INPUT_NAME>": [1, [<type_code>, "<value_string>"]]`
        * `type_code`: A numeric code representing the data type. Common codes include: `4` for number, `7` for string/text, `10` for string/message.
        * `value_string`: The literal value as a string.
        * Examples:
            * Number: `"STEPS": [1, [4, "10"]]` (for the `move 10 steps` block).
            * String/Text: `"MESSAGE": [1, [7, "Hello"]]` (for the `say Hello` block).
            * String/Message (common for text inputs): `"MESSAGE": [1, [10, "Hello!"]]` (for `say Hello! for 2 secs`).
    * **C-Block Substack (blocks within a loop or conditional):**
        * Format: `"<SUBSTACK_NAME>": [2, "<blockId_of_first_block_in_substack>"]`
        * Common `SUBSTACK_NAME` values are `SUBSTACK` (for `if`, `forever`, `repeat`) and `SUBSTACK2` (for `else` in `if else`).
        * Example: `"SUBSTACK": [2, "firstBlockInLoopId"]`

4. **`fields` Property Details (for dropdowns or direct internal values):**
    * Used for dropdown menus, variable names, list names, or other static selections directly within the block.
    * Format: `"<FIELD_NAME>": ["<selected_value>", null]`
    * Examples:
        * Dropdown: `"KEY_OPTION": ["space", null]` (for `when space key pressed`).
        * Variable Name: `"VARIABLE": ["score", null]` (for `set score to 0`).
        * Direction (specific motion block): `"FORWARD_BACKWARD": ["forward", null]` (for `go forward layers`).

5. **Unique IDs:**
    * All block IDs, variable IDs, and list IDs must be unique strings (e.g., "myBlock123", "myVarId456", "myListId789"). Do NOT use placeholder strings like "block_id_here".

6. **No Nested `blocks` Dictionary:**
    * The `blocks` dictionary should only appear once per `target` (sprite/stage). Do NOT nest a `blocks` dictionary inside an individual block definition. Blocks that are part of a substack are linked via the `SUBSTACK` input.

7. **Asset Properties (for Costumes/Sounds):**
    * `assetId`, `md5ext`, `bitmapResolution`, and `rotationCenterX`/`rotationCenterY` should be correctly associated with costume and sound objects within the `costumes` and `sounds` arrays.

**General Principles and Important Considerations:**
* **Backward Compatibility:** Adhere strictly to existing Scratch 3.0 opcodes and schema to ensure backward compatibility with older projects. Opcodes must remain consistent to prevent previously saved projects from failing to load or behaving unexpectedly.
* **Forgiving Inputs:** Recognize that Scratch is designed to be "forgiving in its interpretation of inputs." The Scratch VM handles potentially "invalid" inputs gracefully (e.g., converting a number to a string if expected, returning default values like zero or empty strings, or performing no action) rather than crashing. This implies that precise type matching for inputs might be handled internally by Scratch, allowing for some flexibility in how values are provided, but the agent should aim for the most common and logical type.
"""
SYSTEM_PROMPT_JSON_CORRECTOR = """
You are an assistant that outputs JSON responses strictly following the given schema.
If the JSON you produce has any formatting errors, missing required fields, or invalid structure, you must identify the problems and correct them.
Always return only valid JSON that fully conforms to the schema below, enclosed in triple backticks (```), without any extra text or explanation.
If you receive an invalid or incomplete JSON response, fix it by:
- Adding any missing required fields with appropriate values.
- Correcting syntax errors such as missing commas, brackets, or quotes.
- Ensuring the JSON structure matches the schema exactly.
Remember: Your output must be valid JSON only, ready to be parsed without errors.
"""
# Debugger and resolver agent for Scratch 3.0
# Main agent of the system for Scratch 3.0
agent = create_react_agent(
    model=llm,
    tools=[],  # No specific tools are defined here, but could be added later
    prompt=SYSTEM_PROMPT
)
agent_json_resolver = create_react_agent(
    model=llm,
    tools=[],  # No specific tools are defined here, but could be added later
    prompt=SYSTEM_PROMPT_JSON_CORRECTOR
)
# Helper function to load the block catalog from a JSON file
def _load_block_catalog(file_path: str) -> Dict:
    """Loads the Scratch block catalog from a specified JSON file."""
    try:
        with open(file_path, 'r') as f:
            catalog = json.load(f)
        logger.info(f"Successfully loaded block catalog from {file_path}")
        return catalog
    except FileNotFoundError:
        logger.error(f"Error: Block catalog file not found at {file_path}")
        # Return an empty dict or raise an error, depending on desired behavior
        return {}
    except json.JSONDecodeError as e:
        logger.error(f"Error decoding JSON from {file_path}: {e}")
        return {}
    except Exception as e:
        logger.error(f"An unexpected error occurred while loading {file_path}: {e}")
        return {}
# --- Global variable for the block catalog ---
ALL_SCRATCH_BLOCKS_CATALOG = {}
BLOCK_CATALOG_PATH = r"blocks\blocks.json"             # Path to the full block catalog JSON file
HAT_BLOCKS_PATH = r"blocks\hat_blocks.json"            # Path to the hat blocks JSON file
STACK_BLOCKS_PATH = r"blocks\stack_blocks.json"        # Path to the stack blocks JSON file
REPORTER_BLOCKS_PATH = r"blocks\reporter_blocks.json"  # Path to the reporter blocks JSON file
BOOLEAN_BLOCKS_PATH = r"blocks\boolean_blocks.json"    # Path to the boolean blocks JSON file
C_BLOCKS_PATH = r"blocks\c_blocks.json"                # Path to the C blocks JSON file
CAP_BLOCKS_PATH = r"blocks\cap_blocks.json"            # Path to the cap blocks JSON file
# Load the block catalogs from their respective JSON files
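# Each catalog file is assumed to look roughly like the sketch below (this mirrors the
# keys accessed by the loading code that follows; the concrete example is hypothetical):
# {
#     "description": "Hat blocks start a script when an event occurs.",
#     "blocks": [
#         {"op_code": "event_whenflagclicked",
#          "functionality": "Runs the script when the green flag is clicked",
#          "example_standalone": "when green flag clicked"}
#     ]
# }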
def _summarize_blocks(block_data: Dict) -> str:
    """Builds a one-line-per-block summary (opcode, functionality, standalone example)."""
    return "\n".join(
        f"    - Opcode: {block['op_code']}, functionality: {block['functionality']} "
        f"example: standalone use: {block['example_standalone']}"
        for block in block_data.get("blocks", [])
    )

hat_block_data = _load_block_catalog(HAT_BLOCKS_PATH)
hat_description = hat_block_data.get("description", "")
hat_opcodes_functionalities = _summarize_blocks(hat_block_data)
print("Hat blocks loaded successfully.", hat_description)

boolean_block_data = _load_block_catalog(BOOLEAN_BLOCKS_PATH)
boolean_description = boolean_block_data.get("description", "")
boolean_opcodes_functionalities = _summarize_blocks(boolean_block_data)

c_block_data = _load_block_catalog(C_BLOCKS_PATH)
c_description = c_block_data.get("description", "")
c_opcodes_functionalities = _summarize_blocks(c_block_data)

cap_block_data = _load_block_catalog(CAP_BLOCKS_PATH)
cap_description = cap_block_data.get("description", "")
cap_opcodes_functionalities = _summarize_blocks(cap_block_data)

reporter_block_data = _load_block_catalog(REPORTER_BLOCKS_PATH)
reporter_description = reporter_block_data.get("description", "")
reporter_opcodes_functionalities = _summarize_blocks(reporter_block_data)

stack_block_data = _load_block_catalog(STACK_BLOCKS_PATH)
stack_description = stack_block_data.get("description", "")
stack_opcodes_functionalities = _summarize_blocks(stack_block_data)

# This makes ALL_SCRATCH_BLOCKS_CATALOG available globally
ALL_SCRATCH_BLOCKS_CATALOG = _load_block_catalog(BLOCK_CATALOG_PATH)
# Helper function to extract JSON from an LLM response
def extract_json_from_llm_response(raw_response: str) -> dict:
    # --- 1) Pull out the JSON code block if present ---
    md = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", raw_response)
    json_string = md.group(1).strip() if md else raw_response

    # --- 2) Trim to the outermost { … } so we drop any prefix/suffix junk ---
    first, last = json_string.find('{'), json_string.rfind('}')
    if 0 <= first < last:
        json_string = json_string[first:last + 1]

    # --- 3) Pre-cleanup: remove stray "assistant{…}" prefixes, rogue "assistant" keys, and quoted booleans ---
    json_string = re.sub(r'\b\w+\s*{', '{', json_string)
    json_string = re.sub(r'"assistant"\s*:', '', json_string)
    json_string = re.sub(r'\b(false|true)"', r'\1', json_string)
    logger.debug("Ran pre-cleanup for stray tokens and boolean quotes.")

    # --- 4) Fix stray inner quotes at the start of name/list values ---
    # e.g., { "name": " \"recent_scoress\"", ... } → "recent_scoress"
    json_string = re.sub(
        r'("name"\s*:\s*")\s*"',
        r'\1',
        json_string
    )

    # --- 5) Escape all embedded quotes in any `logic` value up to the next key ---
    def _esc(m):
        prefix, body = m.group(1), m.group(2)
        return prefix + body.replace('"', r'\"')
    json_string = re.sub(
        r'("logic"\s*:\s*")([\s\S]+?)(?=",\s*"[A-Za-z_]\w*"\s*:\s*)',
        _esc,
        json_string
    )
    logger.debug("Escaped embedded quotes in logic fields.")

    # --- 6) Remove trailing commas before } or ] ---
    json_string = re.sub(r',\s*(?=[}\],])', '', json_string)
    json_string = re.sub(r',\s*,', ',', json_string)
    logger.debug("Removed trailing commas.")

    # --- 7) Balance braces: drop extra } at the end if needed ---
    ob, cb = json_string.count('{'), json_string.count('}')
    if cb > ob:
        excess = cb - ob
        json_string = json_string.rstrip()[:-excess]
        logger.debug(f"Stripped {excess} extra closing brace(s).")

    # --- 8) Escape literal newlines in *all* string values ---
    json_string = re.sub(
        r'"((?:[^"\\]|\\.)*?)"',
        lambda m: '"' + m.group(1).replace('\n', '\\n').replace('\r', '\\r') + '"',
        json_string,
        flags=re.DOTALL
    )
    logger.debug("Escaped newlines in strings.")

    # --- 9) Final parse attempt ---
    try:
        return json.loads(json_string)
    except json.JSONDecodeError:
        logger.error("Sanitized JSON still invalid:\n%s", json_string)
        raise
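# Typical input this sanitizer is meant to survive (hedged example): a model reply such as
#   '```json\n{"refined_logic": {"Cat": {"plan": [{"pseudocode": "..."},]}}\n```'
# i.e. fenced output with trailing commas, stray prefixes, or unescaped quotes that plain
# json.loads() would reject.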
# Node 1: pseudo-code generation (logic updating happens here if there is any issue)
def pseudo_generator_node(state: GameState):
    logger.info("--- Running pseudo_generator_node ---")
    image = state.get("project_image", "")
    project_json = state["project_json"]
    # MODIFICATION 1: Include 'Stage' in the list of names to plan for.
    # It's crucial to ensure 'Stage' is always present for its global role.
    target_names = [t["name"] for t in project_json["targets"]]
    refinement_prompt = f"""
You are an expert in Scratch 3.0 game development, specializing in understanding block relationships (stacked, nested).
Analyze the Scratch code-block image and generate pseudo-code for what this logic appears to be doing.
From the image, you also have to detect the value of the key given in text form as "Script for: ". Below is an example:
Example: "Script for: Bear", where "Script for:" is the key and "Bear" is the value; check whether a related target name is available.
**Targets in Game (Sprites and Stage) available in project_json:** {', '.join(target_names)}
--- Scratch 3.0 Block Reference ---
### Hat Blocks
Description: {hat_description}
Blocks:
{hat_opcodes_functionalities}

### Boolean Blocks
Description: {boolean_description}
Blocks:
{boolean_opcodes_functionalities}

### C Blocks
Description: {c_description}
Blocks:
{c_opcodes_functionalities}

### Cap Blocks
Description: {cap_description}
Blocks:
{cap_opcodes_functionalities}

### Reporter Blocks
Description: {reporter_description}
Blocks:
{reporter_opcodes_functionalities}

### Stack Blocks
Description: {stack_description}
Blocks:
{stack_opcodes_functionalities}
-----------------------------------
Your task is to:
If you don't find any "Code-Blocks", then:
**Do not generate pseudo-code; pass the message "No Code-blocks".**
If you find any "Code-Blocks", then:
1. **Refine the 'logic'**: Make it precise, accurate, and fully aligned with the Game Description. Use Scratch-consistent verbs and phrasing. **Do NOT** use raw double-quotes inside the logic string.
2. **Structural requirements**:
    - **Numeric values** `(e.g., 0, 5, 0.2, -130)` **must** be in parentheses: `(0)`, `(5)`, `(0.2)`, `(-130)`.
    - **Alphanumeric values** `(e.g., hello, say 5, 4, hi!)` **must** be in parentheses: `(hello)`, `(say 5)`, `(4)`, `(hi!)`.
    - **Variables** must be in the form `[variable v]` (e.g., `[score v]`), even when used inside expressions; two example uses: `set [score v] to (1)` or `show variable ([speed v])`.
    - **Dropdown options** must be in the form `[option v]` (e.g., `[Game Start v]`, `[blue sky v]`). Example use: `when [space v] key pressed`.
    - **Reporter blocks** used as inputs must be double-wrapped: `((x position))`, `((y position))`. Example use: `if <((y position)) = (-130)> then` or `(((x position)) * (1))`.
    - **Boolean blocks** in conditions must be inside `< >`, including nested ones: `<not <condition>>`, `<<cond1> and <cond2>>`, `<<cond1> or <cond2>>`.
    - **Other Boolean blocks** in conditions must be inside `< >`, including nested ones or values or variables: `<(block/value/variable) * (block/value/variable)>`, `<(block/value/variable) < (block/value/variable)>`, and, as an example with a variable, `<[apple v] contains [a v]?>`.
    - **Operator expressions** must use explicit Scratch operator blocks, e.g.:
      ```
      (([ballSpeed v]) * (1.1))
      ```
    - **Every hat block script must end** with a final `end` on its own line.
3. **Pseudo-code formatting**:
    - Represent each block or nested block on its own line.
    - Indent nested blocks by 4 spaces under their parent (`forever`, `if`, etc.).
    - No comments or explanatory text, just the block sequence.
    - Provide a natural-language breakdown of each step taken after the event, formatted as a multi-line string representing pseudo-code. Ensure clarity and granularity: each described action should map closely to a Scratch block or tight sequence.
4. **Logic content**:
    - Build a clear flow for mechanics (movement, jumping, flying, scoring, collisions).
    - Match each action closely to a Scratch block or tight sequence.
    - Do **NOT** include any justification or comments, only the raw logic.
5. **Examples for reference**:
    **Correct** pattern for a simple start script:
    ```
    when green flag clicked
    switch backdrop to [blue sky v]
    set [score v] to (0)
    show variable [score v]
    broadcast [Game Start v]
    end
    ```
    **Correct** pattern for updating the high score variable:
    ```
    when I receive [Game Over v]
    if <((score)) > (([High Score v]))> then
        set [High Score v] to ([score v])
    end
    switch backdrop to [Game Over v]
    end
    ```
    **Correct** pattern for levelling up and increasing difficulty:
    ```
    when I receive [Level Up v]
    change [level v] by (1)
    set [ballSpeed v] to ((([ballSpeed v]) * (1.1)))
    end
    ```
    **Correct** pattern for jumping mechanics:
    ```
    when [space v] key pressed
    if <((y position)) = (-100)> then
        repeat (5)
            change y by (100)
            wait (0.1) seconds
            change y by (-100)
            wait (0.1) seconds
        end
    end
    end
    ```
    **Correct** pattern for continuously moving objects:
    ```
    when green flag clicked
    go to x: (240) y: (-100)
    set [speed v] to (-5)
    show variable [speed v]
    forever
        change x by ([speed v])
        if <((x position)) < (-240)> then
            go to x: (240) y: (-100)
        end
    end
    end
    ```
6. **Do not** add any explanation of the logic or comments to justify it; just put the logic content in the JSON.
7. **Output**:
    Return **only** a JSON object, using double quotes everywhere, where the key for the pseudo-code is the target name closest to the "Script for:" value. If no code-blocks are found, return `{{"refined_logic": "No Code-blocks"}}`.
    ```json
    {{
        "refined_logic": {{
            "[Target name similar to script for]": {{
                "plan": [
                    {{"pseudocode": "…your fully-formatted pseudo-code here…"}},
                    {{"pseudocode": "…your fully-formatted pseudo-code here…"}}
                ]
            }}
        }}
    }}
    ```
    """
    image_input = {
        "type": "image_url",
        "image_url": {
            "url": f"data:image/png;base64,{image}"
        }
    }
    content = [
        {"type": "text", "text": refinement_prompt},
        image_input
    ]
    result = {}
    try:
        # Invoke the main agent for logic refinement and relationship identification
        response = agent.invoke({"messages": [{"role": "user", "content": content}]})
        llm_output_raw = response["messages"][-1].content.strip()
        parsed_llm_output = extract_json_from_llm_response(llm_output_raw)
        result = parsed_llm_output
        print(f"result:\n\n {result}")
    except json.JSONDecodeError as error_json:
        # If JSON parsing fails, use the JSON resolver agent
        correction_prompt = (
            "Your task is to correct the provided JSON string to ensure it is **syntactically perfect and adheres strictly to JSON rules**.\n"
            "It must be a JSON object whose `refined_logic` value maps target names to their pseudo-code plans.\n"
            f"- **Error Details**: {error_json}\n\n"
            "**Strict Instructions for your response:**\n"
            "1. **ONLY** output the corrected JSON. Do not include any other text or explanations.\n"
            "2. Ensure all keys and string values are enclosed in **double quotes**. Escape internal quotes (`\\`).\n"
            "3. No trailing commas. Correct nesting.\n\n"
            "Here is the problematic JSON string to correct:\n"
            f"```json\n{llm_output_raw}\n```\n"
            "Corrected JSON:\n"
        )
        try:
            correction_response = agent_json_resolver.invoke({"messages": [{"role": "user", "content": correction_prompt}]})
            corrected_output = extract_json_from_llm_response(correction_response["messages"][-1].content)
            # block_relationships = corrected_output.get("block_relationships", [])
            result = corrected_output
        except Exception as e_corr:
            logger.error(f"Failed to correct JSON output even after retry: {e_corr}")
    # Update the state with the refined pseudo-code
    state["pseudo_code"] = result
    print(f"[OVERALL REFINED PSEUDO CODE LOGIC]: {result}")
    logger.info("Plan refinement and block relation analysis completed for all plans.")
    return state
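# Shape of the value stored in state["pseudo_code"] (mirrors the prompt's output schema;
# the target name "Bear" is only an example):
#   {"refined_logic": {"Bear": {"plan": [{"pseudocode": "when green flag clicked\n..."}]}}}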
scratch_keywords = [
    "move", "turn", "wait", "repeat", "if", "else", "broadcast",
    "glide", "change", "forever", "when", "switch",
    "next costume", "set", "show", "hide", "play sound",
    "go to", "x position", "y position", "think", "say",
    "variable", "stop", "clone",
    "touching", "sensing", "pen", "clear", "Scratch", "Code", "scratch blocks"
]
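# These keywords are used by is_code_block() (inside extract_images_from_pdf) as
# case-insensitive substring matches, so screenshots of Scratch scripts are kept out of
# the sprite/backdrop candidates.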
# --- FUNCTION: Extract images from saved PDF ---
def extract_images_from_pdf(pdf_path, final_json_path_2):
    '''Extract images from a PDF and generate structured sprite JSON.'''
    try:
        pdf_filename = os.path.splitext(os.path.basename(pdf_path))[0]  # e.g., "scratch_crab"
        pdf_dir_path = os.path.dirname(pdf_path).replace("/", "\\")
        # Create subfolders
        extracted_image_subdir = os.path.join(DETECTED_IMAGE_DIR, pdf_filename)
        json_subdir = os.path.join(JSON_DIR, pdf_filename)
        os.makedirs(extracted_image_subdir, exist_ok=True)
        os.makedirs(json_subdir, exist_ok=True)
        # Output paths
        output_json_path = os.path.join(json_subdir, "extracted.json")
        final_json_path = os.path.join(json_subdir, "extracted_sprites.json")
        final_json_path_2 = os.path.join(json_subdir, "extracted_sprites_2.json")
        try:
            elements = partition_pdf(
                filename=pdf_path,
                strategy="hi_res",
                extract_image_block_types=["Image"],
                extract_image_block_to_payload=True,  # Set to True to get base64 in the output
            )
        except Exception as e:
            raise RuntimeError(f"❌ Failed to extract images from PDF: {str(e)}")
        try:
            with open(output_json_path, "w") as f:
                json.dump([element.to_dict() for element in elements], f, indent=4)
        except Exception as e:
            raise RuntimeError(f"❌ Failed to write extracted.json: {str(e)}")
        try:
            # Load the extracted elements back for processing
            with open(output_json_path, 'r') as file:
                file_elements = json.load(file)
        except Exception as e:
            raise RuntimeError(f"❌ Failed to read extracted.json: {str(e)}")
        # Prepare the manipulated sprite JSON structure
        manipulated_json = {}
        # Set a system prompt for the vision agent
        system_prompt = """
        You are an expert in visual scene understanding.
        Your job is to analyze an image and respond accordingly: if asked for a name, give a simple name by analyzing the image; if asked for a description, generate a short description covering its elements.
        Guidelines:
        - Focus only on the images given in square shape.
        - Don't consider blank areas in the image as content.
        - Don't include a generic summary or explanation outside the fields.
        Return only a string.
        """
        agent = create_react_agent(
            model=llm,
            tools=[],
            prompt=system_prompt
        )
        # If the JSON already exists, load it and find the next available sprite number
        if os.path.exists(final_json_path):
            with open(final_json_path, "r") as existing_file:
                manipulated = json.load(existing_file)
            # Determine the next available index (e.g., Sprite 4 if 1–3 already exist)
            existing_keys = [int(k.replace("Sprite ", "")) for k in manipulated.keys()]
            start_count = max(existing_keys, default=0) + 1
        else:
            start_count = 1
        sprite_count = start_count
        for i, element in enumerate(file_elements):
            if "image_base64" in element["metadata"]:
                try:
                    image_data = base64.b64decode(element["metadata"]["image_base64"])
                    image = Image.open(io.BytesIO(image_data)).convert("RGB")
                    image = upscale_image(image, scale=2)
                    # image.show(title=f"Extracted Image {i+1}")
                    image_path = os.path.join(extracted_image_subdir, f"Sprite_{i+1}.png")
                    image.save(image_path)  # NOTE: could be processed in memory instead of saving to disk
                    with open(image_path, "rb") as image_file:
                        image_bytes = image_file.read()
                    img_base64 = base64.b64encode(image_bytes).decode("utf-8")
                    prompt_combined = """
                    Analyze this image and return JSON with the keys:
                    {
                        "name": "<short name or 'scratch-block'>",
                        "description": "<short description>"
                    }
                    Guidelines:
                    - If the image contains logical/code blocks from Scratch (e.g., move, turn, repeat, when clicked, etc.), use 'scratch-block' as the name.
                    - If the image is a character, object, or backdrop, give an appropriate descriptive name instead.
                    - Avoid generic names like 'image1' or 'picture'.
                    - Keep the response strictly in JSON format.
                    """
                    content = [
                        {"type": "text", "text": prompt_combined},
                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}}
                    ]
                    response = agent.invoke({"messages": [{"role": "user", "content": content}]})
                    # Use the shared sanitizer in case the model wraps its JSON in code fences
                    result_json = extract_json_from_llm_response(response["messages"][-1].content)
                    try:
                        name = result_json.get("name", "").strip()
                        description = result_json.get("description", "").strip()
                    except Exception as e:
                        logger.error(f"⚠️ Failed to extract name/description: {str(e)}")
                        name = "unknown"
                        description = "unknown"
                    manipulated_json[f"Sprite {sprite_count}"] = {
                        "name": name,
                        "base64": element["metadata"]["image_base64"],
                        "file-path": pdf_dir_path,
                        "description": description
                    }
                    sprite_count += 1
                except Exception as e:
                    print(f"⚠️ Error processing Sprite {i+1}: {str(e)}")
        # Save the manipulated JSON
        with open(final_json_path, "w") as sprite_file:
            json.dump(manipulated_json, sprite_file, indent=4)
        def is_code_block(name: str) -> bool:
            for kw in scratch_keywords:
                if kw.lower() in name.lower():
                    return True
            return False
        # Filter out code-block images
        filtered_sprites = {}
        for key, value in manipulated_json.items():
            sprite_name = value.get("name", "")
            if not is_code_block(sprite_name):
                filtered_sprites[key] = value
            else:
                logger.info(f"🛑 Excluded code block-like image: {key}")
        # if not any(is_code_block(value.get("name","")) for value in manipulated_json.values()):
        #     return jsonify({"message":"Invalid Content"}), 400
        # if not filtered_sprites:
        #     return "Invalid Content", {}
        # Overwrite with the filtered content
        with open(final_json_path_2, "w") as sprite_file:
            json.dump(filtered_sprites, sprite_file, indent=4)
        # print(f"✅ Manipulated sprite JSON saved: {final_json_path}")
        return final_json_path, manipulated_json
    except Exception as e:
        raise RuntimeError(f"❌ Error in extract_images_from_pdf: {str(e)}")
def similarity_matching(input_json_path: str, project_folder: str) -> str:
    logger.info("🔍 Running similarity matching...")
    # ============================== #
    #         DEFINE PATHS           #
    # ============================== #
    image_dirs = [backdrop_images_path, sprite_images_path]
    project_json_path = os.path.join(project_folder, "project.json")
    # ============================== #
    #     READ SPRITE METADATA       #
    # ============================== #
    with open(input_json_path, 'r') as f:
        sprites_data = json.load(f)
    sprite_ids, texts, sprite_base64 = [], [], []
    for sid, sprite in sprites_data.items():
        sprite_ids.append(sid)
        texts.append("This is " + sprite.get("description", sprite.get("name", "")))
        sprite_base64.append(sprite["base64"])
    # ========================================= #
    #  Walk folders to collect all image paths  #
    # ========================================= #
    folder_image_paths = [
        # backdrops
        BACKDROP_DIR / "badroom3.sb3" / "8cc0b88d53345b3e337e8f028a32a4e7.png",
        BACKDROP_DIR / "baseball2.sb3" / "7be1f5b3e682813dac1f297e52ff7dca.png",
        BACKDROP_DIR / "beach_malibu.sb3" / "050615fe992a00d6af0e664e497ebf53.png",
        BACKDROP_DIR / "castle2.sb3" / "951765ee7f7370f120c9df20b577c22f.png",
        BACKDROP_DIR / "hall.sb3" / "ea86ca30b346f27ca5faf1254f6a31e3.png",
        BACKDROP_DIR / "jungle.sb3" / "f4f908da19e2753f3ed679d7b37650ca.png",
        # sprites
        SPRITE_DIR / "Batter.sprite3" / "baseball_sprite_motion_1.png",
        SPRITE_DIR / "Bear.sprite3" / "bear_motion_2.png",
        SPRITE_DIR / "Beetle.sprite3" / "46d0dfd4ae7e9bfe3a6a2e35a4905eae.png",
        SPRITE_DIR / "cat" / "cat_motion_1.png",
        SPRITE_DIR / "Centaur.sprite3" / "2373556e776cad3ba4d6ee04fc34550b.png",
        SPRITE_DIR / "Crab.sprite3" / "bear_element.png",
        SPRITE_DIR / "Soccer Ball.sprite3" / "cat_football.png",
    ]
    # ============================== #
    #     DECODE SPRITE IMAGES       #
    # ============================== #
    # OpenCLIPEmbeddings.embed_image expects image file paths, so decode the base64
    # payloads to temporary PNG files before embedding.
    temp_dir = tempfile.mkdtemp()
    sprite_image_paths = []
    for idx, b64 in enumerate(sprite_base64):
        image_data = base64.b64decode(b64.split(",")[-1])
        img = Image.open(BytesIO(image_data)).convert("RGB")
        temp_path = os.path.join(temp_dir, f"sprite_{idx}.png")
        img.save(temp_path)
        sprite_image_paths.append(temp_path)
    # ============================== #
    #     EMBED SPRITE IMAGES        #
    # ============================== #
    sprite_features = clip_embd.embed_image(sprite_image_paths)
    # ============================== #
    #     COMPUTE SIMILARITIES       #
    # ============================== #
    with open(f"{BLOCKS_DIR}/embeddings.json", "r") as f:
        embedding_json = json.load(f)
    # print(f"\n\n EMBEDDING JSON: {embedding_json}")
    img_matrix = np.array([img["embeddings"] for img in embedding_json])
    sprite_matrix = np.array(sprite_features)
    similarity = np.matmul(sprite_matrix, img_matrix.T)
    most_similar_indices = np.argmax(similarity, axis=1)
    # ============= Match and copy =============== #
    project_data = []
    copied_folders = set()
    # =============================================================== #
    #  Loop through most similar images from the Sprites folder       #
    #  → Copy sprite assets (excluding matched image + sprite.json)   #
    #  → Load sprite.json and append its data to project_data         #
    # =============================================================== #
    for sprite_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = folder_image_paths[matched_idx]
        matched_image_path = os.path.normpath(matched_image_path)
        matched_folder = os.path.dirname(matched_image_path)
        # folder_name = os.path.basename(matched_folder)
        if matched_folder in copied_folders:
            continue
        copied_folders.add(matched_folder)
        logger.info(f"Matched image path: {matched_image_path}")
        sprite_json_path = os.path.join(matched_folder, 'sprite.json')
        if not os.path.exists(sprite_json_path):
            logger.warning(f"sprite.json not found in: {matched_folder}")
            continue
        with open(sprite_json_path, 'r') as f:
            sprite_data = json.load(f)
        # print(f"SPRITE DATA: \n{sprite_data}")
        # Copy only non-matched files
        for fname in os.listdir(matched_folder):
            fpath = os.path.join(matched_folder, fname)
            if os.path.isfile(fpath) and fname not in {os.path.basename(matched_image_path), 'sprite.json'}:
                shutil.copy2(fpath, os.path.join(project_folder, fname))
                # logger.info(f"Copied Sprite asset: {fname}")
        project_data.append(sprite_data)
    # ================================================================== #
    #  Loop through most similar images from the Backdrops folder        #
    #  → Copy backdrop assets (excluding matched image + project.json)   #
    #  → Load project.json and append its Stage target to backdrop_data  #
    # ================================================================== #
    backdrop_data = []  # for backdrop-related entries
    for backdrop_idx, matched_idx in enumerate(most_similar_indices):
        matched_image_path = os.path.normpath(folder_image_paths[matched_idx])
        # Check if the match is from the Backdrops folder (compare against the absolute backdrop directory)
        if matched_image_path.startswith(os.path.normpath(str(BACKDROP_DIR))):
            matched_folder = os.path.dirname(matched_image_path)
            folder_name = os.path.basename(matched_folder)
            logger.info(f"Backdrop matched image: {matched_image_path}")
            # Copy only non-matched files
            for fname in os.listdir(matched_folder):
                fpath = os.path.join(matched_folder, fname)
                if os.path.isfile(fpath) and fname not in {os.path.basename(matched_image_path), 'project.json'}:
                    shutil.copy2(fpath, os.path.join(project_folder, fname))
                    # logger.info(f"Copied Backdrop asset: {fname}")
            # Append the backdrop's project.json Stage target
            backdrop_json_path = os.path.join(matched_folder, 'project.json')
            if os.path.exists(backdrop_json_path):
                with open(backdrop_json_path, 'r') as f:
                    backdrop_json_data = json.load(f)
                # print(f"BACKDROP DATA: \n{backdrop_json_data}")
                if "targets" in backdrop_json_data:
                    for target in backdrop_json_data["targets"]:
                        if target.get("isStage"):
                            backdrop_data.append(target)
            else:
                logger.warning(f"project.json not found in: {matched_folder}")
    # Merge the JSON structure
    final_project = {
        "targets": [],
        "monitors": [],
        "extensions": [],
        "meta": {
            "semver": "3.0.0",
            "vm": "11.3.0",
            "agent": "OpenAI ScratchVision Agent"
        }
    }
    for sprite in project_data:
        if not sprite.get("isStage", False):
            final_project["targets"].append(sprite)
    if backdrop_data:
        all_costumes, sounds = [], []
        for idx, bd in enumerate(backdrop_data):
            all_costumes.extend(bd.get("costumes", []))
            if idx == 0 and "sounds" in bd:
                sounds = bd["sounds"]
        final_project["targets"].append({
            "isStage": True,
            "name": "Stage",
            "variables": {},
            "lists": {},
            "broadcasts": {},
            "blocks": {},
            "comments": {},
            "currentCostume": 1 if len(all_costumes) > 1 else 0,
            "costumes": all_costumes,
            "sounds": sounds,
            "volume": 100,
            "layerOrder": 0,
            "tempo": 60,
            "videoTransparency": 50,
            "videoState": "on",
            "textToSpeechLanguage": None
        })
    with open(project_json_path, 'w') as f:
        json.dump(final_project, f, indent=2)
    # logger.info(f"🎉 Final project saved: {project_json_path}")
    return project_json_path
def delay_for_tpm_node(state: GameState):
    logger.info("--- Running DelayForTPMNode ---")
    time.sleep(10)  # Adjust the delay as needed
    logger.info("Delay completed.")
    return state
# Build the LangGraph workflow
workflow = StateGraph(GameState)
# Add all nodes to the workflow
workflow.add_node("time_delay_1", delay_for_tpm_node)
workflow.add_node("pseudo_generator", pseudo_generator_node)
workflow.set_entry_point("time_delay_1")
workflow.add_edge("time_delay_1", "pseudo_generator")
workflow.add_edge("pseudo_generator", END)
app_graph = workflow.compile()
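# Compiled flow: time_delay_1 (a simple pause, named for TPM, i.e. tokens-per-minute rate
# limiting) -> pseudo_generator (the vision LLM turns the scanned script image into
# Scratch-style pseudo-code stored in state["pseudo_code"]) -> END.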
# ============== Helper function to upscale an image ============== #
def upscale_image(image: Image.Image, scale: int = 2) -> Image.Image:
    """
    Upscales a PIL image by a given scale factor.
    """
    try:
        width, height = image.size
        new_size = (width * scale, height * scale)
        upscaled_image = image.resize(new_size, Image.LANCZOS)
        logger.info(f"✅ Upscaled image to {new_size}")
        return upscaled_image
    except Exception as e:
        logger.error(f"❌ Error during image upscaling: {str(e)}")
        return image
def create_sb3_archive(project_folder, project_id):
    """
    Zips the project folder and renames it to an .sb3 file.

    Args:
        project_folder (str): The path to the directory containing the project.json and assets.
        project_id (str): The unique ID for the project, used for naming the .sb3 file.

    Returns:
        str: The path to the created .sb3 file, or None if an error occurred.
    """
    output_filename = os.path.join("generated_projects", project_id)
    zip_path = None
    sb3_path = None
    try:
        # 1. Zip the project folder
        zip_path = shutil.make_archive(output_filename, 'zip', root_dir=project_folder)
        logger.info(f"Project folder zipped to: {zip_path}")
        # 2. Rename the .zip file to .sb3
        sb3_path = f"{output_filename}.sb3"
        os.rename(zip_path, sb3_path)
        logger.info(f"Renamed {zip_path} to {sb3_path}")
        return sb3_path
    except Exception as e:
        logger.error(f"Error creating SB3 archive for {project_id}: {e}")
        # Clean up any partial files if an error occurs
        if zip_path and os.path.exists(zip_path):
            os.remove(zip_path)
        if sb3_path and os.path.exists(sb3_path):
            os.remove(sb3_path)
        return None
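# Note: an .sb3 file is simply a ZIP archive containing project.json plus the referenced
# costume/sound assets, which is why make_archive followed by a rename is sufficient here.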
@app.route('/')  # Landing page (route path assumed)
def index():
    return render_template('app_index.html')
@app.route('/download_sb3/<project_id>')  # Matches the download_url built in process_pdf()
def download_sb3(project_id):
    """
    Allows users to download the generated .sb3 Scratch project file.
    """
    sb3_filename = f"{project_id}.sb3"
    sb3_filepath = os.path.join("generated_projects", sb3_filename)
    try:
        if os.path.exists(sb3_filepath):
            logger.info(f"Serving SB3 file for project ID: {project_id}")
            # send_from_directory serves the file and handles content-disposition for download
            return send_from_directory(
                directory="generated_projects",
                path=sb3_filename,
                as_attachment=True,         # This makes the browser download the file
                download_name=sb3_filename  # This sets the filename for the download
            )
        else:
            logger.warning(f"SB3 file not found for ID: {project_id}")
            return jsonify({"error": "Scratch project file not found"}), 404
    except Exception as e:
        logger.error(f"Error serving SB3 file for ID {project_id}: {e}")
        return jsonify({"error": "Failed to retrieve Scratch project file"}), 500
# API endpoint (the route path here is an assumption; adjust it to match the frontend form action)
@app.route('/process_pdf', methods=['POST'])
def process_pdf():
    try:
        logger.info("Received request to process PDF.")
        if 'pdf_file' not in request.files:
            logger.warning("No PDF file found in request.")
            return jsonify({"error": "Missing PDF file in form-data with key 'pdf_file'"}), 400
        pdf_file = request.files['pdf_file']
        if pdf_file.filename == '':
            return jsonify({"error": "Empty filename"}), 400
        # ================================================= #
        #   Generate a random UUID for the project folder   #
        # ================================================= #
        project_id = str(uuid.uuid4()).replace('-', '')
        project_folder = os.path.join("outputs", f"{project_id}")
        # ================================================= #
        #   Create the empty project_{random_id} folder     #
        # ================================================= #
        os.makedirs(project_folder, exist_ok=True)
        # Save the uploaded PDF temporarily
        filename = secure_filename(pdf_file.filename)
        temp_dir = tempfile.mkdtemp()
        saved_pdf_path = os.path.join(temp_dir, filename)
        pdf_file.save(saved_pdf_path)
        # logger.info(f"Created project folder: {project_folder}")
        logger.info(f"Saved uploaded PDF to: {saved_pdf_path}")
        # Extract & process
        json_path = None
        output_path, result = extract_images_from_pdf(saved_pdf_path, json_path)
        # Check extracted_sprites.json for "scratch block" in any 'name'
        extracted_dir = os.path.join(JSON_DIR, os.path.splitext(filename)[0])
        extracted_sprites_json = os.path.join(extracted_dir, "extracted_sprites.json")
        if not os.path.exists(extracted_sprites_json):
            return jsonify({"error": "No extracted_sprites.json found"}), 500
        with open(extracted_sprites_json, 'r') as f:
            sprite_data = json.load(f)
        project_output = similarity_matching(output_path, project_folder)
        logger.info("Similarity matching completed; loading project skeleton.")
        with open(project_output, 'r') as f:
            project_skeleton = json.load(f)
        # Render the first PDF page to an image and base64-encode it for the vision agent
        images = convert_from_path(saved_pdf_path, dpi=300, poppler_path=poppler_path)
        buffer = BytesIO()
        images[0].save(buffer, format="PNG")
        img_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
        # image_paths = await convert_pdf_to_images_async(saved_pdf_path)
        # Updating logic here [Dev Patel]
        initial_state_dict = {
            "project_json": project_skeleton,
            "description": "The pseudo code for the script",
            "project_id": project_id,
            "project_image": img_base64,
            "action_plan": {},
            "pseudo_code": {},
            "temporary_node": {},
        }
        final_state_dict = app_graph.invoke(initial_state_dict)  # Pass the state as a dictionary
        final_project_json = final_state_dict['project_json']    # Access as dict
        # Save the *final* filled project JSON, overwriting the skeleton
        with open(project_output, "w") as f:
            json.dump(final_project_json, f, indent=2)
        logger.info(f"Final project JSON saved to {project_output}")
        # --- Create the .sb3 file ---
        sb3_file_path = create_sb3_archive(project_folder, project_id)
        if sb3_file_path:
            logger.info(f"Successfully created SB3 file: {sb3_file_path}")
            # Instead of returning the local path, return a URL to the download endpoint
            download_url = f"/download_sb3/{project_id}"
            return jsonify({"message": "Processed PDF and generated the game .sb3 successfully", "project_id": project_id, "download_url": download_url})
        else:
            return jsonify(error="Failed to create SB3 archive"), 500
    except Exception as e:
        logger.error(f"Error during the PDF-processing workflow: {e}", exc_info=True)
        return jsonify({"error": f"❌ Failed to process PDF: {str(e)}"}), 500
if __name__ == '__main__':
    os.makedirs("outputs", exist_ok=True)
    app.run(host='0.0.0.0', port=7860, debug=True)