import io, base64, os, random
from fastapi import FastAPI
from pydantic import BaseModel
from PIL import Image
print(">>> importing optimum.intel.openvino ...")
from optimum.intel.openvino import OVStableDiffusionPipeline
print(">>> import OK")
MODEL_ID = os.environ.get("MODEL_ID", "OpenVINO/stable-diffusion-v1-5-int8-ov")
print("Loading model ...")
pipe = OVStableDiffusionPipeline.from_pretrained(MODEL_ID)
pipe.reshape(512, 512)  # fix static 512x512 input shapes; runs better on CPU
pipe.compile()
print("Model loaded.")
app = FastAPI(title="Txt2Img CPU API")
class Req(BaseModel):
    prompt: str
    negative_prompt: str | None = None
    steps: int = 20
    guidance: float = 7.5
    seed: int | None = None
    width: int = 512
    height: int = 512
@app.get("/healthz")
def health():
    return {"ok": True}
@app.post("/txt2img")
def txt2img(r: Req):
    # With OpenVINO there is no need for a torch.Generator; the seed is applied
    # to Python's random module instead (optional).
    if r.seed is not None:
        random.seed(r.seed)
    image = pipe(
        prompt=r.prompt,
        negative_prompt=r.negative_prompt,
        num_inference_steps=r.steps,
        guidance_scale=r.guidance,
        width=r.width, height=r.height,
    ).images[0]
    # Encode the generated PIL image as a base64 PNG for the JSON response.
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    return {"image_base64": base64.b64encode(buf.getvalue()).decode()}