Bh0r / handler.py
import base64
from io import BytesIO
from typing import Any, Dict

import torch
from diffusers import StableDiffusionXLPipeline


class EndpointHandler:
    def __init__(self, path: str = ""):
        print("🚀 Initializing Bh0r with Juggernaut-XL v9 as base model...")

        # Load Juggernaut-XL v9 instead of the stock SDXL base model
        self.pipe = StableDiffusionXLPipeline.from_pretrained(
            "RunDiffusion/Juggernaut-XL-v9",
            torch_dtype=torch.float16,
            variant="fp16",
        )
        print("✅ Juggernaut-XL v9 base model loaded successfully.")

        # Load the Bh0r LoRA weights
        print("🧩 Loading Bh0r LoRA weights...")
        self.pipe.load_lora_weights(
            "Texttra/Bh0r",
            weight_name="Bh0r-10.safetensors",
            adapter_name="bh0r_lora",
        )
        self.pipe.set_adapters(["bh0r_lora"], adapter_weights=[1.0])
        print("✅ Bh0r LoRA loaded with 1.0 weight.")

        # Fuse the LoRA into the base model weights
        self.pipe.fuse_lora()
        print("🔗 Fused LoRA into base model.")

        # Move to GPU if available (fp16 inference is only practical on CUDA)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.pipe.to(device)
        print("🎯 Model ready on device:", device)
    def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
        print("Received data:", data)

        # Inference Endpoints may send "inputs" as a raw string or as a dict;
        # handle both so a plain {"inputs": "..."} payload does not crash.
        inputs = data.get("inputs", {})
        if isinstance(inputs, str):
            prompt = inputs
        else:
            prompt = inputs.get("prompt", "")
        print("Extracted prompt:", prompt)

        if not prompt:
            return {"error": "No prompt provided."}

        # Generate the image
        image = self.pipe(
            prompt,
            num_inference_steps=40,
            guidance_scale=7.0,
        ).images[0]
        print("Image generated.")

        # Encode the PNG as base64 for the JSON response
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
        print("Returning image.")
        return {"image": base64_image}
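

# --- Usage sketch (not part of the deployed handler) -------------------------
# A minimal local smoke test, assuming a CUDA machine with torch, diffusers,
# and peft installed. The prompt text and the output filename
# "bh0r_sample.png" are arbitrary examples, not values from the repo; the
# payload shape mirrors what __call__ above expects.
if __name__ == "__main__":
    handler = EndpointHandler()
    result = handler({"inputs": {"prompt": "a cinematic portrait, golden hour"}})
    if "image" in result:
        # Decode the base64 payload back into PNG bytes, as a client would.
        with open("bh0r_sample.png", "wb") as f:
            f.write(base64.b64decode(result["image"]))
        print("Saved bh0r_sample.png")
    else:
        print("Handler error:", result)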