from typing import Any, Dict
import torch
from diffusers import StableDiffusionXLPipeline
from io import BytesIO
import base64

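# Custom handler for Hugging Face Inference Endpoints: the toolkit imports
# EndpointHandler from handler.py, instantiates it once at startup (passing the
# local path of the repository), and then calls the instance for every request
# with the parsed JSON payload.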
class EndpointHandler:
    def __init__(self, path: str = ""):
        print(f"πŸš€ Initializing Bh0r with Juggernaut-XL v9 as base model...")

        # Load Juggernaut-XL v9 instead of SDXL base
        self.pipe = StableDiffusionXLPipeline.from_pretrained(
            "RunDiffusion/Juggernaut-XL-v9",
            torch_dtype=torch.float16,
            variant="fp16"
        )

        print("βœ… Juggernaut-XL v9 base model loaded successfully.")

        # Load Bh0r LoRA
        print("🧩 Loading Bh0r LoRA weights...")
        self.pipe.load_lora_weights(
            "Texttra/Bh0r",
            weight_name="Bh0r-10.safetensors",
            adapter_name="bh0r_lora"
        )
        self.pipe.set_adapters(["bh0r_lora"], adapter_weights=[1.0])

        print("βœ… Bh0r LoRA loaded with 0.9 weight.")

        # Fuse LoRA into base model
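        # (fuse_lora merges the adapter weights into the base model's weights,
        # so the LoRA no longer has to be applied separately at each denoising step)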
        self.pipe.fuse_lora()
        print("πŸ”— Fused LoRA into base model.")

        # Move to GPU if available
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.pipe.to(device)
        print("🎯 Model ready on device:", device)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
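        # Expected payload: {"inputs": {"prompt": "<text prompt>"}};
        # returns {"image": "<base64-encoded PNG>"} or {"error": "..."}.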
        print("Received data:", data)

        inputs = data.get("inputs", {})
        prompt = inputs.get("prompt", "")
        print("Extracted prompt:", prompt)

        if not prompt:
            return {"error": "No prompt provided."}

        # Generate the image
        image = self.pipe(
            prompt,
            num_inference_steps=40,
            guidance_scale=7.0,
        ).images[0]

        print("Image generated.")

        # Convert to base64
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
        print("Returning image.")

        return {"image": base64_image}