File size: 1,379 Bytes
d22fba2
 
e369958
d22fba2
 
002d875
d22fba2
 
 
e369958
 
d22fba2
e369958
d22fba2
128e289
9cd9948
 
398d54b
128e289
e369958
d22fba2
128e289
d22fba2
 
7e29046
0c0b7e8
 
 
d22fba2
0c0b7e8
e369958
bf7ff83
0c0b7e8
e369958
398d54b
e369958
0c0b7e8
 
bf7ff83
0c0b7e8
 
 
 
bf7ff83
0c0b7e8
bf7ff83
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
from typing import Dict
import torch
from diffusers import FluxPipeline
from io import BytesIO
import base64

class EndpointHandler:
    """Hugging Face Inference Endpoint handler for FLUX.1-dev with the
    Texttra/Cityscape_Studio LoRA fused in.

    `__call__` takes the endpoint request payload and returns the generated
    image as a base64-encoded PNG string.
    """

    def __init__(self, path: str = ""):
        """Load the base FLUX pipeline, fuse the LoRA, and move it to GPU.

        Args:
            path: Local model directory supplied by the endpoint runtime.
                Unused here — the pipeline is pulled from the Hub instead.
        """
        print(f"Initializing model from: {path}")

        self.pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.float16
        )

        print("Loading LoRA weights from: Texttra/Cityscape_Studio")
        self.pipe.load_lora_weights("Texttra/Cityscape_Studio", weight_name="c1t3_v1.safetensors")
        # Fuse once at load time so the LoRA adds no per-request overhead.
        self.pipe.fuse_lora(lora_scale=0.9)

        self.pipe.to("cuda" if torch.cuda.is_available() else "cpu")
        print("Model initialized successfully.")

    def __call__(self, data: Dict) -> Dict:
        """Generate one image from a request payload.

        Accepts both payload shapes the HF Inference API commonly sends:
        ``{"inputs": "a prompt"}`` and ``{"inputs": {"prompt": "a prompt"}}``.

        Args:
            data: Parsed JSON request body.

        Returns:
            ``{"image": <base64 PNG string>}`` on success, or
            ``{"error": "No prompt provided."}`` when the prompt is missing/empty.
        """
        print("Received data:", data)

        inputs = data.get("inputs", {})
        # The Inference API may send the prompt directly as a string rather
        # than a nested dict; a plain `inputs.get(...)` would raise
        # AttributeError on a str. Support both shapes.
        if isinstance(inputs, str):
            prompt = inputs
        else:
            prompt = inputs.get("prompt", "")
        print("Extracted prompt:", prompt)

        if not prompt:
            return {"error": "No prompt provided."}

        image = self.pipe(
            prompt,
            num_inference_steps=50,
            guidance_scale=4.5
        ).images[0]
        print("Image generated.")

        # Serialize the PIL image to PNG in memory and base64-encode it so
        # the response body stays plain JSON.
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
        print("Returning image.")

        return {"image": base64_image}