import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "6"
from typing import Any, Dict

import torch
from torch import autocast
from diffusers import (
    AutoPipelineForText2Image,
    BitsAndBytesConfig,
    FluxPipeline,
    FluxTransformer2DModel,
)
# base64 / BytesIO are only needed by the commented-out base64 response path below
import base64
from io import BytesIO
from huggingface_hub import login

# Authenticate against the Hugging Face Hub (FLUX.1-dev is a gated model)
token = os.getenv("HF_TOKEN")
if token is None:
    raise ValueError("The environment variable HF_TOKEN is not set")
login(token=token)

# Set device; this handler requires a CUDA GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
    raise ValueError("This handler needs to run on a GPU")


class EndpointHandler:
    def __init__(self, path: str = ""):
        ckpt_id = "black-forest-labs/FLUX.1-dev"
        fused_transformer_path = os.path.join(path, "fused_transformer")

        # Step 1: fuse the LoRA weights into the transformer once and cache the result.
        # The text encoders are skipped here because only the transformer is touched by the LoRA.
        if not os.path.exists(fused_transformer_path):
            print("Fusing LoRA weights into transformer...")
            pipeline = FluxPipeline.from_pretrained(
                ckpt_id,
                text_encoder=None,
                text_encoder_2=None,
                torch_dtype=torch.float16,
            )
            pipeline.load_lora_weights(path, weight_name="pytorch_lora_weights.safetensors")
            pipeline.fuse_lora()
            pipeline.unload_lora_weights()
            pipeline.transformer.save_pretrained(fused_transformer_path)
            print("Finished fusing LoRA weights")

        # Step 2: reload the fused transformer with 4-bit NF4 quantization (bitsandbytes)
        bnb_4bit_compute_dtype = torch.float16
        nf4_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=bnb_4bit_compute_dtype,
        )
        transformer = FluxTransformer2DModel.from_pretrained(
            fused_transformer_path,
            quantization_config=nf4_config,
            torch_dtype=bnb_4bit_compute_dtype,
        )

        # Step 3: build the full text-to-image pipeline around the quantized transformer
        self.pipe = AutoPipelineForText2Image.from_pretrained(
            ckpt_id,
            transformer=transformer,
            torch_dtype=bnb_4bit_compute_dtype,
        )
        # Offload idle submodules to CPU to reduce peak GPU memory usage
        self.pipe.enable_model_cpu_offload()

    def __call__(self, data: Dict[str, Any]) -> Any:
        """
        Args:
            data (:obj:`dict`):
                Request payload; the prompt is read from the "inputs" key.
        Return:
            The generated image as a :obj:`PIL.Image.Image`
            (a base64-encoded variant is sketched in the comments below).
        """
        inputs = data.pop("inputs", data)

        # Run the inference pipeline; the GPU check above guarantees a CUDA device here
        with autocast(device.type):
            image = self.pipe(
                prompt=inputs,
                num_inference_steps=28,
                generator=torch.Generator(device=device.type),
                width=1024,
                height=1024,
                guidance_scale=3.5,
            ).images[0]

        # Optionally persist the generated image to disk:
        #os.makedirs('generated_images', exist_ok=True)
        #image_path = f'generated_images/generated_{hash(inputs)}.jpg'
        #image.save(image_path)

        return image

        # Alternative: encode the image as base64 and return it in a dict instead:
        #buffered = BytesIO()
        #image.save(buffered, format="JPEG")
        #img_str = base64.b64encode(buffered.getvalue())
        #return {"image": img_str.decode()}
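

# --- Illustrative local smoke test (not part of the endpoint contract) ---
# A minimal sketch of how Hugging Face Inference Endpoints drives a custom handler:
# it instantiates EndpointHandler with the repository path and calls it with a
# {"inputs": "<prompt>"} payload. The path ".", the prompt, and the output filename
# below are illustrative assumptions, not values from the original handler; running
# this requires a CUDA GPU, HF_TOKEN set, and pytorch_lora_weights.safetensors
# present in the repository path.
if __name__ == "__main__":
    handler = EndpointHandler(path=".")
    result = handler({"inputs": "a photo of an astronaut riding a horse"})
    result.save("test_output.jpg")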