import os
import gradio as gr
import json
import logging
import torch
from PIL import Image
from os import path
from torchvision import transforms
from dataclasses import dataclass
from io import BytesIO
import math
from typing import Callable
import spaces
import diffusers
import transformers
from transformers import Qwen3ForCausalLM
from diffusers import ZImagePipeline, DiffusionPipeline, AutoencoderTiny, AutoencoderKL
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.models import AutoencoderKL as DiffusersAutoencoderKL
#from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
#from diffusers.models.transformers import FluxTransformer2DModel
import copy
import random
import time
import safetensors.torch
from tqdm import tqdm
from safetensors.torch import load_file
from huggingface_hub import HfFileSystem, ModelCard
from huggingface_hub import login, hf_hub_download
hf_token = os.environ.get("HF_TOKEN")
if hf_token:  # only attempt login when a token is actually configured for the Space
    login(token=hf_token)
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

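# Keep all Hugging Face downloads in a local "models" cache next to this script.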
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path

#torch.set_float32_matmul_precision("medium")

# Load LoRAs from JSON file
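# Each entry is expected to provide "repo", "title", "image", and "trigger_word",
# plus optional "weights" (a specific LoRA file) and "aspect" ("portrait"/"landscape") keys.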
with open('loras.json', 'r') as f:
    loras = json.load(f)

# Initialize the base model
#dtype = torch.bfloat16
#base_model = "AlekseyCalvin/Artsy_Lite_Flux_v1_by_jurdn_Diffusers"
#pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")

#pipe = diffusers.ZImagePipeline.from_pretrained("Disty0/Z-Image-Turbo-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
#torch.cuda.empty_cache()
#pipe = diffusers.ZImagePipeline.from_pretrained("dimitribarbot/Z-Image-Turbo-BF16", torch_dtype=torch.bfloat16)
#pipe = diffusers.ZImagePipeline.from_pretrained("AlekseyCalvin/Z_Image_Deturbo_Diffusers", torch_dtype=torch.bfloat16)
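# Active checkpoint: a de-turbo'd/re-turbo'd Z-Image base repackaged in Diffusers format
# (the commented lines above are alternative checkpoints that were tried).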
pipe = ZImagePipeline.from_pretrained('AlekseyCalvin/Z-Image-Deturbo-Returbo-Base_Diffusers', torch_dtype=torch.bfloat16)

#pipe.text_encoder = Qwen3ForCausalLM.from_pretrained('Qwen/Qwen3-4B-Instruct-2507').to(torch.bfloat16)
#pipe.vae = AutoencoderKL.from_pretrained("AlekseyCalvin/Custom_VAE-Z-image-FLUX.1-by-G-REPA", torch_dtype=torch.bfloat16, device_map="cuda")


#pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
#pipe.vae = AutoencoderKL.from_pretrained("AlekseyCalvin/Custom_VAE-Z-image-FLUX.1-by-G-REPA", torch_dtype=torch.bfloat16, device_map="cuda")

#custom_vae = AutoencoderKL.from_pretrained("AlekseyCalvin/AnimeVAE_by_Anzhc_for_Flux_ZiT", torch_dtype=torch.float32, ignore_mismatched_sizes=True, low_cpu_mem_usage=False, device_map=None)

# Manually move the VAE to the correct device (e.g., "cuda")
#pipe.vae = custom_vae.to("cuda")
device = "cuda" if torch.cuda.is_available() else "cpu"


#pipe.vae = AutoencoderKL.from_pretrained("REPA-E/e2e-flux-vae", torch_dtype=torch.bfloat16).to("cuda")
##The repa-e vae generates extremely noisy outputs for some reason.

#pipe.vae = DiffusersAutoencoderKL.from_pretrained("kaiyuyue/FLUX.2-dev-vae", torch_dtype=torch.float16, scaling_factor = 0.3611, shift_factor = 0.1159).to("cuda")
## Alas, the model would need to be retrained to work with the Flux2 vae, with its doubled channel count of 32.
#pipe.enable_model_cpu_offload()

try:  # Temp workaround for a diffusers LoRA-loading issue in some versions:
    # keep "proj_out" layers out of the auto-derived exclude list so their LoRA weights still load.
    from diffusers.utils import peft_utils
    from diffusers.utils.peft_utils import _derive_exclude_modules

    def new_derive_exclude_modules(*args, **kwargs):
        exclude_modules = _derive_exclude_modules(*args, **kwargs)
        if exclude_modules is not None:
            exclude_modules = [n for n in exclude_modules if "proj_out" not in n]
        return exclude_modules

    peft_utils._derive_exclude_modules = new_derive_exclude_modules
except Exception:
    # Older or newer diffusers versions may not expose these helpers; skip the patch there.
    pass


#model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
#config = CLIPConfig.from_pretrained(model_id)
#config.text_config.max_position_embeddings = 248
#clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
#clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)
#pipe.tokenizer = clip_processor.tokenizer
#pipe.text_encoder = clip_model.text_model
#pipe.tokenizer_max_length = 248
#pipe.text_encoder.dtype = torch.bfloat16
#pipe.text_encoder_2 = t5.text_model

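# Upper bound for the seed slider and for randomized seeds (max unsigned 32-bit value).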
MAX_SEED = 2**32-1

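# Minimal context manager that prints the wall-clock time spent inside a `with` block.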
class calculateDuration:
    def __init__(self, activity_name=""):
        self.activity_name = activity_name

    def __enter__(self):
        self.start_time = time.time()
        return self
    
    def __exit__(self, exc_type, exc_value, traceback):
        self.end_time = time.time()
        self.elapsed_time = self.end_time - self.start_time
        if self.activity_name:
            print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
        else:
            print(f"Elapsed time: {self.elapsed_time:.6f} seconds")


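# Gallery click handler: swap in the selected LoRA's trigger-word placeholder and info link,
# and apply its preferred aspect ratio when loras.json declares one.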
def update_selection(evt: gr.SelectData, width, height):
    selected_lora = loras[evt.index]
    new_placeholder = f"Prompt with activator word(s): '{selected_lora['trigger_word']}'! "
    lora_repo = selected_lora["repo"]
    lora_trigger = selected_lora['trigger_word']
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}). Prompt using: '{lora_trigger}'!"
    if "aspect" in selected_lora:
        if selected_lora["aspect"] == "portrait":
            width = 768
            height = 1024
        elif selected_lora["aspect"] == "landscape":
            width = 1024
            height = 768
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index,
        width,
        height,
    )

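# GPU-bound generation step; the spaces.GPU decorator requests a ZeroGPU device
# for this call with a 50-second duration budget.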
@spaces.GPU(duration=50)
def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress): 
    pipe.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)
    
    with calculateDuration("Generating image"):
        # Generate image
        image = pipe(
            prompt=f"{prompt} {trigger_word}",
            num_inference_steps=steps,
            guidance_scale=cfg_scale,
            width=width,
            height=height,
            generator=generator,
            joint_attention_kwargs={"scale": lora_scale},
        ).images[0]
    return image

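# End-to-end handler for the Generate button: load the chosen LoRA, optionally randomize
# the seed, render the image, then unload the LoRA and park the pipeline on CPU between requests.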
def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.")

    selected_lora = loras[selected_index]
    lora_path = selected_lora["repo"]
    trigger_word = selected_lora['trigger_word']

    # Load LoRA weights
    with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
        if "weights" in selected_lora:
            pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
        else:
            pipe.load_lora_weights(lora_path)
        
    # Set random seed for reproducibility
    with calculateDuration("Randomizing seed"):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
    
    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
    pipe.to("cpu")
    pipe.unload_lora_weights()
    return image, seed  

run_lora.zerogpu = True

css = '''
#gen_btn{height: 100%}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#gallery .grid-wrap{height: 10vh}
'''
with gr.Blocks(css=css) as app:
    title = gr.HTML(
        """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> SOONfactory </h1>""",
        elem_id="title",
    )
    # Info blob stating what the app is running
    info_blob = gr.HTML(
        """<div id="info_blob"> Hosted gallery of custom-trained text-to-image Low-Rank Adapters (LoRAs) for Z-Image models, running over Z.I.T. Originally set up for adapters fine-tuned for use by the RCA (Revolutionary Communists of America, at [https://CommunistUSA.org/]) and other activists/artists. We also train and feature adapters inspired by works of the Soviet Avant-Garde, Dada, Surrealism, and other radical styles, plus some original conceptions/fusions. Below those are identity models of notable revolutionaries and poets. Click the squares to switch adapters and see links to their pages, many of which offer more info/resources. </div>"""
    )

    # Info blob with usage tips for adapter trigger words
    info_blob_2 = gr.HTML(
        """<div id="info_blob"> To reinforce/focus a selected adapter style, add its pre-encoded "trigger" word/phrase to your prompt. The corresponding activator info and/or prompt template appears once an adapter square is clicked. Copy/paste these into the prompt box as a starting point. </div>"""
    )
    selected_index = gr.State(None)
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Select LoRa/Style & type prompt!")
        with gr.Column(scale=1, elem_id="gen_column"):
            generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
    with gr.Row():
        with gr.Column(scale=3):
            selected_info = gr.Markdown("")
            gallery = gr.Gallery(
                [(item["image"], item["title"]) for item in loras],
                label="LoRA Inventory",
                allow_preview=False,
                columns=3,
                elem_id="gallery"
            )
            
        with gr.Column(scale=4):
            result = gr.Image(label="Generated Image")

    with gr.Row():
        with gr.Accordion("Advanced Settings", open=True):
            with gr.Column():
                with gr.Row():
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=.1, value=1.0)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=10)
                
                with gr.Row():
                    width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
                    height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
                
                with gr.Row():
                    randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
                    lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=2.5, step=0.01, value=0.8)

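    # Clicking a gallery tile feeds the current width/height in and gets back the updated
    # prompt placeholder, info markdown, selected index, and (possibly adjusted) dimensions.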
    gallery.select(
        update_selection,
        inputs=[width, height],
        outputs=[prompt, selected_info, selected_index, width, height]
    )

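    # Either clicking Generate or submitting the prompt textbox triggers a run.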
    gr.on(
        triggers=[generate_button.click, prompt.submit],
        fn=run_lora,
        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )

app.queue(default_concurrency_limit=2).launch(show_error=True)