cyd0806 committed
Commit b4bf64c · verified · 1 Parent(s): 4409022

Upload SubjectGenius_gradio.py with huggingface_hub
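The upload was presumably performed with huggingface_hub's HfApi.upload_file; a minimal sketch of such a call (the repo id and repo type below are assumptions, not taken from this page):

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login` by default
    api.upload_file(
        path_or_fileobj="SubjectGenius_gradio.py",
        path_in_repo="SubjectGenius_gradio.py",
        repo_id="cyd0806/SubjectGenius",  # placeholder repo id
        repo_type="space",                # assumption: a Gradio Space
        commit_message="Upload SubjectGenius_gradio.py with huggingface_hub",
    )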

Files changed (1)
  1. SubjectGenius_gradio.py +486 -0
SubjectGenius_gradio.py ADDED
@@ -0,0 +1,486 @@
+ import os
+ os.environ["FORCE_TORCH_LAYERNORM"] = "1"
+ import sys
+ import torch
+ import gradio as gr
+ import numpy as np
+ import json
+ import cv2
+ from PIL import Image
+ from datetime import datetime
+ import tempfile
+ import os.path as osp
+
+ # Assumes the model code lives in the same directory or on an importable path
+ from src.condition import Condition
+ from src.SubjectGeniusTransformer2DModel import SubjectGeniusTransformer2DModel
+ from src.SubjectGeniusPipeline import SubjectGeniusPipeline
+ from accelerate.utils import set_seed
+
+ # Global variables
+ weight_dtype = torch.bfloat16
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ transformer = None
+ pipe = None
+ TEMP_DIR = tempfile.mkdtemp()
+
+ # Default settings, consistent with the original inference script
+ DEFAULT_CONFIG = {
+     "pretrained_model_name_or_path": "/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell",
+     "transformer": "/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell/transformer",
+     "condition_types": ["fill", "subject"],
+     "denoising_lora": "/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Denoising_LoRA/subject_fill_union",
+     "denoising_lora_weight": 1.0,
+     "condition_lora_dir": "/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Condition_LoRA",
+     "resolution": 512,
+     "num_inference_steps": 8,
+     "max_sequence_length": 512
+ }
+
+ def load_model():
+     global transformer, pipe
+
+     print("Loading transformer model...")
+     # Load the transformer model
+     transformer = SubjectGeniusTransformer2DModel.from_pretrained(
+         pretrained_model_name_or_path=DEFAULT_CONFIG["transformer"],
+     ).to(device=device, dtype=weight_dtype)
+     print("Transformer model loaded")
+
+     print("Loading condition LoRAs...")
+     # Load one LoRA adapter per condition type
+     for condition_type in DEFAULT_CONFIG["condition_types"]:
+         print(f"Loading {condition_type} LoRA...")
+         transformer.load_lora_adapter(
+             f"{DEFAULT_CONFIG['condition_lora_dir']}/{condition_type}.safetensors",
+             adapter_name=condition_type
+         )
+     print("All condition LoRAs loaded")
+
+     print("Creating pipeline...")
+     # Create the pipeline (the transformer is attached separately below)
+     pipe = SubjectGeniusPipeline.from_pretrained(
+         DEFAULT_CONFIG["pretrained_model_name_or_path"],
+         torch_dtype=weight_dtype,
+         transformer=None
+     )
+     print("Pipeline created")
+
+     print("Attaching transformer...")
+     pipe.transformer = transformer
+
+     print("Setting adapters...")
+     # Activate the condition adapters
+     pipe.transformer.set_adapters([i for i in DEFAULT_CONFIG["condition_types"]])
+     pipe = pipe.to(device)
+     print("Model fully loaded!")
+
+     return "Model loaded!"
+
+ def process_image_for_display(image_array):
+     """Convert an image into a display-friendly format: keep the original size but ensure RGB."""
+     if image_array is None:
+         return None
+
+     # Convert PIL images to numpy arrays
+     if isinstance(image_array, Image.Image):
+         image_array = np.array(image_array)
+
+     # Ensure RGB format
+     if len(image_array.shape) == 2:  # grayscale image
+         image_array = cv2.cvtColor(image_array, cv2.COLOR_GRAY2RGB)
+     elif image_array.shape[2] == 4:  # RGBA image
+         image_array = image_array[:, :, :3]
+
+     return image_array
+
+ def save_image_for_model(image_array, path):
+     """Save an image to disk for use as model input."""
+     if image_array is None:
+         return None
+
+     # Make sure the target directory exists
+     os.makedirs(os.path.dirname(path), exist_ok=True)
+
+     # PIL images can be saved directly
+     if isinstance(image_array, Image.Image):
+         image_array.save(path)
+         return path
+
+     # numpy arrays are converted to PIL images before saving
+     Image.fromarray(process_image_for_display(image_array)).save(path)
+     return path
+
+ def preserve_aspect_ratio(image, target_size=(512, 512)):
+     """Resize an image to target_size while preserving its aspect ratio (padded with white)."""
+     if isinstance(image, np.ndarray):
+         pil_image = Image.fromarray(image)
+     else:
+         pil_image = image
+
+     # Compute the aspect ratio
+     width, height = pil_image.size
+     aspect_ratio = width / height
+
+     # Create a new white background image
+     new_image = Image.new("RGB", target_size, (255, 255, 255))
+
+     # Scale while keeping the aspect ratio
+     if aspect_ratio > 1:  # wide image
+         new_width = target_size[0]
+         new_height = int(new_width / aspect_ratio)
+     else:  # tall image
+         new_height = target_size[1]
+         new_width = int(new_height * aspect_ratio)
+
+     # Resize
+     resized_image = pil_image.resize((new_width, new_height), Image.LANCZOS)
+
+     # Paste centered onto the new image
+     paste_position = ((target_size[0] - new_width) // 2,
+                       (target_size[1] - new_height) // 2)
+     new_image.paste(resized_image, paste_position)
+
+     return new_image
+
+ def generate_image(
+     prompt,
+     subject_image,
+     background_image,
+     x1, y1, x2, y2,
+     version="training-free",
+     seed=0,
+     num_inference_steps=8
+ ):
+     global pipe
+
+     # Make sure the model is loaded
+     if pipe is None:
+         load_model()
+
+     # Validate inputs
+     if subject_image is None or background_image is None:
+         return None, None, "Please upload both a subject image and a background image"
+
+     try:
+         # Convert the coordinates to integers
+         x1, y1, x2, y2 = int(float(x1)), int(float(y1)), int(float(x2)), int(float(y2))
+         if x1 > x2: x1, x2 = x2, x1
+         if y1 > y2: y1, y2 = y2, y1
+
+         # Fixed input size expected by the model
+         MODEL_SIZE = (512, 512)
+
+         # 1. Process the subject image: keep its aspect ratio but fit it to the model input size
+         subject_pil = Image.fromarray(subject_image) if isinstance(subject_image, np.ndarray) else subject_image
+         # Create a white canvas
+         subject_processed = Image.new("RGB", MODEL_SIZE, (255, 255, 255))
+         # Resize while keeping the aspect ratio
+         subject_pil.thumbnail((MODEL_SIZE[0], MODEL_SIZE[1]), Image.LANCZOS)
+         # Paste centered
+         paste_pos = ((MODEL_SIZE[0] - subject_pil.width) // 2,
+                      (MODEL_SIZE[1] - subject_pil.height) // 2)
+         subject_processed.paste(subject_pil, paste_pos)
+
+         # 2. Process the background image, also preserving its aspect ratio
+         background_pil = Image.fromarray(background_image) if isinstance(background_image, np.ndarray) else background_image
+
+         # Remember the original size for coordinate conversion
+         orig_width, orig_height = background_pil.size
+
+         # Resize the background while keeping the aspect ratio
+         background_processed = Image.new("RGB", MODEL_SIZE, (255, 255, 255))
+         background_pil.thumbnail((MODEL_SIZE[0], MODEL_SIZE[1]), Image.LANCZOS)
+         bg_paste_pos = ((MODEL_SIZE[0] - background_pil.width) // 2,
+                         (MODEL_SIZE[1] - background_pil.height) // 2)
+         background_processed.paste(background_pil, bg_paste_pos)
+
+         # 3. Compute the adjusted bbox coordinates in the resized/padded background
+         scale_x = background_pil.width / orig_width
+         scale_y = background_pil.height / orig_height
+
+         adjusted_x1 = int(x1 * scale_x) + bg_paste_pos[0]
+         adjusted_y1 = int(y1 * scale_y) + bg_paste_pos[1]
+         adjusted_x2 = int(x2 * scale_x) + bg_paste_pos[0]
+         adjusted_y2 = int(y2 * scale_y) + bg_paste_pos[1]
+
+         # Keep the coordinates within valid bounds
+         adjusted_x1 = max(0, min(adjusted_x1, MODEL_SIZE[0]-1))
+         adjusted_y1 = max(0, min(adjusted_y1, MODEL_SIZE[1]-1))
+         adjusted_x2 = max(0, min(adjusted_x2, MODEL_SIZE[0]-1))
+         adjusted_y2 = max(0, min(adjusted_y2, MODEL_SIZE[1]-1))
+
+         # Final bbox
+         bbox = [adjusted_x1, adjusted_y1, adjusted_x2, adjusted_y2]
+
+         # 4. Copy of the background image kept for display (result visualization)
+         background_display = background_processed.copy()
+
+         # 5. Black out the selected region in the background image actually fed to the model
+         background_for_model = background_processed.copy()
+         background_for_model_array = np.array(background_for_model)
+         # Fill the selected region with black
+         background_for_model_array[adjusted_y1:adjusted_y2+1, adjusted_x1:adjusted_x2+1] = (0, 0, 0)
+         background_for_model = Image.fromarray(background_for_model_array)
+
+         # 6. Build the model conditions
+         subject_condition = Condition("subject", raw_img=subject_processed, no_process=True)
+         # Use the background image with the blacked-out region as the fill condition
+         fill_condition = Condition("fill", raw_img=background_for_model, no_process=True)
+
+         conditions = [subject_condition, fill_condition]
+
+         # 7. Set the random seed
+         if seed is not None:
+             set_seed(int(seed))  # cast in case the slider delivers a float
+
+         # 8. Prepare the JSON data
+         json_data = {
+             "description": prompt,
+             "bbox": bbox
+         }
+
+         # 9. Select the model mode
+         if version == "training-based":
+             denoising_lora_name = os.path.basename(os.path.normpath(DEFAULT_CONFIG["denoising_lora"]))
+             pipe.transformer.load_lora_adapter(
+                 DEFAULT_CONFIG["denoising_lora"],
+                 adapter_name=denoising_lora_name,
+                 use_safetensors=True
+             )
+             pipe.transformer.set_adapters(
+                 [i for i in DEFAULT_CONFIG["condition_types"]] + [denoising_lora_name],
+                 [1.0, 1.0, DEFAULT_CONFIG["denoising_lora_weight"]]
+             )
+         elif version == "training-free":
+             pipe.transformer.set_adapters([i for i in DEFAULT_CONFIG["condition_types"]])
+
+         # 10. Generate the image
+         result_img = pipe(
+             prompt=prompt,
+             conditions=conditions,
+             height=MODEL_SIZE[1],
+             width=MODEL_SIZE[0],
+             num_inference_steps=int(num_inference_steps),
+             max_sequence_length=DEFAULT_CONFIG["max_sequence_length"],
+             model_config={"json_data": json_data},
+         ).images[0]
+
+         # 11. Build the visualization by concatenating the images side by side
+         concat_image = Image.new("RGB", (MODEL_SIZE[0] * 3, MODEL_SIZE[1]), (255, 255, 255))
+
+         # Add the subject image
+         concat_image.paste(subject_processed, (0, 0))
+
+         # Add the background image actually fed to the model (with the black region)
+         concat_image.paste(background_for_model, (MODEL_SIZE[0], 0))
+
+         # Add the generated result
+         concat_image.paste(result_img, (MODEL_SIZE[0] * 2, 0))
+
+         return concat_image, result_img, "Generation succeeded!"
+
+     except Exception as e:
+         import traceback
+         traceback.print_exc()
+         return None, None, f"Error while generating the image: {str(e)}"
+
+ def draw_bbox(background_image, evt: gr.SelectData):
+     """Handle the user's clicks on the image and draw the selection rectangle."""
+     # Initialize the bounding-box state (kept on the function object between clicks)
+     if not hasattr(draw_bbox, "start_point"):
+         draw_bbox.start_point = None
+         draw_bbox.current_image = None
+
+     # Check the background image
+     if background_image is None:
+         return background_image, "", "", "", ""
+
+     try:
+         # Get the image size
+         h, w = background_image.shape[:2]
+
+         # Work out the displayed width and height, falling back to the image size
+         target_width = getattr(evt, 'target_width', None) or getattr(getattr(evt, 'target', None), 'width', None) or w
+         target_height = getattr(evt, 'target_height', None) or getattr(getattr(evt, 'target', None), 'height', None) or h
+
+         # Compute the scaling factors
+         scale_x = w / target_width if target_width else 1.0
+         scale_y = h / target_height if target_height else 1.0
+
+         # Get the click coordinates
+         x = min(max(0, int(evt.index[0] * scale_x)), w-1)
+         y = min(max(0, int(evt.index[1] * scale_y)), h-1)
+
+         # On the first click, record the starting point
+         if draw_bbox.start_point is None:
+             draw_bbox.start_point = (x, y)
+             draw_bbox.current_image = background_image.copy()
+             return background_image, "", "", "", ""
+
+         # On the second click, complete the rectangle
+         end_point = (x, y)
+
+         # Order the coordinates
+         x1 = min(draw_bbox.start_point[0], end_point[0])
+         y1 = min(draw_bbox.start_point[1], end_point[1])
+         x2 = max(draw_bbox.start_point[0], end_point[0])
+         y2 = max(draw_bbox.start_point[1], end_point[1])
+
+         # Draw the rectangle
+         img_with_rect = draw_bbox.current_image.copy()
+         cv2.rectangle(img_with_rect, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+         # Reset the starting point
+         draw_bbox.start_point = None
+
+         return img_with_rect, str(x1), str(y1), str(x2), str(y2)
+
+     except Exception as e:
+         print(f"Error while drawing the bounding box: {e}")
+         draw_bbox.start_point = None
+         return background_image, "", "", "", ""
+
+ def update_bbox_from_input(background_image, x1, y1, x2, y2):
+     """Update the rectangle preview from manually entered coordinates."""
+     try:
+         if background_image is None:
+             return background_image
+
+         # Try to convert the coordinates to integers
+         x1, y1, x2, y2 = int(float(x1) if x1 else 0), int(float(y1) if y1 else 0), \
+                          int(float(x2) if x2 else 0), int(float(y2) if y2 else 0)
+
+         # Get the image size
+         h, w = background_image.shape[:2]
+
+         # Bounds check
+         x1 = max(0, min(x1, w-1))
+         y1 = max(0, min(y1, h-1))
+         x2 = max(0, min(x2, w-1))
+         y2 = max(0, min(y2, h-1))
+
+         # Ensure x1 < x2 and y1 < y2
+         if x1 > x2:
+             x1, x2 = x2, x1
+         if y1 > y2:
+             y1, y2 = y2, y1
+
+         # Draw the rectangle
+         img_with_rect = background_image.copy()
+         cv2.rectangle(img_with_rect, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+         return img_with_rect
+     except Exception:
+         return background_image
+
+ def reset_bbox(background_image):
+     """Reset the bounding box and the preview image."""
+     if hasattr(draw_bbox, "start_point"):
+         draw_bbox.start_point = None
+
+     if background_image is None:
+         return None, "", "", "", ""
+     else:
+         return background_image.copy(), "", "", "", ""
+
+ # Build the Gradio interface
+ def create_interface():
+     with gr.Blocks(title="SubjectGenius Image Generator") as demo:
+         gr.Markdown("# SubjectGenius Image Generator")
+         gr.Markdown("Upload a reference image and a background image, then select a region on the background to generate a new image.")
+
+         status_message = gr.Textbox(label="Status", interactive=False)
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 gr.Markdown("### Input parameters")
+
+                 prompt = gr.Textbox(label="Image description", placeholder="e.g. A decorative fabric topper for windows.")
+
+                 with gr.Row():
+                     subject_image = gr.Image(label="Subject image (Subject)", type="numpy")
+                     background_image = gr.Image(label="Background image (Fill)", type="numpy")
+
+                 gr.Markdown("### Select a region on the background (click twice to set the diagonal corners) or enter the coordinates manually")
+
+                 with gr.Row():
+                     x1_input = gr.Textbox(label="X1", placeholder="Top-left X coordinate")
+                     y1_input = gr.Textbox(label="Y1", placeholder="Top-left Y coordinate")
+                     x2_input = gr.Textbox(label="X2", placeholder="Bottom-right X coordinate")
+                     y2_input = gr.Textbox(label="Y2", placeholder="Bottom-right Y coordinate")
+                 reset_btn = gr.Button("Reset selection")
+
+                 with gr.Accordion("Advanced options", open=False):
+                     version = gr.Radio(
+                         ["training-free", "training-based"],
+                         label="Version",
+                         value="training-free"
+                     )
+                     seed = gr.Slider(
+                         0, 1000, value=0, step=1,
+                         label="Random seed"
+                     )
+                     steps = gr.Slider(
+                         4, 50, value=8, step=1,
+                         label="Inference steps (more steps are slower but may improve quality)"
+                     )
+
+                 generate_btn = gr.Button("Generate image", variant="primary")
+
+             with gr.Column(scale=1):
+                 gr.Markdown("### Region selection preview")
+                 preview_image = gr.Image(label="Region preview", type="numpy", elem_id="preview_image")
+
+                 gr.Markdown("### Generated results")
+                 with gr.Tabs():
+                     with gr.TabItem("Full result"):
+                         output_image_full = gr.Image(label="Full result (including condition images)")
+                     with gr.TabItem("Generated image only"):
+                         output_image = gr.Image(label="Generated image")
+
+         # Event handling
+         background_image.select(
+             draw_bbox,
+             inputs=[background_image],
+             outputs=[preview_image, x1_input, y1_input, x2_input, y2_input]
+         )
+
+         # Keep the preview in sync with the coordinate inputs
+         coord_inputs = [x1_input, y1_input, x2_input, y2_input]
+         for coord in coord_inputs:
+             coord.change(
+                 update_bbox_from_input,
+                 inputs=[background_image, x1_input, y1_input, x2_input, y2_input],
+                 outputs=[preview_image]
+             )
+
+         # Reset button
+         reset_btn.click(
+             reset_bbox,
+             inputs=[background_image],
+             outputs=[preview_image, x1_input, y1_input, x2_input, y2_input]
+         )
+
+         # Generate button
+         generate_btn.click(
+             generate_image,
+             inputs=[prompt, subject_image, background_image,
+                     x1_input, y1_input, x2_input, y2_input,
+                     version, seed, steps],
+             outputs=[output_image_full, output_image, status_message]
+         )
+
+     return demo
+
+ # Main entry point
+ if __name__ == "__main__":
+     # Build the interface
+     demo = create_interface()
+
+     # Load the model
+     print("Loading model...")
+     load_model()
+
+     # Launch Gradio
+     demo.launch(share=True)