Update app.py
app.py CHANGED
@@ -39,6 +39,15 @@ def compute_difference_images(img_a, img_b):
         "diff_overlay": diff_overlay
     }
 
+# Save the generated images to files
+def save_images(images):
+    paths = []
+    for key, img in images.items():
+        path = f"{key}.png"
+        img.save(path)
+        paths.append((path, key.replace("_", " ").capitalize()))
+    return paths
+
 # Generate a more detailed caption with BLIP
 def generate_detailed_caption(image):
     inputs = blip_processor(image, return_tensors="pt")
@@ -79,6 +88,7 @@ def analyze_images(image_a, image_b, api_key):
     img_a = image_a.convert("RGB")
     img_b = image_b.convert("RGB")
     images_diff = compute_difference_images(img_a, img_b)
+    saved_images = save_images(images_diff)
 
     # Generate captions with BLIP
     caption_a = generate_detailed_caption(img_a)
@@ -112,7 +122,7 @@ def analyze_images(image_a, image_b, api_key):
         "caption_a": caption_a,
         "caption_b": caption_b,
         "text_analysis": text_analysis,
-        "images_diff": images_diff,
+        "saved_images": saved_images,
         "bar_chart": bar_chart_path,
         "pie_chart": pie_chart_path
     }
@@ -147,17 +157,8 @@ with gr.Blocks() as demo:
     # Analysis logic
     def process_analysis(img_a, img_b, api_key):
         results = analyze_images(img_a, img_b, api_key)
-        diff_images = [
-            ("Original A", results["images_diff"]["original_a"]),
-            ("Original B", results["images_diff"]["original_b"]),
-            ("Sketch A", results["images_diff"]["sketch_a"]),
-            ("Sketch B", results["images_diff"]["sketch_b"]),
-            ("Normal A", results["images_diff"]["normal_a"]),
-            ("Normal B", results["images_diff"]["normal_b"]),
-            ("Difference Overlay", results["images_diff"]["diff_overlay"]),
-        ]
         return (
-            diff_images,
+            results["saved_images"],
            results["caption_a"],
            results["caption_b"],
            results["text_analysis"],
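
For reference, the new save_images helper turns the images_diff dict into a list of (path, label) pairs, and process_analysis now returns that list (results["saved_images"]) as its first value, presumably so it can be shown in a gr.Gallery, which accepts this (image, caption) tuple format. Below is a minimal sketch of what the helper produces, using placeholder images and only dict keys already referenced in this diff; the gallery wiring itself is an assumption, not part of the change.

from PIL import Image

# Copied from the diff: save each image as <key>.png and pair the path with a readable label.
def save_images(images):
    paths = []
    for key, img in images.items():
        path = f"{key}.png"
        img.save(path)
        paths.append((path, key.replace("_", " ").capitalize()))
    return paths

# Placeholder images standing in for compute_difference_images() output;
# the keys match those used by the removed diff_images list.
images_diff = {
    "original_a": Image.new("RGB", (64, 64), "white"),
    "sketch_a": Image.new("L", (64, 64), 255),
    "diff_overlay": Image.new("RGB", (64, 64), "red"),
}

print(save_images(images_diff))
# [('original_a.png', 'Original a'), ('sketch_a.png', 'Sketch a'), ('diff_overlay.png', 'Diff overlay')]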