gaoyu1314 committed on
Commit 7d9eacb · verified · 1 Parent(s): c816bca

Upload 8 files

util/__init__.py ADDED
File without changes
util/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (149 Bytes)

util/__pycache__/box_annotator.cpython-311.pyc ADDED
Binary file (10.5 kB)

util/__pycache__/utils.cpython-311.pyc ADDED
Binary file (42.8 kB)
 
util/box_annotator.py ADDED
@@ -0,0 +1,262 @@
from typing import List, Optional, Union, Tuple

import cv2
import numpy as np

from supervision.detection.core import Detections
from supervision.draw.color import Color, ColorPalette


class BoxAnnotator:
    """
    A class for drawing bounding boxes on an image using detections provided.

    Attributes:
        color (Union[Color, ColorPalette]): The color to draw the bounding box,
            can be a single color or a color palette
        thickness (int): The thickness of the bounding box lines, default is 2
        text_color (Color): The color of the text on the bounding box, default is white
        text_scale (float): The scale of the text on the bounding box, default is 0.5
        text_thickness (int): The thickness of the text on the bounding box,
            default is 1
        text_padding (int): The padding around the text on the bounding box,
            default is 5

    """

    def __init__(
        self,
        color: Union[Color, ColorPalette] = ColorPalette.DEFAULT,
        thickness: int = 3,  # 1 for seeclick, 2 for mind2web, and 3 for demo
        text_color: Color = Color.BLACK,
        text_scale: float = 0.5,  # 0.8 for mobile/web, 0.3 for desktop, 0.4 for mind2web
        text_thickness: int = 2,  # 1; 2 for demo
        text_padding: int = 10,
        avoid_overlap: bool = True,
    ):
        self.color: Union[Color, ColorPalette] = color
        self.thickness: int = thickness
        self.text_color: Color = text_color
        self.text_scale: float = text_scale
        self.text_thickness: int = text_thickness
        self.text_padding: int = text_padding
        self.avoid_overlap: bool = avoid_overlap

    def annotate(
        self,
        scene: np.ndarray,
        detections: Detections,
        labels: Optional[List[str]] = None,
        skip_label: bool = False,
        image_size: Optional[Tuple[int, int]] = None,
    ) -> np.ndarray:
        """
        Draws bounding boxes on the frame using the detections provided.

        Args:
            scene (np.ndarray): The image on which the bounding boxes will be drawn
            detections (Detections): The detections for which the
                bounding boxes will be drawn
            labels (Optional[List[str]]): An optional list of labels
                corresponding to each detection. If `labels` is not provided,
                the corresponding `class_id` is used as the label.
            skip_label (bool): If set to `True`, skips bounding box label annotation.
        Returns:
            np.ndarray: The image with the bounding boxes drawn on it

        Example:
            ```python
            import supervision as sv

            classes = ['person', ...]
            image = ...
            detections = sv.Detections(...)

            box_annotator = sv.BoxAnnotator()
            labels = [
                f"{classes[class_id]} {confidence:0.2f}"
                for _, _, confidence, class_id, _ in detections
            ]
            annotated_frame = box_annotator.annotate(
                scene=image.copy(),
                detections=detections,
                labels=labels
            )
            ```
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        for i in range(len(detections)):
            x1, y1, x2, y2 = detections.xyxy[i].astype(int)
            class_id = (
                detections.class_id[i] if detections.class_id is not None else None
            )
            idx = class_id if class_id is not None else i
            color = (
                self.color.by_idx(idx)
                if isinstance(self.color, ColorPalette)
                else self.color
            )
            cv2.rectangle(
                img=scene,
                pt1=(x1, y1),
                pt2=(x2, y2),
                color=color.as_bgr(),
                thickness=self.thickness,
            )
            if skip_label:
                continue

            text = (
                f"{class_id}"
                if (labels is None or len(detections) != len(labels))
                else labels[i]
            )

            text_width, text_height = cv2.getTextSize(
                text=text,
                fontFace=font,
                fontScale=self.text_scale,
                thickness=self.text_thickness,
            )[0]

            if not self.avoid_overlap:
                text_x = x1 + self.text_padding
                text_y = y1 - self.text_padding

                text_background_x1 = x1
                text_background_y1 = y1 - 2 * self.text_padding - text_height

                text_background_x2 = x1 + 2 * self.text_padding + text_width
                text_background_y2 = y1
                # text_x = x1 - self.text_padding - text_width
                # text_y = y1 + self.text_padding + text_height
                # text_background_x1 = x1 - 2 * self.text_padding - text_width
                # text_background_y1 = y1
                # text_background_x2 = x1
                # text_background_y2 = y1 + 2 * self.text_padding + text_height
            else:
                text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2 = get_optimal_label_pos(self.text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size)

            cv2.rectangle(
                img=scene,
                pt1=(text_background_x1, text_background_y1),
                pt2=(text_background_x2, text_background_y2),
                color=color.as_bgr(),
                thickness=cv2.FILLED,
            )
            # import pdb; pdb.set_trace()
            box_color = color.as_rgb()
            luminance = 0.299 * box_color[0] + 0.587 * box_color[1] + 0.114 * box_color[2]
            text_color = (0, 0, 0) if luminance > 160 else (255, 255, 255)
            cv2.putText(
                img=scene,
                text=text,
                org=(text_x, text_y),
                fontFace=font,
                fontScale=self.text_scale,
                # color=self.text_color.as_rgb(),
                color=text_color,
                thickness=self.text_thickness,
                lineType=cv2.LINE_AA,
            )
        return scene


def box_area(box):
    return (box[2] - box[0]) * (box[3] - box[1])

def intersection_area(box1, box2):
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[2], box2[2])
    y2 = min(box1[3], box2[3])
    return max(0, x2 - x1) * max(0, y2 - y1)

def IoU(box1, box2, return_max=True):
    intersection = intersection_area(box1, box2)
    union = box_area(box1) + box_area(box2) - intersection
    if box_area(box1) > 0 and box_area(box2) > 0:
        ratio1 = intersection / box_area(box1)
        ratio2 = intersection / box_area(box2)
    else:
        ratio1, ratio2 = 0, 0
    if return_max:
        return max(intersection / union, ratio1, ratio2)
    else:
        return intersection / union


def get_optimal_label_pos(text_padding, text_width, text_height, x1, y1, x2, y2, detections, image_size):
    """ Check overlap of the label text with the detection boxes and pick a label position.
    pos: str, position of the text, must be one of 'top left', 'top right', 'outer left', 'outer right'.
    TODO: if all candidates overlap, return the last one, i.e. outer right.
    Threshold: defaults to 0.3
    """

    def get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size):
        is_overlap = False
        for i in range(len(detections)):
            detection = detections.xyxy[i].astype(int)
            if IoU([text_background_x1, text_background_y1, text_background_x2, text_background_y2], detection) > 0.3:
                is_overlap = True
                break
        # check if the text is out of the image
        if text_background_x1 < 0 or text_background_x2 > image_size[0] or text_background_y1 < 0 or text_background_y2 > image_size[1]:
            is_overlap = True
        return is_overlap

    # if pos == 'top left':
    text_x = x1 + text_padding
    text_y = y1 - text_padding

    text_background_x1 = x1
    text_background_y1 = y1 - 2 * text_padding - text_height

    text_background_x2 = x1 + 2 * text_padding + text_width
    text_background_y2 = y1
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # elif pos == 'outer left':
    text_x = x1 - text_padding - text_width
    text_y = y1 + text_padding + text_height

    text_background_x1 = x1 - 2 * text_padding - text_width
    text_background_y1 = y1

    text_background_x2 = x1
    text_background_y2 = y1 + 2 * text_padding + text_height
    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # elif pos == 'outer right':
    text_x = x2 + text_padding
    text_y = y1 + text_padding + text_height

    text_background_x1 = x2
    text_background_y1 = y1

    text_background_x2 = x2 + 2 * text_padding + text_width
    text_background_y2 = y1 + 2 * text_padding + text_height

    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    # elif pos == 'top right':
    text_x = x2 - text_padding - text_width
    text_y = y1 - text_padding

    text_background_x1 = x2 - 2 * text_padding - text_width
    text_background_y1 = y1 - 2 * text_padding - text_height

    text_background_x2 = x2
    text_background_y2 = y1

    is_overlap = get_is_overlap(detections, text_background_x1, text_background_y1, text_background_x2, text_background_y2, image_size)
    if not is_overlap:
        return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2

    return text_x, text_y, text_background_x1, text_background_y1, text_background_x2, text_background_y2
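With `avoid_overlap=True`, the label background is tried at top left, outer left, outer right, then top right, and the first candidate whose IoU with every detection stays at or below 0.3 and that fits inside `image_size` is kept. A minimal usage sketch, assuming a `supervision`/OpenCV install compatible with the imports above and the `util/box_annotator.py` path from this commit; the canvas, box coordinates, and labels are invented for illustration:

```python
import numpy as np
import supervision as sv

from util.box_annotator import BoxAnnotator

# A blank 400x400 BGR canvas and two made-up detections in pixel xyxy coordinates.
scene = np.zeros((400, 400, 3), dtype=np.uint8)
detections = sv.Detections(xyxy=np.array([[40.0, 40.0, 160.0, 120.0],
                                          [200.0, 220.0, 360.0, 300.0]]))

annotator = BoxAnnotator(text_scale=0.5, thickness=2, avoid_overlap=True)
annotated = annotator.annotate(
    scene=scene.copy(),
    detections=detections,
    labels=["0", "1"],
    image_size=(400, 400),  # (w, h); needed for the out-of-image check when avoid_overlap=True
)
print(annotated.shape)  # (400, 400, 3)
```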
util/omniparser.py ADDED
@@ -0,0 +1,32 @@
from util.utils import get_som_labeled_img, get_caption_model_processor, get_yolo_model, check_ocr_box
import torch
from PIL import Image
import io
import base64
from typing import Dict


class Omniparser(object):
    def __init__(self, config: Dict):
        self.config = config
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        self.som_model = get_yolo_model(model_path=config['som_model_path'])
        self.caption_model_processor = get_caption_model_processor(model_name=config['caption_model_name'], model_name_or_path=config['caption_model_path'], device=device)
        print('Omniparser initialized!!!')

    def parse(self, image_base64: str):
        image_bytes = base64.b64decode(image_base64)
        image = Image.open(io.BytesIO(image_bytes))
        print('image size:', image.size)

        box_overlay_ratio = max(image.size) / 3200
        draw_bbox_config = {
            'text_scale': 0.8 * box_overlay_ratio,
            'text_thickness': max(int(2 * box_overlay_ratio), 1),
            'text_padding': max(int(3 * box_overlay_ratio), 1),
            'thickness': max(int(3 * box_overlay_ratio), 1),
        }

        (text, ocr_bbox), _ = check_ocr_box(image, display_img=False, output_bb_format='xyxy', easyocr_args={'text_threshold': 0.8}, use_paddleocr=False)
        dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(image, self.som_model, BOX_TRESHOLD=self.config['BOX_TRESHOLD'], output_coord_in_ratio=True, ocr_bbox=ocr_bbox, draw_bbox_config=draw_bbox_config, caption_model_processor=self.caption_model_processor, ocr_text=text, use_local_semantics=True, iou_threshold=0.7, scale_img=False, batch_size=128)

        return dino_labled_img, parsed_content_list
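To show how the pieces fit together, here is a hedged usage sketch. The config keys are exactly those read by `__init__` and `parse` (including the `BOX_TRESHOLD` spelling the code expects); the weight paths, the `florence2` model choice, the threshold value, and the screenshot filename are illustrative placeholders, not assets shipped in this commit. Importing the module also pulls in `util.utils`, which initializes EasyOCR and PaddleOCR at load time.

```python
import base64

from util.omniparser import Omniparser

config = {
    'som_model_path': 'weights/icon_detect/model.pt',       # placeholder path
    'caption_model_name': 'florence2',                      # one of the names handled by get_caption_model_processor
    'caption_model_path': 'weights/icon_caption_florence',  # placeholder path
    'BOX_TRESHOLD': 0.05,                                   # illustrative threshold
}

parser = Omniparser(config)

with open('screenshot.png', 'rb') as f:                     # any screenshot
    image_base64 = base64.b64encode(f.read()).decode('utf-8')

labeled_img_base64, parsed_content_list = parser.parse(image_base64)
print(len(parsed_content_list), 'elements parsed')
```

`parse` returns the annotated screenshot as a base64-encoded PNG plus the list of parsed text/icon elements produced by `get_som_labeled_img`.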
util/utils - 副本.py ADDED
@@ -0,0 +1,540 @@
1
+ # from ultralytics import YOLO
2
+ import os
3
+ import io
4
+ import base64
5
+ import time
6
+ from PIL import Image, ImageDraw, ImageFont
7
+ import json
8
+ import requests
9
+ # utility function
10
+ import os
11
+ from openai import AzureOpenAI
12
+
13
+ import json
14
+ import sys
15
+ import os
16
+ import cv2
17
+ import numpy as np
18
+ # %matplotlib inline
19
+ from matplotlib import pyplot as plt
20
+ import easyocr
21
+ from paddleocr import PaddleOCR
22
+ reader = easyocr.Reader(['en'])
23
+ paddle_ocr = PaddleOCR(
24
+ lang='en', # other lang also available
25
+ use_angle_cls=False,
26
+ use_gpu=False, # using cuda will conflict with pytorch in the same process
27
+ show_log=False,
28
+ max_batch_size=1024,
29
+ use_dilation=True, # improves accuracy
30
+ det_db_score_mode='slow', # improves accuracy
31
+ rec_batch_num=1024)
32
+ import time
33
+ import base64
34
+
35
+ import os
36
+ import ast
37
+ import torch
38
+ from typing import Tuple, List, Union
39
+ from torchvision.ops import box_convert
40
+ import re
41
+ from torchvision.transforms import ToPILImage
42
+ import supervision as sv
43
+ import torchvision.transforms as T
44
+ from util.box_annotator import BoxAnnotator
45
+
46
+
47
+ def get_caption_model_processor(model_name, model_name_or_path="Salesforce/blip2-opt-2.7b", device=None):
48
+ if not device:
49
+ device = "cuda" if torch.cuda.is_available() else "cpu"
50
+ if model_name == "blip2":
51
+ from transformers import Blip2Processor, Blip2ForConditionalGeneration
52
+ processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
53
+ if device == 'cpu':
54
+ model = Blip2ForConditionalGeneration.from_pretrained(
55
+ model_name_or_path, device_map=None, torch_dtype=torch.float32
56
+ )
57
+ else:
58
+ model = Blip2ForConditionalGeneration.from_pretrained(
59
+ model_name_or_path, device_map=None, torch_dtype=torch.float16
60
+ ).to(device)
61
+ elif model_name == "florence2":
62
+ from transformers import AutoProcessor, AutoModelForCausalLM
63
+ processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
64
+ if device == 'cpu':
65
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float32, trust_remote_code=True)
66
+ else:
67
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, trust_remote_code=True).to(device)
68
+ return {'model': model.to(device), 'processor': processor}
69
+
70
+
71
+ def get_yolo_model(model_path):
72
+ from ultralytics import YOLO
73
+ # Load the model.
74
+ model = YOLO(model_path)
75
+ return model
76
+
77
+
78
+ @torch.inference_mode()
79
+ def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=None, batch_size=128):
80
+ # Number of samples per batch, --> 128 roughly takes 4 GB of GPU memory for florence v2 model
81
+ to_pil = ToPILImage()
82
+ if starting_idx:
83
+ non_ocr_boxes = filtered_boxes[starting_idx:]
84
+ else:
85
+ non_ocr_boxes = filtered_boxes
86
+ croped_pil_image = []
87
+ for i, coord in enumerate(non_ocr_boxes):
88
+ try:
89
+ xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
90
+ ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
91
+ cropped_image = image_source[ymin:ymax, xmin:xmax, :]
92
+ cropped_image = cv2.resize(cropped_image, (64, 64))
93
+ croped_pil_image.append(to_pil(cropped_image))
94
+ except:
95
+ continue
96
+
97
+ model, processor = caption_model_processor['model'], caption_model_processor['processor']
98
+ if not prompt:
99
+ if 'florence' in model.config.name_or_path:
100
+ prompt = "<CAPTION>"
101
+ else:
102
+ prompt = "The image shows"
103
+
104
+ generated_texts = []
105
+ device = model.device
106
+ for i in range(0, len(croped_pil_image), batch_size):
107
+ start = time.time()
108
+ batch = croped_pil_image[i:i+batch_size]
109
+ t1 = time.time()
110
+ if model.device.type == 'cuda':
111
+ inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt", do_resize=False).to(device=device, dtype=torch.float16)
112
+ else:
113
+ inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt").to(device=device)
114
+ if 'florence' in model.config.name_or_path:
115
+ generated_ids = model.generate(input_ids=inputs["input_ids"],pixel_values=inputs["pixel_values"],max_new_tokens=20,num_beams=1, do_sample=False)
116
+ else:
117
+ generated_ids = model.generate(**inputs, max_length=100, num_beams=5, no_repeat_ngram_size=2, early_stopping=True, num_return_sequences=1) # temperature=0.01, do_sample=True,
118
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
119
+ generated_text = [gen.strip() for gen in generated_text]
120
+ generated_texts.extend(generated_text)
121
+
122
+ return generated_texts
123
+
124
+
125
+
126
+ def get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor):
127
+ to_pil = ToPILImage()
128
+ if ocr_bbox:
129
+ non_ocr_boxes = filtered_boxes[len(ocr_bbox):]
130
+ else:
131
+ non_ocr_boxes = filtered_boxes
132
+ croped_pil_image = []
133
+ for i, coord in enumerate(non_ocr_boxes):
134
+ xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
135
+ ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
136
+ cropped_image = image_source[ymin:ymax, xmin:xmax, :]
137
+ croped_pil_image.append(to_pil(cropped_image))
138
+
139
+ model, processor = caption_model_processor['model'], caption_model_processor['processor']
140
+ device = model.device
141
+ messages = [{"role": "user", "content": "<|image_1|>\ndescribe the icon in one sentence"}]
142
+ prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
143
+
144
+ batch_size = 5 # Number of samples per batch
145
+ generated_texts = []
146
+
147
+ for i in range(0, len(croped_pil_image), batch_size):
148
+ images = croped_pil_image[i:i+batch_size]
149
+ image_inputs = [processor.image_processor(x, return_tensors="pt") for x in images]
150
+ inputs ={'input_ids': [], 'attention_mask': [], 'pixel_values': [], 'image_sizes': []}
151
+ texts = [prompt] * len(images)
152
+ for i, txt in enumerate(texts):
153
+ input = processor._convert_images_texts_to_inputs(image_inputs[i], txt, return_tensors="pt")
154
+ inputs['input_ids'].append(input['input_ids'])
155
+ inputs['attention_mask'].append(input['attention_mask'])
156
+ inputs['pixel_values'].append(input['pixel_values'])
157
+ inputs['image_sizes'].append(input['image_sizes'])
158
+ max_len = max([x.shape[1] for x in inputs['input_ids']])
159
+ for i, v in enumerate(inputs['input_ids']):
160
+ inputs['input_ids'][i] = torch.cat([processor.tokenizer.pad_token_id * torch.ones(1, max_len - v.shape[1], dtype=torch.long), v], dim=1)
161
+ inputs['attention_mask'][i] = torch.cat([torch.zeros(1, max_len - v.shape[1], dtype=torch.long), inputs['attention_mask'][i]], dim=1)
162
+ inputs_cat = {k: torch.concatenate(v).to(device) for k, v in inputs.items()}
163
+
164
+ generation_args = {
165
+ "max_new_tokens": 25,
166
+ "temperature": 0.01,
167
+ "do_sample": False,
168
+ }
169
+ generate_ids = model.generate(**inputs_cat, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
170
+ # # remove input tokens
171
+ generate_ids = generate_ids[:, inputs_cat['input_ids'].shape[1]:]
172
+ response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
173
+ response = [res.strip('\n').strip() for res in response]
174
+ generated_texts.extend(response)
175
+
176
+ return generated_texts
177
+
178
+ def remove_overlap(boxes, iou_threshold, ocr_bbox=None):
179
+ assert ocr_bbox is None or isinstance(ocr_bbox, List)
180
+
181
+ def box_area(box):
182
+ return (box[2] - box[0]) * (box[3] - box[1])
183
+
184
+ def intersection_area(box1, box2):
185
+ x1 = max(box1[0], box2[0])
186
+ y1 = max(box1[1], box2[1])
187
+ x2 = min(box1[2], box2[2])
188
+ y2 = min(box1[3], box2[3])
189
+ return max(0, x2 - x1) * max(0, y2 - y1)
190
+
191
+ def IoU(box1, box2):
192
+ intersection = intersection_area(box1, box2)
193
+ union = box_area(box1) + box_area(box2) - intersection + 1e-6
194
+ if box_area(box1) > 0 and box_area(box2) > 0:
195
+ ratio1 = intersection / box_area(box1)
196
+ ratio2 = intersection / box_area(box2)
197
+ else:
198
+ ratio1, ratio2 = 0, 0
199
+ return max(intersection / union, ratio1, ratio2)
200
+
201
+ def is_inside(box1, box2):
202
+ # return box1[0] >= box2[0] and box1[1] >= box2[1] and box1[2] <= box2[2] and box1[3] <= box2[3]
203
+ intersection = intersection_area(box1, box2)
204
+ ratio1 = intersection / box_area(box1)
205
+ return ratio1 > 0.95
206
+
207
+ boxes = boxes.tolist()
208
+ filtered_boxes = []
209
+ if ocr_bbox:
210
+ filtered_boxes.extend(ocr_bbox)
211
+ # print('ocr_bbox!!!', ocr_bbox)
212
+ for i, box1 in enumerate(boxes):
213
+ # if not any(IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2) for j, box2 in enumerate(boxes) if i != j):
214
+ is_valid_box = True
215
+ for j, box2 in enumerate(boxes):
216
+ # keep the smaller box
217
+ if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
218
+ is_valid_box = False
219
+ break
220
+ if is_valid_box:
221
+ # add the following 2 lines to include ocr bbox
222
+ if ocr_bbox:
223
+ # only add the box if it does not overlap with any ocr bbox
224
+ if not any(IoU(box1, box3) > iou_threshold and not is_inside(box1, box3) for k, box3 in enumerate(ocr_bbox)):
225
+ filtered_boxes.append(box1)
226
+ else:
227
+ filtered_boxes.append(box1)
228
+ return torch.tensor(filtered_boxes)
229
+
230
+
231
+ def remove_overlap_new(boxes, iou_threshold, ocr_bbox=None):
232
+ '''
233
+ ocr_bbox format: [{'type': 'text', 'bbox':[x,y], 'interactivity':False, 'content':str }, ...]
234
+ boxes format: [{'type': 'icon', 'bbox':[x,y], 'interactivity':True, 'content':None }, ...]
235
+
236
+ '''
237
+ assert ocr_bbox is None or isinstance(ocr_bbox, List)
238
+
239
+ def box_area(box):
240
+ return (box[2] - box[0]) * (box[3] - box[1])
241
+
242
+ def intersection_area(box1, box2):
243
+ x1 = max(box1[0], box2[0])
244
+ y1 = max(box1[1], box2[1])
245
+ x2 = min(box1[2], box2[2])
246
+ y2 = min(box1[3], box2[3])
247
+ return max(0, x2 - x1) * max(0, y2 - y1)
248
+
249
+ def IoU(box1, box2):
250
+ intersection = intersection_area(box1, box2)
251
+ union = box_area(box1) + box_area(box2) - intersection + 1e-6
252
+ if box_area(box1) > 0 and box_area(box2) > 0:
253
+ ratio1 = intersection / box_area(box1)
254
+ ratio2 = intersection / box_area(box2)
255
+ else:
256
+ ratio1, ratio2 = 0, 0
257
+ return max(intersection / union, ratio1, ratio2)
258
+
259
+ def is_inside(box1, box2):
260
+ # return box1[0] >= box2[0] and box1[1] >= box2[1] and box1[2] <= box2[2] and box1[3] <= box2[3]
261
+ intersection = intersection_area(box1, box2)
262
+ ratio1 = intersection / box_area(box1)
263
+ return ratio1 > 0.80
264
+
265
+ # boxes = boxes.tolist()
266
+ filtered_boxes = []
267
+ if ocr_bbox:
268
+ filtered_boxes.extend(ocr_bbox)
269
+ # print('ocr_bbox!!!', ocr_bbox)
270
+ for i, box1_elem in enumerate(boxes):
271
+ box1 = box1_elem['bbox']
272
+ is_valid_box = True
273
+ for j, box2_elem in enumerate(boxes):
274
+ # keep the smaller box
275
+ box2 = box2_elem['bbox']
276
+ if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
277
+ is_valid_box = False
278
+ break
279
+ if is_valid_box:
280
+ if ocr_bbox:
281
+ # keep yolo boxes + prioritize ocr label
282
+ box_added = False
283
+ ocr_labels = ''
284
+ for box3_elem in ocr_bbox:
285
+ if not box_added:
286
+ box3 = box3_elem['bbox']
287
+ if is_inside(box3, box1): # ocr inside icon
288
+ # box_added = True
289
+ # delete the box3_elem from ocr_bbox
290
+ try:
291
+ # gather all ocr labels
292
+ ocr_labels += box3_elem['content'] + ' '
293
+ filtered_boxes.remove(box3_elem)
294
+ except:
295
+ continue
296
+ # break
297
+ elif is_inside(box1, box3): # icon inside ocr, don't added this icon box, no need to check other ocr bbox bc no overlap between ocr bbox, icon can only be in one ocr box
298
+ box_added = True
299
+ break
300
+ else:
301
+ continue
302
+ if not box_added:
303
+ if ocr_labels:
304
+ filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': ocr_labels, 'source':'box_yolo_content_ocr'})
305
+ else:
306
+ filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None, 'source':'box_yolo_content_yolo'})
307
+ else:
308
+ filtered_boxes.append(box1)
309
+ return filtered_boxes # torch.tensor(filtered_boxes)
310
+
311
+
312
+ def load_image(image_path: str) -> Tuple[np.array, torch.Tensor]:
313
+ transform = T.Compose(
314
+ [
315
+ T.RandomResize([800], max_size=1333),
316
+ T.ToTensor(),
317
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
318
+ ]
319
+ )
320
+ image_source = Image.open(image_path).convert("RGB")
321
+ image = np.asarray(image_source)
322
+ image_transformed, _ = transform(image_source, None)
323
+ return image, image_transformed
324
+
325
+
326
+ def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str], text_scale: float,
327
+ text_padding=5, text_thickness=2, thickness=3) -> np.ndarray:
328
+ """
329
+ This function annotates an image with bounding boxes and labels.
330
+
331
+ Parameters:
332
+ image_source (np.ndarray): The source image to be annotated.
333
+ boxes (torch.Tensor): A tensor containing bounding box coordinates. in cxcywh format, pixel scale
334
+ logits (torch.Tensor): A tensor containing confidence scores for each bounding box.
335
+ phrases (List[str]): A list of labels for each bounding box.
336
+ text_scale (float): The scale of the text to be displayed. 0.8 for mobile/web, 0.3 for desktop # 0.4 for mind2web
337
+
338
+ Returns:
339
+ np.ndarray: The annotated image.
340
+ """
341
+ h, w, _ = image_source.shape
342
+ boxes = boxes * torch.Tensor([w, h, w, h])
343
+ xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
344
+ xywh = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xywh").numpy()
345
+ detections = sv.Detections(xyxy=xyxy)
346
+
347
+ labels = [f"{phrase}" for phrase in range(boxes.shape[0])]
348
+
349
+ box_annotator = BoxAnnotator(text_scale=text_scale, text_padding=text_padding,text_thickness=text_thickness,thickness=thickness) # 0.8 for mobile/web, 0.3 for desktop # 0.4 for mind2web
350
+ annotated_frame = image_source.copy()
351
+ annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels, image_size=(w,h))
352
+
353
+ label_coordinates = {f"{phrase}": v for phrase, v in zip(phrases, xywh)}
354
+ return annotated_frame, label_coordinates
355
+
356
+
357
+ def predict(model, image, caption, box_threshold, text_threshold):
358
+ """ Use huggingface model to replace the original model
359
+ """
360
+ model, processor = model['model'], model['processor']
361
+ device = model.device
362
+
363
+ inputs = processor(images=image, text=caption, return_tensors="pt").to(device)
364
+ with torch.no_grad():
365
+ outputs = model(**inputs)
366
+
367
+ results = processor.post_process_grounded_object_detection(
368
+ outputs,
369
+ inputs.input_ids,
370
+ box_threshold=box_threshold, # 0.4,
371
+ text_threshold=text_threshold, # 0.3,
372
+ target_sizes=[image.size[::-1]]
373
+ )[0]
374
+ boxes, logits, phrases = results["boxes"], results["scores"], results["labels"]
375
+ return boxes, logits, phrases
376
+
377
+
378
+ def predict_yolo(model, image, box_threshold, imgsz, scale_img, iou_threshold=0.7):
379
+ """ Use huggingface model to replace the original model
380
+ """
381
+ # model = model['model']
382
+ if scale_img:
383
+ result = model.predict(
384
+ source=image,
385
+ conf=box_threshold,
386
+ imgsz=imgsz,
387
+ iou=iou_threshold, # default 0.7
388
+ )
389
+ else:
390
+ result = model.predict(
391
+ source=image,
392
+ conf=box_threshold,
393
+ iou=iou_threshold, # default 0.7
394
+ )
395
+ boxes = result[0].boxes.xyxy#.tolist() # in pixel space
396
+ conf = result[0].boxes.conf
397
+ phrases = [str(i) for i in range(len(boxes))]
398
+
399
+ return boxes, conf, phrases
400
+
401
+ def int_box_area(box, w, h):
402
+ x1, y1, x2, y2 = box
403
+ int_box = [int(x1*w), int(y1*h), int(x2*w), int(y2*h)]
404
+ area = (int_box[2] - int_box[0]) * (int_box[3] - int_box[1])
405
+ return area
406
+
407
+ def get_som_labeled_img(image_source: Union[str, Image.Image], model=None, BOX_TRESHOLD=0.01, output_coord_in_ratio=False, ocr_bbox=None, text_scale=0.4, text_padding=5, draw_bbox_config=None, caption_model_processor=None, ocr_text=[], use_local_semantics=True, iou_threshold=0.9,prompt=None, scale_img=False, imgsz=None, batch_size=128):
408
+ """Process either an image path or Image object
409
+
410
+ Args:
411
+ image_source: Either a file path (str) or PIL Image object
412
+ ...
413
+ """
414
+ if isinstance(image_source, str):
415
+ image_source = Image.open(image_source)
416
+ image_source = image_source.convert("RGB") # for CLIP
417
+ w, h = image_source.size
418
+ if not imgsz:
419
+ imgsz = (h, w)
420
+ # print('image size:', w, h)
421
+ xyxy, logits, phrases = predict_yolo(model=model, image=image_source, box_threshold=BOX_TRESHOLD, imgsz=imgsz, scale_img=scale_img, iou_threshold=0.1)
422
+ xyxy = xyxy / torch.Tensor([w, h, w, h]).to(xyxy.device)
423
+ image_source = np.asarray(image_source)
424
+ phrases = [str(i) for i in range(len(phrases))]
425
+
426
+ # annotate the image with labels
427
+ if ocr_bbox:
428
+ ocr_bbox = torch.tensor(ocr_bbox) / torch.Tensor([w, h, w, h])
429
+ ocr_bbox=ocr_bbox.tolist()
430
+ else:
431
+ print('no ocr bbox!!!')
432
+ ocr_bbox = None
433
+
434
+ ocr_bbox_elem = [{'type': 'text', 'bbox':box, 'interactivity':False, 'content':txt, 'source': 'box_ocr_content_ocr'} for box, txt in zip(ocr_bbox, ocr_text) if int_box_area(box, w, h) > 0]
435
+ xyxy_elem = [{'type': 'icon', 'bbox':box, 'interactivity':True, 'content':None} for box in xyxy.tolist() if int_box_area(box, w, h) > 0]
436
+ filtered_boxes = remove_overlap_new(boxes=xyxy_elem, iou_threshold=iou_threshold, ocr_bbox=ocr_bbox_elem)
437
+
438
+ # sort the filtered_boxes so that the one with 'content': None is at the end, and get the index of the first 'content': None
439
+ filtered_boxes_elem = sorted(filtered_boxes, key=lambda x: x['content'] is None)
440
+ # get the index of the first 'content': None
441
+ starting_idx = next((i for i, box in enumerate(filtered_boxes_elem) if box['content'] is None), -1)
442
+ filtered_boxes = torch.tensor([box['bbox'] for box in filtered_boxes_elem])
443
+ print('len(filtered_boxes):', len(filtered_boxes), starting_idx)
444
+
445
+ # get parsed icon local semantics
446
+ time1 = time.time()
447
+ if use_local_semantics:
448
+ caption_model = caption_model_processor['model']
449
+ if 'phi3_v' in caption_model.config.model_type:
450
+ parsed_content_icon = get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor)
451
+ else:
452
+ parsed_content_icon = get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=prompt,batch_size=batch_size)
453
+ ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
454
+ icon_start = len(ocr_text)
455
+ parsed_content_icon_ls = []
456
+ # fill the filtered_boxes_elem None content with parsed_content_icon in order
457
+ for i, box in enumerate(filtered_boxes_elem):
458
+ if box['content'] is None:
459
+ box['content'] = parsed_content_icon.pop(0)
460
+ for i, txt in enumerate(parsed_content_icon):
461
+ parsed_content_icon_ls.append(f"Icon Box ID {str(i+icon_start)}: {txt}")
462
+ parsed_content_merged = ocr_text + parsed_content_icon_ls
463
+ else:
464
+ ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
465
+ parsed_content_merged = ocr_text
466
+ print('time to get parsed content:', time.time()-time1)
467
+
468
+ filtered_boxes = box_convert(boxes=filtered_boxes, in_fmt="xyxy", out_fmt="cxcywh")
469
+
470
+ phrases = [i for i in range(len(filtered_boxes))]
471
+
472
+ # draw boxes
473
+ if draw_bbox_config:
474
+ annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, **draw_bbox_config)
475
+ else:
476
+ annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, text_scale=text_scale, text_padding=text_padding)
477
+
478
+ pil_img = Image.fromarray(annotated_frame)
479
+ buffered = io.BytesIO()
480
+ pil_img.save(buffered, format="PNG")
481
+ encoded_image = base64.b64encode(buffered.getvalue()).decode('ascii')
482
+ if output_coord_in_ratio:
483
+ label_coordinates = {k: [v[0]/w, v[1]/h, v[2]/w, v[3]/h] for k, v in label_coordinates.items()}
484
+ assert w == annotated_frame.shape[1] and h == annotated_frame.shape[0]
485
+
486
+ return encoded_image, label_coordinates, filtered_boxes_elem
487
+
488
+
489
+ def get_xywh(input):
490
+ x, y, w, h = input[0][0], input[0][1], input[2][0] - input[0][0], input[2][1] - input[0][1]
491
+ x, y, w, h = int(x), int(y), int(w), int(h)
492
+ return x, y, w, h
493
+
494
+ def get_xyxy(input):
495
+ x, y, xp, yp = input[0][0], input[0][1], input[2][0], input[2][1]
496
+ x, y, xp, yp = int(x), int(y), int(xp), int(yp)
497
+ return x, y, xp, yp
498
+
499
+ def get_xywh_yolo(input):
500
+ x, y, w, h = input[0], input[1], input[2] - input[0], input[3] - input[1]
501
+ x, y, w, h = int(x), int(y), int(w), int(h)
502
+ return x, y, w, h
503
+
504
+ def check_ocr_box(image_source: Union[str, Image.Image], display_img = True, output_bb_format='xywh', goal_filtering=None, easyocr_args=None, use_paddleocr=False):
505
+ if isinstance(image_source, str):
506
+ image_source = Image.open(image_source)
507
+ if image_source.mode == 'RGBA':
508
+ # Convert RGBA to RGB to avoid alpha channel issues
509
+ image_source = image_source.convert('RGB')
510
+ image_np = np.array(image_source)
511
+ w, h = image_source.size
512
+ if use_paddleocr:
513
+ if easyocr_args is None:
514
+ text_threshold = 0.5
515
+ else:
516
+ text_threshold = easyocr_args['text_threshold']
517
+ result = paddle_ocr.ocr(image_np, cls=False)[0]
518
+ coord = [item[0] for item in result if item[1][1] > text_threshold]
519
+ text = [item[1][0] for item in result if item[1][1] > text_threshold]
520
+ else: # EasyOCR
521
+ if easyocr_args is None:
522
+ easyocr_args = {}
523
+ result = reader.readtext(image_np, **easyocr_args)
524
+ coord = [item[0] for item in result]
525
+ text = [item[1] for item in result]
526
+ if display_img:
527
+ opencv_img = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
528
+ bb = []
529
+ for item in coord:
530
+ x, y, a, b = get_xywh(item)
531
+ bb.append((x, y, a, b))
532
+ cv2.rectangle(opencv_img, (x, y), (x+a, y+b), (0, 255, 0), 2)
533
+ # matplotlib expects RGB
534
+ plt.imshow(cv2.cvtColor(opencv_img, cv2.COLOR_BGR2RGB))
535
+ else:
536
+ if output_bb_format == 'xywh':
537
+ bb = [get_xywh(item) for item in coord]
538
+ elif output_bb_format == 'xyxy':
539
+ bb = [get_xyxy(item) for item in coord]
540
+ return (text, bb), goal_filtering
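One detail of `util/utils - 副本.py` worth calling out: the nested `IoU` helpers used by `remove_overlap` and `remove_overlap_new` return the maximum of plain IoU and the two containment ratios, so a small box fully inside a large one scores ~1.0 even though its ordinary IoU is tiny. A standalone sketch of that behavior with toy numbers (no module import needed, since loading `util.utils` initializes EasyOCR and PaddleOCR):

```python
def box_area(box):
    return (box[2] - box[0]) * (box[3] - box[1])

def intersection_area(box1, box2):
    x1, y1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    x2, y2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    return max(0, x2 - x1) * max(0, y2 - y1)

def overlap_score(box1, box2):
    # Same formula as the nested IoU() above: max of plain IoU and the two containment ratios.
    inter = intersection_area(box1, box2)
    union = box_area(box1) + box_area(box2) - inter + 1e-6
    ratio1 = inter / box_area(box1) if box_area(box1) > 0 else 0
    ratio2 = inter / box_area(box2) if box_area(box2) > 0 else 0
    return max(inter / union, ratio1, ratio2)

big = [0, 0, 100, 100]    # e.g. an icon box (toy values)
small = [10, 10, 30, 30]  # e.g. an OCR text box fully inside it

# Plain IoU is only 400/10000 = 0.04, but the containment ratio is 1.0,
# so the score crosses the 0.7/0.8/0.9 thresholds used above and the pair is merged.
print(round(overlap_score(big, small), 4))
```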
util/utils.py ADDED
@@ -0,0 +1,641 @@
1
+ # from ultralytics import YOLO
2
+ import os
3
+ import io
4
+ import base64
5
+ import time
6
+ from PIL import Image, ImageDraw, ImageFont
7
+ import json
8
+ import requests
9
+ import ollama
10
+ # utility function
11
+ import os
12
+ from openai import AzureOpenAI
13
+
14
+ import json
15
+ import sys
16
+ import os
17
+ import cv2
18
+ import numpy as np
19
+ # %matplotlib inline
20
+ from matplotlib import pyplot as plt
21
+ import easyocr
22
+ from paddleocr import PaddleOCR
23
+ reader = easyocr.Reader(['en'])
24
+ paddle_ocr = PaddleOCR(
25
+ lang='en', # other lang also available
26
+ use_angle_cls=False,
27
+ use_gpu=False, # using cuda will conflict with pytorch in the same process
28
+ show_log=False,
29
+ max_batch_size=1024,
30
+ use_dilation=True, # improves accuracy
31
+ det_db_score_mode='slow', # improves accuracy
32
+ rec_batch_num=1024)
33
+ import time
34
+ import base64
35
+
36
+ import os
37
+ import ast
38
+ import torch
39
+ from typing import Tuple, List, Union
40
+ from torchvision.ops import box_convert
41
+ import re
42
+ from torchvision.transforms import ToPILImage
43
+ import supervision as sv
44
+ import torchvision.transforms as T
45
+ from util.box_annotator import BoxAnnotator
46
+
47
+
48
+ def get_caption_model_processor(model_name, model_name_or_path="Salesforce/blip2-opt-2.7b", device=None):
49
+ if not device:
50
+ device = "cuda" if torch.cuda.is_available() else "cpu"
51
+ if model_name == "blip2":
52
+ from transformers import Blip2Processor, Blip2ForConditionalGeneration
53
+ processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
54
+ if device == 'cpu':
55
+ model = Blip2ForConditionalGeneration.from_pretrained(
56
+ model_name_or_path, device_map=None, torch_dtype=torch.float32
57
+ )
58
+ else:
59
+ model = Blip2ForConditionalGeneration.from_pretrained(
60
+ model_name_or_path, device_map=None, torch_dtype=torch.float16
61
+ ).to(device)
62
+ elif model_name == "florence":
63
+ from transformers import AutoProcessor, AutoModelForCausalLM
64
+ processor = AutoProcessor.from_pretrained("microsoft/Florence-base", trust_remote_code=True)
65
+ if device == 'cpu':
66
+ model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-base", torch_dtype=torch.float32, trust_remote_code=True)
67
+ else:
68
+ model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-base", torch_dtype=torch.float16, trust_remote_code=True).to(device)
69
+ elif model_name == "ollama":
70
+ return {"model": None, "processor": None}
71
+ elif model_name == "florence2":
72
+ from transformers import AutoProcessor, AutoModelForCausalLM
73
+ processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
74
+ if device == 'cpu':
75
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float32, trust_remote_code=True)
76
+ else:
77
+ model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, trust_remote_code=True).to(device)
78
+ return {'model': model.to(device), 'processor': processor}
79
+
80
+
81
+ def get_yolo_model(model_path):
82
+ from ultralytics import YOLO
83
+ # Load the model.
84
+ model = YOLO(model_path)
85
+ return model
86
+
87
+
88
+ @torch.inference_mode()
89
+ def get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=None, batch_size=128):
90
+ parsed_content_icon = []
91
+ # Number of samples per batch, --> 128 roughly takes 4 GB of GPU memory for florence v2 model
92
+ to_pil = ToPILImage()
93
+ if starting_idx:
94
+ non_ocr_boxes = filtered_boxes[starting_idx:]
95
+ else:
96
+ non_ocr_boxes = filtered_boxes
97
+ croped_pil_image = []
98
+ for i, coord in enumerate(non_ocr_boxes):
99
+ try:
100
+ xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
101
+ ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
102
+ cropped_image = image_source[ymin:ymax, xmin:xmax, :]
103
+ cropped_image = cv2.resize(cropped_image, (64, 64))
104
+ croped_pil_image.append(to_pil(cropped_image))
105
+ except:
106
+ continue
107
+
108
+ model, processor = caption_model_processor['model'], caption_model_processor['processor']
109
+ if not prompt:
110
+ if 'florence' in model.config.name_or_path:
111
+ prompt = "<CAPTION>"
112
+ else:
113
+ prompt = "The image shows"
114
+
115
+ generated_texts = []
116
+ device = model.device
117
+ for i in range(0, len(croped_pil_image), batch_size):
118
+ start = time.time()
119
+ batch = croped_pil_image[i:i+batch_size]
120
+ t1 = time.time()
121
+ if model.device.type == 'cuda':
122
+ inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt", do_resize=False).to(device=device, dtype=torch.float16)
123
+ else:
124
+ inputs = processor(images=batch, text=[prompt]*len(batch), return_tensors="pt").to(device=device)
125
+
126
+ # explicitly build input_ids for the text prompt
127
+ text_inputs = processor.tokenizer([prompt]*len(batch), return_tensors="pt", padding=True)
128
+ inputs['input_ids'] = text_inputs['input_ids'].to(device)
129
+ if 'attention_mask' not in inputs:
130
+ inputs['attention_mask'] = text_inputs['attention_mask'].to(device)
131
+
132
+ # try adding decoder_input_ids
133
+ bos_token_id = processor.tokenizer.bos_token_id # get the BOS token ID
134
+ if bos_token_id is None: # if the BOS token ID is None, fall back to the EOS token ID
135
+ bos_token_id = processor.tokenizer.eos_token_id
136
+ decoder_input_ids = torch.tensor([[bos_token_id] * len(batch)]).T.to(device) # build decoder_input_ids with shape (batch_size, 1)
137
+ inputs['decoder_input_ids'] = decoder_input_ids # add decoder_input_ids to the inputs dict
138
+
139
+ print("Before model.generate call:")
140
+ print(f" Input attention_mask shape: {inputs['attention_mask'].shape if 'attention_mask' in inputs else 'No attention_mask in inputs'}")
141
+ print(f" Input pixel_values shape: {inputs['pixel_values'].shape if 'pixel_values' in inputs else 'No pixel_values in inputs'}")
142
+ print(f" Input input_ids shape: {inputs['input_ids'].shape if 'input_ids' in inputs else 'No input_ids in inputs'}")
143
+ print(f" Full inputs dictionary: {inputs}")
144
+ print(f" Inputs dictionary keys: {inputs.keys()}") # print the keys of the inputs dict
145
+
146
+ if 'florence' in model.config.name_or_path:
147
+ generated_ids = model.generate(**inputs, max_length=100, num_beams=5, no_repeat_ngram_size=2, early_stopping=True, num_return_sequences=1) # temperature=0.01, do_sample=True,
148
+ elif 'blip2' in model.config.name_or_path:
149
+ generated_ids = model.generate(**inputs, max_new_tokens=20)
150
+ elif 'phi3_v' in model.config.model_type:
151
+ generated_ids = model.generate(**inputs, max_new_tokens=50)
152
+ else: # clip-tag and mocov3
153
+ generated_ids = model(**inputs)
154
+
155
+ print("After model.generate call:") # status message
156
+ if isinstance(generated_ids, tuple): # handle differing model output types
157
+ generated_ids = generated_ids[0] # assume the first element is generated_ids
158
+ print(f" Generated IDs object: {generated_ids}") # print the generated_ids object
159
+ print(f" Generated IDs shape: {generated_ids.sequences.shape}") # changed to access the sequences attribute
160
+ generated_ids_for_decode = generated_ids.sequences # take the sequences attribute for decoding
161
+
162
+ generated_texts_batch = processor.batch_decode(generated_ids_for_decode, skip_special_tokens=True) # decode using the sequences attribute
163
+ generated_texts.extend(generated_texts_batch)
164
+ end = time.time()
165
+ print(f"batch {i//batch_size} takes {end-start:.2f} seconds, infer time {end-t1:.2f} seconds, batch size {len(batch)}, generated text: {generated_texts_batch}")
166
+
167
+ parsed_content_icon = generated_texts # assign first
168
+ return parsed_content_icon # then return parsed_content_icon
169
+
170
+
171
+
172
+ def get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor):
173
+ to_pil = ToPILImage()
174
+ if ocr_bbox:
175
+ non_ocr_boxes = filtered_boxes[len(ocr_bbox):]
176
+ else:
177
+ non_ocr_boxes = filtered_boxes
178
+ croped_pil_image = []
179
+ for i, coord in enumerate(non_ocr_boxes):
180
+ xmin, xmax = int(coord[0]*image_source.shape[1]), int(coord[2]*image_source.shape[1])
181
+ ymin, ymax = int(coord[1]*image_source.shape[0]), int(coord[3]*image_source.shape[0])
182
+ cropped_image = image_source[ymin:ymax, xmin:xmax, :]
183
+ croped_pil_image.append(to_pil(cropped_image))
184
+
185
+ model, processor = caption_model_processor['model'], caption_model_processor['processor']
186
+ device = model.device
187
+ messages = [{"role": "user", "content": "<|image_1|>\ndescribe the icon in one sentence"}]
188
+ prompt = processor.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
189
+
190
+ batch_size = 5 # Number of samples per batch
191
+ generated_texts = []
192
+
193
+ for i in range(0, len(croped_pil_image), batch_size):
194
+ images = croped_pil_image[i:i+batch_size]
195
+ image_inputs = [processor.image_processor(x, return_tensors="pt") for x in images]
196
+ inputs ={'input_ids': [], 'attention_mask': [], 'pixel_values': [], 'image_sizes': []}
197
+ texts = [prompt] * len(images)
198
+ for i, txt in enumerate(texts):
199
+ input = processor._convert_images_texts_to_inputs(image_inputs[i], txt, return_tensors="pt")
200
+ inputs['input_ids'].append(input['input_ids'])
201
+ inputs['attention_mask'].append(input['attention_mask'])
202
+ inputs['pixel_values'].append(input['pixel_values'])
203
+ inputs['image_sizes'].append(input['image_sizes'])
204
+ max_len = max([x.shape[1] for x in inputs['input_ids']])
205
+ for i, v in enumerate(inputs['input_ids']):
206
+ inputs['input_ids'][i] = torch.cat([processor.tokenizer.pad_token_id * torch.ones(1, max_len - v.shape[1], dtype=torch.long), v], dim=1)
207
+ inputs['attention_mask'][i] = torch.cat([torch.zeros(1, max_len - v.shape[1], dtype=torch.long), inputs['attention_mask'][i]], dim=1)
208
+ inputs_cat = {k: torch.concatenate(v).to(device) for k, v in inputs.items()}
209
+
210
+ generation_args = {
211
+ "max_new_tokens": 25,
212
+ "temperature": 0.01,
213
+ "do_sample": False,
214
+ }
215
+ generate_ids = model.generate(**inputs_cat, eos_token_id=processor.tokenizer.eos_token_id, **generation_args)
216
+ # # remove input tokens
217
+ generate_ids = generate_ids[:, inputs_cat['input_ids'].shape[1]:]
218
+ response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
219
+ response = [res.strip('\n').strip() for res in response]
220
+ generated_texts.extend(response)
221
+
222
+ return generated_texts
223
+
224
+ def remove_overlap(boxes, iou_threshold, ocr_bbox=None):
225
+ assert ocr_bbox is None or isinstance(ocr_bbox, List)
226
+
227
+ def box_area(box):
228
+ return (box[2] - box[0]) * (box[3] - box[1])
229
+
230
+ def intersection_area(box1, box2):
231
+ x1 = max(box1[0], box2[0])
232
+ y1 = max(box1[1], box2[1])
233
+ x2 = min(box1[2], box2[2])
234
+ y2 = min(box1[3], box2[3])
235
+ return max(0, x2 - x1) * max(0, y2 - y1)
236
+
237
+ def IoU(box1, box2):
238
+ intersection = intersection_area(box1, box2)
239
+ union = box_area(box1) + box_area(box2) - intersection + 1e-6
240
+ if box_area(box1) > 0 and box_area(box2) > 0:
241
+ ratio1 = intersection / box_area(box1)
242
+ ratio2 = intersection / box_area(box2)
243
+ else:
244
+ ratio1, ratio2 = 0, 0
245
+ return max(intersection / union, ratio1, ratio2)
246
+
247
+ def is_inside(box1, box2):
248
+ # return box1[0] >= box2[0] and box1[1] >= box2[1] and box1[2] <= box2[2] and box1[3] <= box2[3]
249
+ intersection = intersection_area(box1, box2)
250
+ ratio1 = intersection / box_area(box1)
251
+ return ratio1 > 0.95
252
+
253
+ boxes = boxes.tolist()
254
+ filtered_boxes = []
255
+ if ocr_bbox:
256
+ filtered_boxes.extend(ocr_bbox)
257
+ # print('ocr_bbox!!!', ocr_bbox)
258
+ for i, box1 in enumerate(boxes):
259
+ # if not any(IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2) for j, box2 in enumerate(boxes) if i != j):
260
+ is_valid_box = True
261
+ for j, box2 in enumerate(boxes):
262
+ # keep the smaller box
263
+ if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
264
+ is_valid_box = False
265
+ break
266
+ if is_valid_box:
267
+ # add the following 2 lines to include ocr bbox
268
+ if ocr_bbox:
269
+ # only add the box if it does not overlap with any ocr bbox
270
+ if not any(IoU(box1, box3) > iou_threshold and not is_inside(box1, box3) for k, box3 in enumerate(ocr_bbox)):
271
+ filtered_boxes.append(box1)
272
+ else:
273
+ filtered_boxes.append(box1)
274
+ return torch.tensor(filtered_boxes)
275
+
276
+ def remove_overlap_new(boxes, iou_threshold, ocr_bbox=None):
277
+ if not ocr_bbox:
278
+ return boxes
279
+
280
+ def box_area(box):
281
+ return (box[2] - box[0]) * (box[3] - box[1])
282
+
283
+ def intersection_area(box1, box2):
284
+ x1 = max(box1[0], box2[0])
285
+ y1 = max(box1[1], box2[1])
286
+ x2 = min(box1[2], box2[2])
287
+ y2 = min(box1[3], box2[3])
288
+ return max(0, x2 - x1) * max(0, y2 - y1)
289
+
290
+ def IoU(box1, box2):
291
+ intersection = intersection_area(box1, box2)
292
+ union = box_area(box1) + box_area(box2) - intersection + 1e-6
293
+ if box_area(box1) > 0 and box_area(box2) > 0:
294
+ ratio1 = intersection / box_area(box1)
295
+ ratio2 = intersection / box_area(box2)
296
+ else:
297
+ ratio1, ratio2 = 0, 0
298
+ return max(intersection / union, ratio1, ratio2)
299
+
300
+ def is_inside(box1, box2):
301
+ intersection = intersection_area(box1, box2)
302
+ ratio1 = intersection / box_area(box1)
303
+ return ratio1 > 0.80 # adjustable threshold
304
+
305
+ filtered_boxes = []
306
+ if ocr_bbox:
307
+ filtered_boxes.extend(ocr_bbox)
308
+
309
+ for i, box1_elem in enumerate(boxes):
310
+ box1 = box1_elem['bbox']
311
+ is_valid_box = True
312
+ for j, box2_elem in enumerate(boxes):
313
+ box2 = box2_elem['bbox']
314
+ if i != j and IoU(box1, box2) > iou_threshold and box_area(box1) > box_area(box2):
315
+ is_valid_box = False
316
+ break
317
+
318
+ if is_valid_box:
319
+ if ocr_bbox:
320
+ box_added = False
321
+ ocr_labels = ''
322
+ indices_to_remove = [] # indices of the ocr_bbox elements that need to be removed
323
+ for k, box3_elem in enumerate(ocr_bbox):
324
+ if not box_added:
325
+ box3 = box3_elem['bbox']
326
+ if is_inside(box3, box1):
327
+ try:
328
+ ocr_labels += box3_elem['content'] + ' '
329
+ indices_to_remove.append(k) # record the index
330
+ except:
331
+ continue
332
+ elif is_inside(box1, box3):
333
+ box_added = True
334
+ break
335
+ else:
336
+ continue
337
+
338
+ # remove elements in reverse order to avoid index shifting
339
+ for index in sorted(indices_to_remove, reverse=True):
340
+ filtered_boxes.pop(index)
341
+
342
+ if not box_added:
343
+ if ocr_labels:
344
+ filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': ocr_labels, 'source':'box_yolo_content_ocr'})
345
+ else:
346
+ filtered_boxes.append({'type': 'icon', 'bbox': box1_elem['bbox'], 'interactivity': True, 'content': None, 'source':'box_yolo_content_yolo'})
347
+ else:
348
+ filtered_boxes.append(box1_elem) # change: append box1_elem rather than box1
349
+ return filtered_boxes
350
+
351
+ def load_image(image_path: str) -> Tuple[np.array, torch.Tensor]:
352
+ transform = T.Compose(
353
+ [
354
+ T.RandomResize([800], max_size=1333),
355
+ T.ToTensor(),
356
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
357
+ ]
358
+ )
359
+ image_source = Image.open(image_path).convert("RGB")
360
+ image = np.asarray(image_source)
361
+ image_transformed, _ = transform(image_source, None)
362
+ return image, image_transformed
363
+
364
+
365
+ def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str], text_scale: float,
366
+ text_padding=5, text_thickness=2, thickness=3) -> np.ndarray:
367
+ """
368
+ This function annotates an image with bounding boxes and labels.
369
+
370
+ Parameters:
371
+ image_source (np.ndarray): The source image to be annotated.
372
+ boxes (torch.Tensor): A tensor containing bounding box coordinates. in cxcywh format, pixel scale
373
+ logits (torch.Tensor): A tensor containing confidence scores for each bounding box.
374
+ phrases (List[str]): A list of labels for each bounding box.
375
+ text_scale (float): The scale of the text to be displayed. 0.8 for mobile/web, 0.3 for desktop # 0.4 for mind2web
376
+
377
+ Returns:
378
+ np.ndarray: The annotated image.
379
+ """
380
+ h, w, _ = image_source.shape
381
+ boxes = boxes * torch.Tensor([w, h, w, h])
382
+ xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
383
+ xywh = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xywh").numpy()
384
+ detections = sv.Detections(xyxy=xyxy)
385
+
386
+ labels = [f"{phrase}" for phrase in range(boxes.shape[0])]
387
+
388
+ box_annotator = BoxAnnotator(text_scale=text_scale, text_padding=text_padding,text_thickness=text_thickness,thickness=thickness) # 0.8 for mobile/web, 0.3 for desktop # 0.4 for mind2web
389
+ annotated_frame = image_source.copy()
390
+ annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels, image_size=(w,h))
391
+
392
+ label_coordinates = {f"{phrase}": v for phrase, v in zip(phrases, xywh)}
393
+ return annotated_frame, label_coordinates
394
+
395
+
396
+ def predict(model, image, caption, box_threshold, text_threshold):
397
+ """ Use huggingface model to replace the original model
398
+ """
399
+ model, processor = model['model'], model['processor']
400
+ device = model.device
401
+
402
+ inputs = processor(images=image, text=caption, return_tensors="pt").to(device)
403
+ with torch.no_grad():
404
+ outputs = model(**inputs)
405
+
406
+ results = processor.post_process_grounded_object_detection(
407
+ outputs,
408
+ inputs.input_ids,
409
+ box_threshold=box_threshold, # 0.4,
410
+ text_threshold=text_threshold, # 0.3,
411
+ target_sizes=[image.size[::-1]]
412
+ )[0]
413
+ boxes, logits, phrases = results["boxes"], results["scores"], results["labels"]
414
+ return boxes, logits, phrases
415
+
416
+
417
+ def predict_yolo(model, image, box_threshold, imgsz, scale_img, iou_threshold=0.7):
+     """ Run an ultralytics YOLO model and return pixel-space xyxy boxes, confidences and index labels. """
+     # model = model['model']
+     if scale_img:
+         result = model.predict(
+             source=image,
+             conf=box_threshold,
+             imgsz=imgsz,
+             iou=iou_threshold,  # default 0.7
+         )
+     else:
+         result = model.predict(
+             source=image,
+             conf=box_threshold,
+             iou=iou_threshold,  # default 0.7
+         )
+     boxes = result[0].boxes.xyxy  # .tolist() # in pixel space
+     conf = result[0].boxes.conf
+     phrases = [str(i) for i in range(len(boxes))]
+
+     return boxes, conf, phrases
+
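+ # A usage sketch for predict_yolo, assuming an Ultralytics YOLO checkpoint for icon
+ # detection (the weights path and screenshot path are placeholders):
+ #
+ #     from ultralytics import YOLO
+ #     som_model = YOLO("weights/icon_detect/model.pt")
+ #     boxes, conf, phrases = predict_yolo(som_model, Image.open("screenshot.png"),
+ #                                         box_threshold=0.05, imgsz=640, scale_img=False)
+ #     # boxes: pixel xyxy tensor; phrases: "0", "1", ...
+
+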
+ def int_box_area(box, w, h):
+     x1, y1, x2, y2 = box
+     int_box = [int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)]
+     area = (int_box[2] - int_box[0]) * (int_box[3] - int_box[1])
+     return area
+
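+ # Worked example: for a normalized xyxy box covering the left half of a 1920x1080
+ # screenshot, int_box_area((0.0, 0.0, 0.5, 1.0), 1920, 1080) scales the box to pixels
+ # and returns 960 * 1080 = 1036800; callers below drop boxes whose area is 0.
+
+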
+ def get_som_labeled_img(image_source: Union[str, Image.Image], model=None, BOX_TRESHOLD=0.01, output_coord_in_ratio=False, ocr_bbox=None, text_scale=0.4, text_padding=5, draw_bbox_config=None, caption_model_processor=None, ocr_text=[], use_local_semantics=True, iou_threshold=0.9, prompt=None, scale_img=False, imgsz=None, batch_size=128):
+     """Process either an image path or Image object
+
+     Args:
+         image_source: Either a file path (str) or PIL Image object
+         ...
+     """
+
+     print(f"get_som_labeled_img start: ocr_bbox = {ocr_bbox}")
+     print(f"ocr_text type: {type(ocr_text)}")
+     print(f"ocr_text content: {ocr_text}")
+     # check_ocr_box returns ((text, bb), goal_filtering), so unpack the inner tuple;
+     # request xyxy boxes because the element dicts and IoU filtering below assume xyxy.
+     (ocr_text, ocr_bbox), _ = check_ocr_box(image_source, display_img=False, output_bb_format='xyxy', goal_filtering=None, easyocr_args=None, use_paddleocr=False)
+
+     # if ocr_bbox is None, fall back to an empty list
+     if ocr_bbox is None:
+         ocr_bbox = []
+
+     if isinstance(image_source, str):
+         image_source = Image.open(image_source)
+     image_source = image_source.convert("RGB")  # for CLIP
+     w, h = image_source.size
+     if not imgsz:
+         imgsz = (h, w)
+     # print('image size:', w, h)
+     xyxy, logits, phrases = predict_yolo(model=model, image=image_source, box_threshold=BOX_TRESHOLD, imgsz=imgsz, scale_img=scale_img, iou_threshold=0.1)
+     xyxy = xyxy / torch.Tensor([w, h, w, h]).to(xyxy.device)
+     image_source = np.asarray(image_source)
+     phrases = [str(i) for i in range(len(phrases))]
+
+     # annotate the image with labels
+     # handle both None and an empty list correctly
+     if ocr_bbox is not None and len(ocr_bbox) > 0:
+         print("About to convert ocr_bbox to a tensor")
+         ocr_bbox = torch.tensor(ocr_bbox) / torch.Tensor([w, h, w, h])
+         print(f"ocr_bbox after tensor conversion: ocr_bbox = {ocr_bbox}")
+
+         print("About to convert ocr_bbox back to a list")
+         ocr_bbox = ocr_bbox.tolist()
+         print(f"ocr_bbox after list conversion: ocr_bbox = {ocr_bbox}")
+     else:
+         print('no ocr bbox!!!')
+         ocr_bbox = []  # use an empty list instead of None
+
+     print(f"get_som_labeled_img AFTER OCR BBOX PROCESSING: ocr_bbox = {ocr_bbox}")
+
+     print("About to create ocr_bbox_elem")
+     # only build OCR elements when both ocr_bbox and ocr_text are non-empty
+     if ocr_bbox and ocr_text:
+         ocr_bbox_elem = [{'type': 'text', 'bbox': box, 'interactivity': False, 'content': txt, 'source': 'box_ocr_content_ocr'} for box, txt in zip(ocr_bbox, ocr_text) if int_box_area(box, w, h) > 0]
+     else:
+         ocr_bbox_elem = []
+     print(f"ocr_bbox_elem created: ocr_bbox = {ocr_bbox}")
+
+     print("About to create xyxy_elem")
+     xyxy_elem = [{'type': 'icon', 'bbox': box, 'interactivity': True, 'content': None} for box in xyxy.tolist() if int_box_area(box, w, h) > 0]
+     print(f"xyxy_elem created: ocr_bbox = {ocr_bbox}")
+
+     print("About to create filtered_boxes")
+     filtered_boxes = remove_overlap_new(boxes=xyxy_elem, iou_threshold=iou_threshold, ocr_bbox=ocr_bbox_elem)
+     print(f"filtered_boxes created: ocr_bbox = {ocr_bbox}")
+
+     print(f"ocr_text type: {type(ocr_text)}")
+     print(f"ocr_text content: {ocr_text}")
+
+     # sort filtered_boxes so that the ones with 'content': None are at the end, and get the index of the first 'content': None
+     filtered_boxes_elem = sorted(filtered_boxes, key=lambda x: x['content'] is None)
+     # get the index of the first 'content': None
+     starting_idx = next((i for i, box in enumerate(filtered_boxes_elem) if box['content'] is None), -1)
+     filtered_boxes = torch.tensor([box['bbox'] for box in filtered_boxes_elem])
+     print('len(filtered_boxes):', len(filtered_boxes), starting_idx)
+
+     # get parsed icon local semantics
+     time1 = time.time()
+     caption_model = caption_model_processor['model'] if caption_model_processor else None
+
+     if caption_model_processor is None:  # guard against a missing caption model processor
+         print("Warning: caption_model_processor is None; icon captioning will be skipped.")
+     elif caption_model is not None:  # keep the original None check on caption_model
+         if 'phi3_v' in caption_model.config.model_type:
+             parsed_content_icon = get_parsed_content_icon_phi3v(filtered_boxes, ocr_bbox, image_source, caption_model_processor)
+         else:
+             print("Before get_parsed_content_icon call:")
+             print(f"  filtered_boxes shape: {filtered_boxes.shape}")
+             print(f"  image_source type: {type(image_source)}")
+             print(f"  caption_model_processor: {caption_model_processor}")
+             parsed_content_icon = get_parsed_content_icon(filtered_boxes, starting_idx, image_source, caption_model_processor, prompt=prompt, batch_size=batch_size)
+         ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
+         icon_start = len(ocr_text)
+         parsed_content_icon_ls = []
+         # fill the filtered_boxes_elem None content with parsed_content_icon in order
+         for i, box in enumerate(filtered_boxes_elem):
+             if box['content'] is None:
+                 box['content'] = parsed_content_icon.pop(0)
+         for i, txt in enumerate(parsed_content_icon):
+             parsed_content_icon_ls.append(f"Icon Box ID {str(i + icon_start)}: {txt}")
+         parsed_content_merged = ocr_text + parsed_content_icon_ls
+     else:
+         ocr_text = [f"Text Box ID {i}: {txt}" for i, txt in enumerate(ocr_text)]
+         parsed_content_merged = ocr_text
+     print('time to get parsed content:', time.time() - time1)
+
+     filtered_boxes = box_convert(boxes=filtered_boxes, in_fmt="xyxy", out_fmt="cxcywh")
+
+     phrases = [i for i in range(len(filtered_boxes))]
+
+     # draw boxes
+     if draw_bbox_config:
+         annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, **draw_bbox_config)
+     else:
+         annotated_frame, label_coordinates = annotate(image_source=image_source, boxes=filtered_boxes, logits=logits, phrases=phrases, text_scale=text_scale, text_padding=text_padding)
+
+     pil_img = Image.fromarray(annotated_frame)
+     buffered = io.BytesIO()
+     pil_img.save(buffered, format="PNG")
+     encoded_image = base64.b64encode(buffered.getvalue()).decode('ascii')
+     if output_coord_in_ratio:
+         label_coordinates = {k: [v[0] / w, v[1] / h, v[2] / w, v[3] / h] for k, v in label_coordinates.items()}
+         assert w == annotated_frame.shape[1] and h == annotated_frame.shape[0]
+
+     return encoded_image, label_coordinates, filtered_boxes_elem
+
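+ # An end-to-end usage sketch (illustrative; the weights path and screenshot path are
+ # placeholders, and passing caption_model_processor=None skips icon captioning with
+ # the warning above):
+ #
+ #     from ultralytics import YOLO
+ #     som_model = YOLO("weights/icon_detect/model.pt")
+ #     encoded_img, coords, elems = get_som_labeled_img(
+ #         "screenshot.png", model=som_model, BOX_TRESHOLD=0.05,
+ #         caption_model_processor=None, text_scale=0.8, iou_threshold=0.7)
+ #     # encoded_img: base64-encoded annotated PNG
+ #     # coords: {"0": [x, y, w, h], ...} in pixels (ratios if output_coord_in_ratio=True)
+ #     # elems: element dicts with type / bbox / interactivity / content
+
+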
+ def get_xywh(input):
+     x, y, w, h = input[0][0], input[0][1], input[2][0] - input[0][0], input[2][1] - input[0][1]
+     x, y, w, h = int(x), int(y), int(w), int(h)
+     return x, y, w, h
+
+
+ def get_xyxy(input):
+     x, y, xp, yp = input[0][0], input[0][1], input[2][0], input[2][1]
+     x, y, xp, yp = int(x), int(y), int(xp), int(yp)
+     return x, y, xp, yp
+
+
+ def get_xywh_yolo(input):
+     x, y, w, h = input[0], input[1], input[2] - input[0], input[3] - input[1]
+     x, y, w, h = int(x), int(y), int(w), int(h)
+     return x, y, w, h
+
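+ # Worked example: EasyOCR returns each detection box as a 4-point quad
+ # [top-left, top-right, bottom-right, bottom-left]. For
+ # quad = [(10, 20), (110, 20), (110, 50), (10, 50)]:
+ #     get_xywh(quad) -> (10, 20, 100, 30)
+ #     get_xyxy(quad) -> (10, 20, 110, 50)
+ # get_xywh_yolo converts a flat (x1, y1, x2, y2) box to xywh instead.
+
+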
+ def check_ocr_box(image_source: Union[str, Image.Image], display_img=True, output_bb_format='xywh', goal_filtering=None, easyocr_args=None, use_paddleocr=False):
+     if image_source is ...:  # guard against an accidental Ellipsis argument
+         print("Error: image_source is an Ellipsis object!")
+         return ([], []), goal_filtering  # keep the normal return shape
+
+     if isinstance(image_source, str):
+         image_source = Image.open(image_source)
+     if image_source.mode == 'RGBA':
+         # Convert RGBA to RGB to avoid alpha channel issues
+         image_source = image_source.convert('RGB')
+     image_np = np.array(image_source)
+     w, h = image_source.size
+     if use_paddleocr:
+         if easyocr_args is None:
+             text_threshold = 0.5
+         else:
+             text_threshold = easyocr_args['text_threshold']
+         result = paddle_ocr.ocr(image_np, cls=False)[0]
+         coord = [item[0] for item in result if item[1][1] > text_threshold]
+         text = [item[1][0] for item in result if item[1][1] > text_threshold]
+     else:  # EasyOCR
+         if easyocr_args is None:
+             easyocr_args = {}
+         result = reader.readtext(image_np, **easyocr_args)
+         coord = [item[0] for item in result]
+         text = [item[1] for item in result]
+     if display_img:
+         opencv_img = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
+         bb = []
+         for item in coord:
+             x, y, a, b = get_xywh(item)
+             bb.append((x, y, a, b))
+             cv2.rectangle(opencv_img, (x, y), (x + a, y + b), (0, 255, 0), 2)
+         # matplotlib expects RGB
+         plt.imshow(cv2.cvtColor(opencv_img, cv2.COLOR_BGR2RGB))
+     else:
+         if output_bb_format == 'xywh':
+             bb = [get_xywh(item) for item in coord]
+         elif output_bb_format == 'xyxy':
+             bb = [get_xyxy(item) for item in coord]
+     return (text, bb), goal_filtering
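+
+ # A usage sketch for check_ocr_box (illustrative path; it assumes the module-level
+ # EasyOCR `reader` / PaddleOCR `paddle_ocr` objects were initialised earlier in this file).
+ # With display_img=False it returns ((texts, boxes), goal_filtering), with boxes in
+ # the requested output_bb_format:
+ #
+ #     (texts, boxes), _ = check_ocr_box("screenshot.png", display_img=False,
+ #                                       output_bb_format='xyxy',
+ #                                       easyocr_args={'paragraph': False, 'text_threshold': 0.9})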