HBDing committed · Commit 7cfa686 · 1 Parent(s): 9402da4
Files changed (2)
  1. app.py +86 -62
  2. space.py +7 -152
app.py CHANGED
@@ -1,56 +1,67 @@
  import gradio as gr
- # import spaces #[uncomment to use ZeroGPU]
  from gradio_image_annotation import image_annotator
- from diffusers import StableDiffusionPipeline
- import os
- import torch
  from diffusers import EulerDiscreteScheduler
- from migc.migc_utils import seed_everything
+ import torch
+ import os
+ import random
+
  from migc.migc_pipeline import StableDiffusionMIGCPipeline, MIGCProcessor, AttentionStore
+ from migc.migc_utils import seed_everything, load_migc
+ from huggingface_hub import hf_hub_download
 
- from huggingface_hub import hf_hub_download
-
- # Download files
- migc_ckpt_path = hf_hub_download(
-     repo_id="limuloo1999/MIGC",
-     filename="MIGC_SD14.ckpt",
-     repo_type="model"  # optional; "model" is the default
- )
-
- RV_path = hf_hub_download(
-     repo_id="SG161222/Realistic_Vision_V6.0_B1_noVAE",
-     filename="Realistic_Vision_V6.0_NV_B1.safetensors",
-     repo_type="model"  # optional; "model" is the default
- )
-
- # Load model
- # pipe = StableDiffusionMIGCPipeline.from_pretrained(
- #     "rSG161222/Realistic_Vision_V6.0_B1_noVAE",
- #     torch_dtype=torch.float32
- # )
- pipe = StableDiffusionMIGCPipeline.from_single_file(
-     RV_path,
-     torch_dtype=torch.float32
- )
- pipe.safety_checker = None
- pipe.attention_store = AttentionStore()
- from migc.migc_utils import load_migc
- load_migc(pipe.unet, pipe.attention_store,
-           migc_ckpt_path, attn_processor=MIGCProcessor)
- pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
- pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
-
- example_annotation = {
-     "image": os.path.join(os.path.dirname(__file__), "background.png"),
-     "boxes": [],
- }
-
- # @spaces.GPU
- def get_boxes_json(annotations):
-     print(annotations)
+ # Download model files
+ migc_ckpt_path = hf_hub_download(repo_id="limuloo1999/MIGC", filename="MIGC_SD14.ckpt")
+ RV_path = hf_hub_download(repo_id="SG161222/Realistic_Vision_V6.0_B1_noVAE", filename="Realistic_Vision_V6.0_NV_B1.safetensors")
+ anime_path = hf_hub_download(repo_id="ckpt/cetus-mix", filename="cetusMix_v4.safetensors")
+
+ # -------- Style switcher class --------
+ class StyleSwitcher:
+     def __init__(self):
+         self.pipe = None
+         self.attn_store = AttentionStore()
+         self.styles = {
+             "realistic": RV_path,
+             "anime": anime_path
+         }
+         self.current_style = None
+
+     def load_model(self, style):
+         if style == self.current_style:
+             return self.pipe
+
+         if self.pipe:
+             del self.pipe
+             torch.cuda.empty_cache()
+             print(f"[Info] Switched from {self.current_style} to {style}.")
+
+         model_path = self.styles[style]
+         print(f"[Info] Loading {style} model...")
+
+         self.pipe = StableDiffusionMIGCPipeline.from_single_file(
+             model_path,
+             torch_dtype=torch.float32
+         )
+         self.pipe.safety_checker = None
+         self.pipe.attention_store = self.attn_store
+         load_migc(self.pipe.unet, self.attn_store, migc_ckpt_path, attn_processor=MIGCProcessor)
+         self.pipe = self.pipe.to("cuda" if torch.cuda.is_available() else "cpu")
+         self.pipe.scheduler = EulerDiscreteScheduler.from_config(self.pipe.scheduler.config)
+
+         self.current_style = style
+         return self.pipe
+
+ style_switcher = StyleSwitcher()
+
+ # ⬇️ New helper: return a random seed
+ def generate_random_seed():
+     return random.randint(0, 2**32 - 1)
+
+ # Generation function
+ def get_boxes_json(annotations, seed_value, edit_mode, style_selection):
+     seed_everything(seed_value)
+
+     pipe = style_switcher.load_model(style_selection)
+
      image = annotations["image"]
      width = image.shape[1]
      height = image.shape[0]
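Note on the hunk above: the new `StyleSwitcher` keeps a single pipeline resident and reloads only when the requested style changes (deleting the old pipeline and emptying the CUDA cache first). A self-contained sketch of just that caching behaviour, with a string standing in for the real `from_single_file` load (hypothetical, for illustration only; not part of the commit):

```python
# Stub of StyleSwitcher's lazy-reload logic; strings stand in for pipelines.
class StubSwitcher:
    def __init__(self, styles):
        self.styles = styles          # style name -> checkpoint path
        self.pipe = None
        self.current_style = None

    def load_model(self, style):
        if style == self.current_style:
            return self.pipe          # cache hit: reuse the loaded pipeline
        # the real class would `del self.pipe`, empty the CUDA cache, then
        # call StableDiffusionMIGCPipeline.from_single_file(...) here
        self.pipe = f"pipeline<{self.styles[style]}>"
        self.current_style = style
        return self.pipe

sw = StubSwitcher({"realistic": "rv.safetensors", "anime": "cetus.safetensors"})
assert sw.load_model("realistic") is sw.load_model("realistic")  # no reload
print(sw.load_model("anime"))  # only a style change triggers a reload
```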
@@ -58,36 +69,49 @@ def get_boxes_json(annotations):
      prompt_final = [[]]
      bboxes = [[]]
      for box in boxes:
-         box["xmin"] = box["xmin"] / width
-         box["xmax"] = box["xmax"] / width
-         box["ymin"] = box["ymin"] / height
-         box["ymax"] = box["ymax"] / height
+         box["xmin"] /= width
+         box["xmax"] /= width
+         box["ymin"] /= height
+         box["ymax"] /= height
          prompt_final[0].append(box["label"])
          bboxes[0].append([box["xmin"], box["ymin"], box["xmax"], box["ymax"]])
-     # import pdb; pdb.set_trace()
+
      prompt = ", ".join(prompt_final[0])
      prompt_final[0].insert(0, prompt)
+
      negative_prompt = 'worst quality, low quality, bad anatomy, watermark, text, blurry'
-     image = pipe(prompt_final, bboxes, num_inference_steps=30, guidance_scale=7.5,
-                  MIGCsteps=15, aug_phase_with_and=False, negative_prompt=negative_prompt).images[0]
-     return image
-     # return annotations["boxes"]
+     output_image = pipe(prompt_final, bboxes, num_inference_steps=30, guidance_scale=7.5,
+                         MIGCsteps=15, aug_phase_with_and=False, negative_prompt=negative_prompt,
+                         sa_preserve=True, use_sa_preserve=edit_mode).images[0]
+     return output_image
+
+ # Example annotation
+ example_annotation = {
+     "image": os.path.join(os.path.dirname(__file__), "background.png"),
+     "boxes": [],
+ }
 
+ # ------------- Gradio UI -------------
  with gr.Blocks() as demo:
-     with gr.Tab("DreamRenderer", id="DreamRenderer"):
+     with gr.Tab("DreamRenderer", id="DreamRenderer"):
          with gr.Row():
              with gr.Column(scale=1):
-                 annotator = image_annotator(
-                     example_annotation,
-                     height=512,
-                     width=512
-                 )
+                 annotator = image_annotator(example_annotation, height=512, width=512)
              with gr.Column(scale=1):
                  generated_image = gr.Image(label="Generated Image", height=512, width=512)
+                 seed_input = gr.Number(label="Seed (Optional)", precision=0)
+                 seed_random_btn = gr.Button("🎲 Random Seed")
+                 edit_mode_toggle = gr.Checkbox(label="Edit Mode")
+                 style_selector = gr.Radio(choices=["realistic", "anime"], label="Style", value="realistic")
 
-     button_get = gr.Button("Generation")
-     button_get.click(get_boxes_json, inputs=annotator, outputs=generated_image)
+         button_get = gr.Button("Generate Image")
+         button_get.click(
+             fn=get_boxes_json,
+             inputs=[annotator, seed_input, edit_mode_toggle, style_selector],
+             outputs=generated_image
+         )
+
+         seed_random_btn.click(fn=generate_random_seed, inputs=[], outputs=seed_input)
 
  if __name__ == "__main__":
-     demo.launch()
+     demo.launch(share=True)
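Note: `get_boxes_json` converts the annotator's absolute pixel boxes into the relative `[xmin, ymin, xmax, ymax]` coordinates passed to the MIGC pipeline, and prepends the joined instance labels as the global prompt. A minimal sketch of just that transformation, assuming the annotator's default `numpy` image type (dummy data, not from the commit):

```python
import numpy as np

# Dummy annotator output: a 480x640 image and one absolute-pixel box.
annotations = {
    "image": np.zeros((480, 640, 3), dtype=np.uint8),
    "boxes": [{"xmin": 64, "ymin": 48, "xmax": 320, "ymax": 240, "label": "a cat"}],
}

image = annotations["image"]
height, width = image.shape[0], image.shape[1]

prompt_final, bboxes = [[]], [[]]
for box in annotations["boxes"]:
    box["xmin"] /= width    # normalize to [0, 1]
    box["xmax"] /= width
    box["ymin"] /= height
    box["ymax"] /= height
    prompt_final[0].append(box["label"])
    bboxes[0].append([box["xmin"], box["ymin"], box["xmax"], box["ymax"]])

# MIGC's prompt format: the joined instance phrases first, then each phrase.
prompt_final[0].insert(0, ", ".join(prompt_final[0]))

print(prompt_final)  # [['a cat', 'a cat']]
print(bboxes)        # [[[0.1, 0.1, 0.5, 0.5]]]
```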
space.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
  from app import demo as app
  import os
 
- _docs = {'image_annotator': {'description': 'Creates a component to annotate images with bounding boxes. The bounding boxes can be created and edited by the user or be passed by code.\nIt is also possible to predefine a set of valid classes and colors.', 'members': {'__init__': {'value': {'type': 'dict | None', 'default': 'None', 'description': "A dict or None. The dictionary must contain a key 'image' with either an URL to an image, a numpy image or a PIL image. Optionally it may contain a key 'boxes' with a list of boxes. Each box must be a dict wit the keys: 'xmin', 'ymin', 'xmax' and 'ymax' with the absolute image coordinates of the box. Optionally can also include the keys 'label' and 'color' describing the label and color of the box. Color must be a tuple of RGB values (e.g. `(255,255,255)`)."}, 'boxes_alpha': {'type': 'float | None', 'default': 'None', 'description': 'Opacity of the bounding boxes 0 and 1.'}, 'label_list': {'type': 'list[str] | None', 'default': 'None', 'description': 'List of valid labels.'}, 'label_colors': {'type': 'list[str] | None', 'default': 'None', 'description': 'Optional list of colors for each label when `label_list` is used. Colors must be a tuple of RGB values (e.g. `(255,255,255)`).'}, 'box_min_size': {'type': 'int | None', 'default': 'None', 'description': 'Minimum valid bounding box size.'}, 'handle_size': {'type': 'int | None', 'default': 'None', 'description': 'Size of the bounding box resize handles.'}, 'box_thickness': {'type': 'int | None', 'default': 'None', 'description': 'Thickness of the bounding box outline.'}, 'box_selected_thickness': {'type': 'int | None', 'default': 'None', 'description': 'Thickness of the bounding box outline when it is selected.'}, 'disable_edit_boxes': {'type': 'bool | None', 'default': 'None', 'description': 'Disables the ability to set and edit the label and color of the boxes.'}, 'single_box': {'type': 'bool', 'default': 'False', 'description': 'If True, at most one box can be drawn.'}, 'height': {'type': 'int | str | None', 'default': 'None', 'description': 'The height of the displayed image, specified in pixels if a number is passed, or in CSS units if a string is passed.'}, 'width': {'type': 'int | str | None', 'default': 'None', 'description': 'The width of the displayed image, specified in pixels if a number is passed, or in CSS units if a string is passed.'}, 'image_mode': {'type': '"1"\n | "L"\n | "P"\n | "RGB"\n | "RGBA"\n | "CMYK"\n | "YCbCr"\n | "LAB"\n | "HSV"\n | "I"\n | "F"', 'default': '"RGB"', 'description': '"RGB" if color, or "L" if black and white. See https://pillow.readthedocs.io/en/stable/handbook/concepts.html for other supported image modes and their meaning.'}, 'sources': {'type': 'list["upload" | "webcam" | "clipboard"] | None', 'default': '["upload", "webcam", "clipboard"]', 'description': 'List of sources for the image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "clipboard" allows users to paste an image from the clipboard. If None, defaults to ["upload", "webcam", "clipboard"].'}, 'image_type': {'type': '"numpy" | "pil" | "filepath"', 'default': '"numpy"', 'description': 'The format the image is converted before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. 
If the image is SVG, the `type` is ignored and the filepath of the SVG is returned.'}, 'label': {'type': 'str | None', 'default': 'None', 'description': 'The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.'}, 'container': {'type': 'bool', 'default': 'True', 'description': 'If True, will place the component in a container - providing some extra padding around the border.'}, 'scale': {'type': 'int | None', 'default': 'None', 'description': 'relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.'}, 'min_width': {'type': 'int', 'default': '160', 'description': 'minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.'}, 'interactive': {'type': 'bool | None', 'default': 'True', 'description': 'if True, will allow users to upload and annotate an image; if False, can only be used to display annotated images.'}, 'visible': {'type': 'bool', 'default': 'True', 'description': 'If False, component will be hidden.'}, 'elem_id': {'type': 'str | None', 'default': 'None', 'description': 'An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.'}, 'elem_classes': {'type': 'list[str] | str | None', 'default': 'None', 'description': 'An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.'}, 'render': {'type': 'bool', 'default': 'True', 'description': 'If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.'}, 'show_label': {'type': 'bool | None', 'default': 'None', 'description': 'if True, will display label.'}, 'show_download_button': {'type': 'bool', 'default': 'True', 'description': 'If True, will show a button to download the image.'}, 'show_share_button': {'type': 'bool | None', 'default': 'None', 'description': 'If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.'}, 'show_clear_button': {'type': 'bool | None', 'default': 'True', 'description': 'If True, will show a button to clear the current image.'}, 'show_remove_button': {'type': 'bool | None', 'default': 'None', 'description': 'If True, will show a button to remove the selected bounding box.'}, 'handles_cursor': {'type': 'bool | None', 'default': 'True', 'description': 'If True, the cursor will change when hovering over box handles in drag mode. 
Can be CPU-intensive.'}}, 'postprocess': {'value': {'type': 'dict | None', 'description': 'A dict with an image and an optional list of boxes or None.'}}, 'preprocess': {'return': {'type': 'dict | None', 'description': 'A dict with the image and boxes or None.'}, 'value': None}}, 'events': {'clear': {'type': None, 'default': None, 'description': 'This listener is triggered when the user clears the image_annotator using the clear button for the component.'}, 'change': {'type': None, 'default': None, 'description': 'Triggered when the value of the image_annotator changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input.'}, 'upload': {'type': None, 'default': None, 'description': 'This listener is triggered when the user uploads a file into the image_annotator.'}}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': {'image_annotator': []}}}
+ _docs = {'LayoutPainter': {'description': 'Creates a component to annotate images with bounding boxes. The bounding boxes can be created and edited by the user or be passed by code.\nIt is also possible to predefine a set of valid classes and colors.', 'members': {'__init__': {'value': {'type': 'dict | None', 'default': 'None', 'description': "A dict or None. The dictionary must contain a key 'image' with either an URL to an image, a numpy image or a PIL image. Optionally it may contain a key 'boxes' with a list of boxes. Each box must be a dict wit the keys: 'xmin', 'ymin', 'xmax' and 'ymax' with the absolute image coordinates of the box. Optionally can also include the keys 'label' and 'color' describing the label and color of the box. Color must be a tuple of RGB values (e.g. `(255,255,255)`)."}, 'boxes_alpha': {'type': 'float | None', 'default': 'None', 'description': 'Opacity of the bounding boxes 0 and 1.'}, 'label_list': {'type': 'list[str] | None', 'default': 'None', 'description': 'List of valid labels.'}, 'label_colors': {'type': 'list[str] | None', 'default': 'None', 'description': 'Optional list of colors for each label when `label_list` is used. Colors must be a tuple of RGB values (e.g. `(255,255,255)`).'}, 'box_min_size': {'type': 'int | None', 'default': 'None', 'description': 'Minimum valid bounding box size.'}, 'handle_size': {'type': 'int | None', 'default': 'None', 'description': 'Size of the bounding box resize handles.'}, 'box_thickness': {'type': 'int | None', 'default': 'None', 'description': 'Thickness of the bounding box outline.'}, 'box_selected_thickness': {'type': 'int | None', 'default': 'None', 'description': 'Thickness of the bounding box outline when it is selected.'}, 'disable_edit_boxes': {'type': 'bool | None', 'default': 'None', 'description': 'Disables the ability to set and edit the label and color of the boxes.'}, 'single_box': {'type': 'bool', 'default': 'False', 'description': 'If True, at most one box can be drawn.'}, 'height': {'type': 'int | str | None', 'default': 'None', 'description': 'The height of the displayed image, specified in pixels if a number is passed, or in CSS units if a string is passed.'}, 'width': {'type': 'int | str | None', 'default': 'None', 'description': 'The width of the displayed image, specified in pixels if a number is passed, or in CSS units if a string is passed.'}, 'image_mode': {'type': '"1"\n | "L"\n | "P"\n | "RGB"\n | "RGBA"\n | "CMYK"\n | "YCbCr"\n | "LAB"\n | "HSV"\n | "I"\n | "F"', 'default': '"RGB"', 'description': '"RGB" if color, or "L" if black and white. See https://pillow.readthedocs.io/en/stable/handbook/concepts.html for other supported image modes and their meaning.'}, 'sources': {'type': 'list["upload" | "webcam" | "clipboard"] | None', 'default': '["upload", "webcam", "clipboard"]', 'description': 'List of sources for the image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "clipboard" allows users to paste an image from the clipboard. If None, defaults to ["upload", "webcam", "clipboard"].'}, 'image_type': {'type': '"numpy" | "pil" | "filepath"', 'default': '"numpy"', 'description': 'The format the image is converted before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. 
If the image is SVG, the `type` is ignored and the filepath of the SVG is returned.'}, 'label': {'type': 'str | None', 'default': 'None', 'description': 'The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.'}, 'container': {'type': 'bool', 'default': 'True', 'description': 'If True, will place the component in a container - providing some extra padding around the border.'}, 'scale': {'type': 'int | None', 'default': 'None', 'description': 'relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.'}, 'min_width': {'type': 'int', 'default': '160', 'description': 'minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.'}, 'interactive': {'type': 'bool | None', 'default': 'True', 'description': 'if True, will allow users to upload and annotate an image; if False, can only be used to display annotated images.'}, 'visible': {'type': 'bool', 'default': 'True', 'description': 'If False, component will be hidden.'}, 'elem_id': {'type': 'str | None', 'default': 'None', 'description': 'An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.'}, 'elem_classes': {'type': 'list[str] | str | None', 'default': 'None', 'description': 'An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.'}, 'render': {'type': 'bool', 'default': 'True', 'description': 'If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.'}, 'show_label': {'type': 'bool | None', 'default': 'None', 'description': 'if True, will display label.'}, 'show_download_button': {'type': 'bool', 'default': 'True', 'description': 'If True, will show a button to download the image.'}, 'show_share_button': {'type': 'bool | None', 'default': 'None', 'description': 'If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.'}, 'show_clear_button': {'type': 'bool | None', 'default': 'True', 'description': 'If True, will show a button to clear the current image.'}, 'show_remove_button': {'type': 'bool | None', 'default': 'None', 'description': 'If True, will show a button to remove the selected bounding box.'}, 'handles_cursor': {'type': 'bool | None', 'default': 'True', 'description': 'If True, the cursor will change when hovering over box handles in drag mode. 
Can be CPU-intensive.'}}, 'postprocess': {'value': {'type': 'dict | None', 'description': 'A dict with an image and an optional list of boxes or None.'}}, 'preprocess': {'return': {'type': 'dict | None', 'description': 'A dict with the image and boxes or None.'}, 'value': None}}, 'events': {'clear': {'type': None, 'default': None, 'description': 'This listener is triggered when the user clears the image_annotator using the clear button for the component.'}, 'change': {'type': None, 'default': None, 'description': 'Triggered when the value of the image_annotator changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input.'}, 'upload': {'type': None, 'default': None, 'description': 'This listener is triggered when the user uploads a file into the image_annotator.'}}}, '__meta__': {'additional_interfaces': {}, 'user_fn_refs': {'image_annotator': []}}}
 
  abs_path = os.path.join(os.path.dirname(__file__), "css.css")
 
@@ -16,164 +16,19 @@ with gr.Blocks(
          ],
      ),
  ) as demo:
-     gr.Markdown(
- """
- # `gradio_image_annotation`
-
- <div style="display: flex; gap: 7px;">
- <a href="https://pypi.org/project/gradio_image_annotation/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_image_annotation"></a>
- </div>
-
- A Gradio component that can be used to annotate images with bounding boxes.
- """, elem_classes=["md-custom"], header_links=True)
+
      app.render()
+
-     gr.Markdown(
- """
- ## Installation
-
- ```bash
- pip install gradio_image_annotation
- ```
-
- ## Usage
-
- ```python
- import gradio as gr
- from gradio_image_annotation import image_annotator
-
-
- example_annotation = {
-     "image": "https://gradio-builds.s3.amazonaws.com/demo-files/base.png",
-     "boxes": [
-         {
-             "xmin": 636,
-             "ymin": 575,
-             "xmax": 801,
-             "ymax": 697,
-             "label": "Vehicle",
-             "color": (255, 0, 0)
-         },
-         {
-             "xmin": 360,
-             "ymin": 615,
-             "xmax": 386,
-             "ymax": 702,
-             "label": "Person",
-             "color": (0, 255, 0)
-         }
-     ]
- }
-
- examples_crop = [
-     {
-         "image": "https://raw.githubusercontent.com/gradio-app/gradio/main/guides/assets/logo.png",
-         "boxes": [
-             {
-                 "xmin": 30,
-                 "ymin": 70,
-                 "xmax": 530,
-                 "ymax": 500,
-                 "color": (100, 200, 255),
-             }
-         ],
-     },
-     {
-         "image": "https://gradio-builds.s3.amazonaws.com/demo-files/base.png",
-         "boxes": [
-             {
-                 "xmin": 636,
-                 "ymin": 575,
-                 "xmax": 801,
-                 "ymax": 697,
-                 "color": (255, 0, 0),
-             },
-         ],
-     },
- ]
-
-
- def crop(annotations):
-     if annotations["boxes"]:
-         box = annotations["boxes"][0]
-         return annotations["image"][
-             box["ymin"]:box["ymax"],
-             box["xmin"]:box["xmax"]
-         ]
-     return None
-
-
- def get_boxes_json(annotations):
-     return annotations["boxes"]
-
-
- with gr.Blocks() as demo:
-     with gr.Tab("Object annotation", id="tab_object_annotation"):
-         annotator = image_annotator(
-             example_annotation,
-             label_list=["Person", "Vehicle"],
-             label_colors=[(0, 255, 0), (255, 0, 0)],
-         )
-         button_get = gr.Button("Get bounding boxes")
-         json_boxes = gr.JSON()
-         button_get.click(get_boxes_json, annotator, json_boxes)
-
-     with gr.Tab("Crop", id="tab_crop"):
-         with gr.Row():
-             annotator_crop = image_annotator(
-                 examples_crop[0],
-                 image_type="numpy",
-                 disable_edit_boxes=True,
-                 single_box=True,
-             )
-             image_crop = gr.Image()
-         button_crop = gr.Button("Crop")
-         button_crop.click(crop, annotator_crop, image_crop)
-
-         gr.Examples(examples_crop, annotator_crop)
-
- if __name__ == "__main__":
-     demo.launch()
-
- ```
- """, elem_classes=["md-custom"], header_links=True)
-
-
-     gr.Markdown("""
- ## `image_annotator`
-
- ### Initialization
- """, elem_classes=["md-custom"], header_links=True)
-
-     gr.ParamViewer(value=_docs["image_annotator"]["members"]["__init__"], linkify=[])
-
-
-     gr.Markdown("### Events")
-     gr.ParamViewer(value=_docs["image_annotator"]["events"], linkify=['Event'])
+     # gr.Markdown("### Events")
+     # gr.ParamViewer(value=_docs["image_annotator"]["events"], linkify=['Event'])
 
 
-     gr.Markdown("""
-
-     ### User function
-
-     The impact on the users predict function varies depending on whether the component is used as an input or output for an event (or both).
-
-     - When used as an Input, the component only impacts the input signature of the user function.
-     - When used as an output, the component only impacts the return signature of the user function.
-
-     The code snippet below is accurate in cases where the component is used as both an input and an output.
-
-     - **As input:** Is passed, a dict with the image and boxes or None.
-     - **As output:** Should return, a dict with an image and an optional list of boxes or None.
-
-     ```python
-     def predict(
-         value: dict | None
-     ) -> dict | None:
-         return value
-     ```
-     """, elem_classes=["md-custom", "image_annotator-user-fn"], header_links=True)
+     # gr.Markdown("""
+     # """, elem_classes=["md-custom", "image_annotator-user-fn"], header_links=True)
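Note: `_docs` is now keyed by `'LayoutPainter'`, but the commented-out `gr.ParamViewer` lines above still index `_docs["image_annotator"]`, so re-enabling them as-is would raise a `KeyError`. A tiny illustration (hypothetical, not in the commit):

```python
# Reduced stand-in for the renamed docs dict from space.py.
_docs = {"LayoutPainter": {"events": {"clear": {}, "change": {}, "upload": {}}}}

try:
    _docs["image_annotator"]["events"]  # what the commented-out viewer references
except KeyError:
    print("use _docs['LayoutPainter'] after the rename")
```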