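# Gradio demo: hair segmentation with MediaPipe's ImageSegmenter.
# The app takes an input image and returns a solid-color mask image
# (white where hair is detected, gray elsewhere).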
import gradio as gr
import cv2
import math
import numpy as np
import os
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
# Target height and width used when displaying results with resize_and_show()
DESIRED_HEIGHT = 480
DESIRED_WIDTH = 480
# Resize the image to fit within DESIRED_WIDTH x DESIRED_HEIGHT while keeping
# its aspect ratio, then display it briefly in an OpenCV window.
def resize_and_show(image):
    h, w = image.shape[:2]
    if h < w:
        img = cv2.resize(image, (DESIRED_WIDTH, math.floor(h/(w/DESIRED_WIDTH))))
    else:
        img = cv2.resize(image, (math.floor(w/(h/DESIRED_HEIGHT)), DESIRED_HEIGHT))
    cv2.imshow('color', img)
    cv2.waitKey(1000)
    cv2.destroyAllWindows()
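# Note: the segmenter below expects the hair segmentation model file to be
# available locally at './hair_segmenter.tflite' (downloadable from MediaPipe's
# image segmentation model documentation).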
def segmentate(filepath):
    BG_COLOR = (192, 192, 192)    # gray background
    MASK_COLOR = (255, 255, 255)  # white hair mask

    # Create the options that will be used for ImageSegmenter
    base_options = python.BaseOptions(model_asset_path='./hair_segmenter.tflite')
    options = vision.ImageSegmenterOptions(base_options=base_options,
                                           output_category_mask=True)

    # Create the image segmenter
    with vision.ImageSegmenter.create_from_options(options) as segmenter:
        # Create the MediaPipe image that will be segmented
        image = mp.Image.create_from_file(filepath)

        # Retrieve the category mask for the segmented image
        segmentation_result = segmenter.segment(image)
        category_mask = segmentation_result.category_mask

        # Generate solid color images for showing the output segmentation mask
        # (assumes a 3-channel RGB input image).
        image_data = image.numpy_view()
        fg_image = np.zeros(image_data.shape, dtype=np.uint8)
        fg_image[:] = MASK_COLOR
        bg_image = np.zeros(image_data.shape, dtype=np.uint8)
        bg_image[:] = BG_COLOR

        # Pixels whose mask value exceeds the threshold are treated as hair
        # (foreground, white); everything else becomes background (gray).
        condition = np.stack((category_mask.numpy_view(),) * 3, axis=-1) > 0.2
        output_image = np.where(condition, fg_image, bg_image)
        # resize_and_show(output_image)
        return output_image
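# Standalone usage, as a minimal sketch; 'examples/face.jpg' is a placeholder
# path for any local image:
#   mask = segmentate('examples/face.jpg')
#   resize_and_show(mask)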
# GUI
title = 'MediaPipe Hair Segmentation'
description = 'Hair segmentation using MediaPipe'
# Each example is a single input (the image path); the examples/ directory is
# assumed to contain the sample images shown in the UI.
examples = [[f'examples/{name}'] for name in sorted(os.listdir('examples'))]
iface = gr.Interface(
    fn=segmentate,
    inputs=[
        gr.Image(type='filepath', label='Input Image')
    ],
    outputs=[
        gr.Image(label='Segmented Image')
    ],
    examples=examples,
    allow_flagging='never',
    cache_examples=False,
    title=title,
    description=description
)
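# Start the local Gradio server (by default it listens on http://127.0.0.1:7860).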
iface.launch()