import sys
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_url, cached_download
import PIL
from PIL import Image
import onnx
import onnxruntime

# Download the ToonClip ONNX model from the Hugging Face Hub and verify it.
# (On newer huggingface_hub releases, hf_hub_download("Jacopo/ToonClip", filename="model.onnx")
# replaces the hf_hub_url + cached_download pair.)
config_file_url = hf_hub_url("Jacopo/ToonClip", filename="model.onnx")
model_file = cached_download(config_file_url)

onnx_model = onnx.load(model_file)
onnx.checker.check_model(onnx_model)

# Create an ONNX Runtime inference session and cache the I/O tensor names.
opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 16
ort_session = onnxruntime.InferenceSession(model_file, sess_options=opts)

input_name = ort_session.get_inputs()[0].name
output_name = ort_session.get_outputs()[0].name


def normalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # x = (x - mean) / std, applied per channel (CHW or NCHW layout)
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] - mean[dim]) / std[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] - mean[dim]) / std[dim]
    return x


def denormalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # x = (x * std) + mean, the inverse of normalize()
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] * std[dim]) + mean[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] * std[dim]) + mean[dim]
    return x


def resize_image(image, size):
    # Downscale so the longest side is at most `size`, preserving the aspect ratio.
    width, height = image.size
    if max(width, height) > size:
        if width > height:
            ratio = width / size
            new_height = int(height / ratio)
            new_width = size
        else:
            ratio = height / size
            new_width = int(width / ratio)
            new_height = size
        # Image.LANCZOS replaces the deprecated Image.ANTIALIAS alias.
        return image.resize((new_width, new_height), Image.LANCZOS)
    else:
        return image


def nogan(input_img):
    # Preprocess: resize, HWC -> NCHW, scale to [0, 1], normalize with ImageNet stats.
    input_img = resize_image(input_img, 1024)
    i = np.asarray(input_img)
    i = i.astype("float32")
    i = np.transpose(i, (2, 0, 1))
    i = np.expand_dims(i, 0)
    i = i / 255.0
    i = normalize(i, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))

    # Run the ONNX model.
    ort_outs = ort_session.run([output_name], {input_name: i})

    # Postprocess: denormalize, scale back to [0, 255], NCHW -> HWC.
    output = ort_outs[0][0]
    output = denormalize(output, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    output = output * 255.0
    output = output.astype('uint8')
    output = np.transpose(output, (1, 2, 0))
    img = PIL.Image.fromarray(output, 'RGB')

    # Shrink the stylized output before returning it to the UI.
    half = 0.2
    output_image = img.resize([int(half * s) for s in img.size])
    return output_image


title = "Zoom, Clip, Toon"
description = """Image to Toon Using AI"""
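# Hedged usage sketch (an illustrative assumption, not part of the original Space):
# the pipeline can be exercised on a local file, e.g. a hypothetical "portrait.jpg",
# before wiring `nogan` into a Gradio interface:
#
#     test_img = Image.open("portrait.jpg").convert("RGB")
#     toon_img = nogan(test_img)
#     toon_img.save("portrait_toon.jpg")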
The \"ToonClip\" model was trained by Jacopo Mangiavacchi and available at Github Repo ComicsHeroMobileUNet