import gradio as gr
import cv2
import numpy as np
from PIL import Image, ImageEnhance
from gradio_imageslider import ImageSlider
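# Gradio demo: portrait-oriented photo filters. Upload an image, pick a filter and an
# intensity, preview the result in a before/after slider, and download the output file.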
def apply_filter(image, filter_type, intensity):
    """Apply the selected filter to a BGR image array at the given intensity (1-100)."""
    image = np.array(image)
    normalized_intensity = intensity / 100.0
    if filter_type == "Grayscale":
        return convert_to_grayscale(image)
    elif filter_type == "Soft Glow":
        # Blend the image with a blurred copy; the blend weight grows with intensity.
        base_intensity = 0.1
        adjusted_intensity = base_intensity + (normalized_intensity * (1 - base_intensity))
        gaussian = cv2.GaussianBlur(image, (15, 15), 0)
        soft_glow = cv2.addWeighted(image, 1 - adjusted_intensity, gaussian, adjusted_intensity, 0)
        return soft_glow
    elif filter_type == "Portrait Enhancer":
        # Boost sharpness and color saturation using PIL enhancers.
        base_intensity = 0.5
        adjusted_intensity = base_intensity + (normalized_intensity * (1 - base_intensity))
        image_pil = Image.fromarray(image)
        enhancer = ImageEnhance.Sharpness(image_pil)
        image_pil = enhancer.enhance(1 + 0.5 * adjusted_intensity)
        enhancer = ImageEnhance.Color(image_pil)
        image_pil = enhancer.enhance(1 + 0.5 * adjusted_intensity)
        enhanced_image = np.array(image_pil)
        return enhanced_image
    elif filter_type == "Warm Tone":
        # Blend in an orange layer (BGR order) to warm the image.
        warm_image = cv2.addWeighted(image, 1.0, np.full(image.shape, (20, 66, 112), dtype=np.uint8), 0.3 * normalized_intensity, 0)
        return warm_image
    elif filter_type == "Cold Tone":
        # Blend in a blue layer (BGR order) to cool the image.
        cold_image = cv2.addWeighted(image, 1.0, np.full(image.shape, (112, 66, 20), dtype=np.uint8), 0.3 * normalized_intensity, 0)
        return cold_image
    elif filter_type == "High-Key":
        # Raise gain and brightness for a bright, airy look.
        high_key = cv2.convertScaleAbs(image, alpha=1.0 + 0.3 * normalized_intensity, beta=20)
        return high_key
    elif filter_type == "Low-Key":
        # Lower gain and brightness for a darker, moodier look.
        low_key = cv2.convertScaleAbs(image, alpha=1.0 - 0.1 * normalized_intensity, beta=-10)
        return low_key
    elif filter_type == "Haze":
        # Blend in a white layer to wash out contrast.
        haze = cv2.addWeighted(image, 1.0, np.full(image.shape, 255, dtype=np.uint8), 0.3 * normalized_intensity, 0)
        return haze
    else:
        return image
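# Grayscale helper: converts to single-channel gray, then back to a 3-channel array so the
# rest of the pipeline (color conversions, saving) keeps working on 3-channel images.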
def convert_to_grayscale(image):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
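# Handler for the "Apply Filter" button: converts the uploaded PIL image to OpenCV's BGR
# layout, applies the filter, and saves a JPEG so it can be offered as a download.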
def convert_and_save(image, filter_type, intensity):
    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    filtered_image = apply_filter(image_cv, filter_type, intensity)
    filtered_image_pil = Image.fromarray(cv2.cvtColor(filtered_image, cv2.COLOR_BGR2RGB))
    output_path = "filtered_image.jpg"
    filtered_image_pil.save(output_path)
    # ImageSlider expects an (original, filtered) pair; the file path feeds the download link.
    return (image, filtered_image_pil), output_path
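# Live-preview callback: re-runs the filter whenever the filter choice or intensity changes.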
def update_image(image, filter_type, intensity):
    if image is None:
        return None
    # Work in BGR so apply_filter's OpenCV color assumptions hold, then convert back for display.
    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    filtered_image = apply_filter(image_cv, filter_type, intensity)
    filtered_image_pil = Image.fromarray(cv2.cvtColor(filtered_image, cv2.COLOR_BGR2RGB))
    return [image, filtered_image_pil]  # Show the original and filtered images in the slider
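# Returns a one-line description of the selected filter for the Markdown panel.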
def get_filter_description(filter_type):
    descriptions = {
        "Grayscale": "Converts the image to black and white.",
        "Soft Glow": "Adds a soft, glowing light to the image.",
        "Portrait Enhancer": "Evens out skin tone and adjusts sharpness to make the subject stand out.",
        "Warm Tone": "Adds warm tones to bring warmth to the image.",
        "Cold Tone": "Adds cool tones to give the image a crisper feel.",
        "High-Key": "Creates a bright, radiant image.",
        "Low-Key": "Emphasizes dark tones for a moodier image.",
        "Haze": "Adds a soft, hazy effect for a dreamy image."
    }
    return descriptions.get(filter_type, "")
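# UI layout: inputs (image, filter, intensity) on the left; before/after slider and
# download link on the right.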
with gr.Blocks() as iface:
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Upload Image")
            filter_input = gr.Radio(
                ["Grayscale", "Soft Glow", "Portrait Enhancer", "Warm Tone", "Cold Tone", "High-Key", "Low-Key", "Haze"],
                label="Select Filter",
                value="Soft Glow"
            )
            intensity_slider = gr.Slider(1, 100, value=50, label="Filter Intensity")
            description_output = gr.Markdown(get_filter_description("Soft Glow"))
        with gr.Column():
            image_slider = ImageSlider(label="Before and After", type="pil")
            download_link = gr.File(label="Download Filtered Image")
    # Refresh the description and the live preview whenever the filter or intensity changes.
    filter_input.change(fn=get_filter_description, inputs=filter_input, outputs=description_output)
    filter_input.change(fn=update_image, inputs=[image_input, filter_input, intensity_slider], outputs=image_slider)
    intensity_slider.change(fn=update_image, inputs=[image_input, filter_input, intensity_slider], outputs=image_slider)
    process_button = gr.Button("Apply Filter")
    process_button.click(
        fn=convert_and_save,
        inputs=[image_input, filter_input, intensity_slider],
        outputs=[image_slider, download_link]
    )
iface.title = "Filters Optimized for Portrait Photos"
iface.description = "Apply a variety of filters optimized for portrait photos."
iface.launch()