import logging

import numpy as np
from PIL import Image, ImageOps


class Image_Processor:
    """Helpers for resizing, white-padding, and grayscale-converting images to a fixed 336x336 size."""

    def __init__(self):
        pass
    def is_image_white_by_percentage(self, image, white_threshold):
        """Return True if more than white_threshold percent of the pixels are pure white."""
        image = image.convert('RGB')
        image_np = np.array(image)

        # Count pixels that are exactly (255, 255, 255).
        white_pixel = np.array([255, 255, 255])
        white_pixels_count = np.sum(np.all(image_np == white_pixel, axis=-1))

        total_pixels = image_np.shape[0] * image_np.shape[1]
        white_pixel_percentage = (white_pixels_count / total_pixels) * 100
        return white_pixel_percentage > white_threshold

    def padding_white(self, image, output_size=(336, 336)):
        """Fit the image into output_size, filling any leftover border with white."""
        if image.mode != 'RGB':
            image = image.convert('RGB')

        # ImageOps.pad keeps the aspect ratio and centers the image on a white canvas.
        new_image = ImageOps.pad(
            image, output_size, method=Image.Resampling.LANCZOS, color=(255, 255, 255)
        )
        return new_image

    def resize_image_with_aspect_ratio(self, img):
        """Resize so the longer side becomes 336 pixels while preserving the aspect ratio."""
        target_size = 336
        width, height = img.size
        original_aspect_ratio = width / height

        if width > height:
            new_width = target_size
            new_height = int(target_size / original_aspect_ratio)
        else:
            new_height = target_size
            new_width = int(target_size * original_aspect_ratio)

        # Guard against a zero dimension for extremely elongated images.
        resized_img = img.resize((max(new_width, 1), max(new_height, 1)))
        return resized_img

    def get_processed_img(self, image):
        """Resize an image to 336x336 and convert it to grayscale.

        Images that are more than 50% pure-white pixels are resized with their aspect
        ratio preserved and then padded with white; all other images are resized directly.
        """
        is_mostly_white = self.is_image_white_by_percentage(image, 50)

        if is_mostly_white:
            resized_image = self.resize_image_with_aspect_ratio(image)
            final_image = self.padding_white(resized_image)
            logging.info('Resized and Padded Image')
        else:
            final_image = image.resize((336, 336))
            logging.info('Resized Image')

        # Always return a single-channel (grayscale) image.
        if final_image.mode != 'L':
            final_image = final_image.convert('L')
        return final_image
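

# Usage sketch (illustrative only, not part of the original module): a minimal example of
# how the class above might be driven. The file names 'sample.png' and
# 'processed_sample.png' are hypothetical placeholders; the 50% white-pixel threshold is
# the one hard-coded in get_processed_img.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    processor = Image_Processor()
    with Image.open("sample.png") as img:  # hypothetical input path
        processed = processor.get_processed_img(img)

    # Both branches yield a 336x336 grayscale image.
    print(processed.size, processed.mode)
    processed.save("processed_sample.png")  # hypothetical output path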