|
import gradio as gr |
|
import torch |
|
import os |
|
import sys |
|
from huggingface_hub import login |
|
import base64 |
|
import io |
|
from PIL import Image |
|
import requests |
|
import tempfile |
|
|
|
|
|
# Select the compute device: prefer CUDA when a GPU is visible to torch.
device = "cuda" if torch.cuda.is_available() else "cpu"

print(f"Using device: {device}")


# Log runtime versions up front to make remote debugging of Space logs easier.

print(f"Gradio version: {gr.__version__}")

print(f"Python version: {sys.version}")



# Authenticate against the Hugging Face Hub when a token is provided via the
# environment (e.g. a Space secret). Without it, only public models/Spaces
# are reachable; the script continues either way.

hf_token = os.environ.get("HUGGINGFACE_TOKEN")

if hf_token:

    print("Found HUGGINGFACE_TOKEN in environment variables")

    # Registers the token with huggingface_hub for all subsequent API calls.

    login(token=hf_token)

    print("Logged in with Hugging Face token")

else:

    print("HUGGINGFACE_TOKEN not found in environment variables")
|
|
|
|
|
|
|
|
|
def convert_webp_to_png(input_data):
    """Convert an image in any supported form (e.g. WebP) to a PNG temp file.

    Accepted input forms:
      * an ``http(s)://`` URL string      -> downloaded with ``requests``
      * a ``data:...;base64,`` URI string -> decoded in memory
      * raw encoded image ``bytes``
      * a path string to an existing local file
      * a ``PIL.Image.Image`` instance

    Returns:
        str: path of the newly written PNG file on success.
        Otherwise the original ``input_data`` is returned unchanged so the
        caller can fall back to the raw model result (deliberate best-effort).
    """
    try:
        img = None

        # Remote URL: fetch it. A timeout prevents the UI from hanging on a
        # stalled connection; raise_for_status() surfaces HTTP errors instead
        # of handing an HTML error page to PIL.
        if isinstance(input_data, str) and input_data.startswith(('http://', 'https://')):
            response = requests.get(input_data, timeout=30)
            response.raise_for_status()
            img = Image.open(io.BytesIO(response.content))

        # Base64 data URI, e.g. "data:image/webp;base64,....".
        elif isinstance(input_data, str) and input_data.startswith('data:'):
            # Renamed from `format`, which shadowed the builtin.
            _mime_header, b64_payload = input_data.split(';base64,')
            img = Image.open(io.BytesIO(base64.b64decode(b64_payload)))

        # Raw encoded image bytes.
        elif isinstance(input_data, bytes):
            img = Image.open(io.BytesIO(input_data))

        # Local file path.
        elif isinstance(input_data, str) and os.path.exists(input_data):
            img = Image.open(input_data)

        # Already a decoded PIL image.
        elif isinstance(input_data, Image.Image):
            img = input_data

        if img is None:
            print(f"Couldn't process image data: {type(input_data)}")
            return input_data

        # delete=False so the file outlives this function (Gradio serves the
        # path later); closed immediately so PIL can reopen it for writing on
        # every platform (notably Windows).
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        temp_filename = temp_file.name
        temp_file.close()

        # NOTE(review): PNG supports alpha, so flattening RGBA is not strictly
        # required — kept for behavior parity with downstream consumers that
        # may expect RGB. Confirm before removing.
        if img.mode == 'RGBA':
            img = img.convert('RGB')

        img.save(temp_filename, format="PNG")
        print(f"Converted image saved to {temp_filename}")

        return temp_filename
    except Exception as e:
        # Best-effort fallback: never crash the UI over a conversion failure.
        print(f"Error converting image: {str(e)}")
        return input_data
|
|
|
def custom_handler(model_result):
    """Normalize a model result into a PNG file path.

    A non-empty list result is reduced to its first item before conversion;
    anything else is converted as-is. On any error the raw result is
    returned unchanged.
    """
    try:
        print(f"Processing model result: {type(model_result)}")

        # Space APIs often return a list of outputs; only the first matters.
        if isinstance(model_result, list) and len(model_result) > 0:
            return convert_webp_to_png(model_result[0])

        return convert_webp_to_png(model_result)
    except Exception as e:
        print(f"Error in custom handler: {str(e)}")
        return model_result
|
|
|
def load_model():
    """Build the Gradio Interface that proxies the remote 3D-render Space.

    Returns the configured ``gr.Interface`` on success, or ``None`` when
    setup fails (the caller checks for this).
    """

    try:

        print("Setting up a custom interface...")



        def generate_3d_render(prompt):
            """Send *prompt* to the remote Space and return a processed image path.

            Returns ``None`` on any failure so the Gradio Image output
            simply stays empty instead of erroring.
            """

            try:

                print(f"Processing prompt: {prompt}")

                # NOTE(review): `gradio.external.call_space` is an internal
                # gradio helper, not a documented public API — confirm it
                # exists in the pinned gradio version (gr.load / gradio_client
                # is the supported route in recent releases).
                import gradio.external as ext

                result = ext.call_space(

                    name="goofyai/3d_render_style_xl",

                    fn_index=0,

                    inputs=[prompt]

                )

                print(f"Got result from API: {type(result)}")


                # Convert whatever came back (URL / bytes / WebP file) to PNG.

                processed_result = custom_handler(result)

                return processed_result

            except Exception as e:

                print(f"Error in generation: {str(e)}")

                return None




        # type="filepath" matches generate_3d_render returning a temp-file path.
        interface = gr.Interface(

            fn=generate_3d_render,

            inputs=gr.Textbox(label="Input", placeholder="Enter a prompt for 3D rendering"),

            outputs=gr.Image(label="Output", type="filepath"),

            title="3D Render Style XL",

            description="Enter a prompt to generate a 3D render in game-icon style"

        )

        return interface

    except Exception as e:

        print(f"Error setting up interface: {str(e)}")

        return None
|
|
|
|
|
# Entry point: build the interface and serve it; any failure is logged
# rather than raised so Space logs show a readable message.
try:
    demo = load_model()
    if not demo:
        print("Failed to set up the interface")
    else:
        print("Interface set up successfully, launching...")
        # Bind on all interfaces at the conventional Spaces port.
        demo.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
        )
except Exception as e:
    print(f"Error launching interface: {str(e)}")