import gradio as gr
import torch
import os
import sys
from huggingface_hub import login
import base64
import io
from PIL import Image
import requests
import tempfile
# Use CUDA if available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# More details about the environment
print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")
# Hugging Face API token - look for it as an environment variable first,
# then rely on the Hugging Face Secrets system
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if hf_token:
    print("Found HUGGINGFACE_TOKEN in environment variables")
    # Log in with the token
    login(token=hf_token)
    print("Logged in with Hugging Face token")
else:
    print("HUGGINGFACE_TOKEN not found in environment variables")
    # Hugging Face Spaces will load this variable automatically
    # if you added it as a secret through the Spaces UI
def process_image(img_data):
    """Process image data to ensure it's in a valid format"""
    try:
        # If it's already a PIL Image
        if isinstance(img_data, Image.Image):
            return img_data
        # If it's a URL
        if isinstance(img_data, str) and (img_data.startswith('http://') or img_data.startswith('https://')):
            response = requests.get(img_data)
            return Image.open(io.BytesIO(response.content))
        # If it's base64 encoded
        if isinstance(img_data, str) and img_data.startswith('data:image'):
            img_data = img_data.split(',')[1]
            img_bytes = base64.b64decode(img_data)
            return Image.open(io.BytesIO(img_bytes))
        # If it's bytes
        if isinstance(img_data, bytes):
            return Image.open(io.BytesIO(img_data))
        # If it's a numpy array
        if hasattr(img_data, 'shape') and len(img_data.shape) >= 2:
            return Image.fromarray(img_data)
        # Default fallback
        print(f"Unknown image format: {type(img_data)}")
        return None
    except Exception as e:
        print(f"Error processing image: {str(e)}")
        return None
def save_image(img, filename=None):
    """Save image to a temporary file and return the path"""
    try:
        if not filename:
            temp_dir = tempfile.gettempdir()
            filename = os.path.join(temp_dir, f"generated_image_{id(img)}.png")
        img = process_image(img)
        if img:
            # Ensure the image is in RGB mode (RGBA can cause problems when saving)
            if img.mode == 'RGBA':
                img = img.convert('RGB')
            img.save(filename, format="PNG")
            return filename
        return None
    except Exception as e:
        print(f"Error saving image: {str(e)}")
        return None
def generate_3d_render(prompt):
    """Generate a 3D render from the prompt"""
    try:
        # Attempt to use the external model API through Gradio
        try:
            print(f"Sending request to model with prompt: {prompt}")
            # A model interface is already defined on HF Spaces;
            # call that model directly
            import gradio.external as ext
            result = ext.call_space(
                name="goofyai/3d_render_style_xl",
                fn_index=0,  # the main model function is usually at index 0
                inputs=[prompt],
                api_key=hf_token
            )
            # Process the results
            if result and isinstance(result, list) and len(result) > 0:
                print("Received response from model API")
                # If there is an image, process it
                if hasattr(result[0], 'shape') or isinstance(result[0], (str, bytes, Image.Image)):
                    img = process_image(result[0])
                    if img:
                        # Save the image as PNG (returns the saved file path)
                        saved_path = save_image(img)
                        if saved_path:
                            print(f"Image saved to {saved_path}")
                            return saved_path
                return result[0]  # return the original result if it cannot be processed
            else:
                print("Empty or invalid response from model API")
                return None
        except Exception as e:
            print(f"Error calling external API: {str(e)}")
            # Fallback mechanism - return a simple text response
            return f"Cannot reach the model API: {str(e)}"
    except Exception as e:
        print(f"Error in generate_3d_render: {str(e)}")
        return f"Error: {str(e)}"
def load_model():
    try:
        print("Setting up 3D render model interface...")
        # Build a simple Gradio interface
        interface = gr.Interface(
            fn=generate_3d_render,
            inputs=gr.Textbox(label="Input", placeholder="Enter a prompt for 3D rendering"),
            outputs=gr.Image(label="Output", type="filepath"),
            title="3D Render Style XL",
            description="Enter a prompt to generate a 3D render in game-icon style"
        )
        return interface
    except Exception as e:
        print(f"Error setting up interface: {str(e)}")
        return None
# Create the interface
try:
    interface = load_model()
    if interface:
        print("Interface set up successfully, launching...")
        interface.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True
        )
    else:
        print("Failed to set up the interface")
except Exception as e:
    print(f"Error launching interface: {str(e)}")