import os
import sys

import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline
from huggingface_hub import login
from PIL import Image, ImageDraw

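# Run on the GPU when available; SDXL inference on the CPU works but is very slow.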
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")

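# Authenticate with the Hugging Face Hub if a token is provided in the environment.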
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if hf_token:
    print("Found HUGGINGFACE_TOKEN in environment variables")
    login(token=hf_token)
    print("Logged in with Hugging Face token")
else:
    print("HUGGINGFACE_TOKEN not found in environment variables")

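# The SDXL pipeline is loaded lazily and cached in this module-level variable.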
pipe = None


def load_model():
    """Load the SDXL base pipeline and apply the 3D render style LoRA."""
    global pipe
    try:
        print("Loading SDXL pipeline with 3D render LoRA...")
        pipe = StableDiffusionXLPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            use_safetensors=True,
        ).to(device)
        pipe.load_lora_weights("goofyai/3d_render_style_xl")
        print("Model loaded successfully!")
        return True
    except Exception as e:
        print(f"Error loading model: {e}")
        return False


def generate_3d_icon(prompt):
    """Generate a 512x512, 3D-style game icon from a text prompt."""
    global pipe
    try:
        print(f"Generating 3D icon with prompt: {prompt}")

        # Load the pipeline lazily if startup loading failed or was skipped.
        if pipe is None:
            print("Model not loaded, attempting to load...")
            if not load_model():
                raise RuntimeError("Failed to load model")

        # Prepend style keywords so the LoRA produces clean, icon-like renders.
        enhanced_prompt = f"3d style, 3d render, {prompt}, game icon, clean background, vibrant colors, high quality"

        with torch.no_grad():
            image = pipe(
                enhanced_prompt,
                num_inference_steps=30,
                guidance_scale=7.5,
                height=512,
                width=512,
            ).images[0]

        print("Image generated successfully")
        return image

    except Exception as e:
        print(f"Error generating icon: {e}")

        # Return a placeholder image so the UI still shows a result on failure.
        image = Image.new("RGB", (512, 512), color="red")
        draw = ImageDraw.Draw(image)
        draw.text((200, 250), "Generation Error", fill=(255, 255, 255))
        return image


def create_interface():
    # Load the model at startup so the first request does not pay the loading cost.
    load_model()

    interface = gr.Interface(
        fn=generate_3d_icon,
        inputs=[
            gr.Textbox(label="Prompt", placeholder="Describe your game icon", value="galatasaray")
        ],
        outputs=gr.Image(type="pil", label="Generated Game Icon"),
        title="3D Game Icon Generator",
        description="Generate 3D-style game icons using AI",
    )
    return interface


if __name__ == "__main__":
    try:
        interface = create_interface()
        print("Launching interface...")
        # Bind to all interfaces on the default Gradio port.
        interface.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
        )
    except Exception as e:
        print(f"Error launching interface: {e}")