geyik1 commited on
Commit
37549c0
·
verified ·
1 Parent(s): 010c290

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -0
app.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
import os
import sys
from huggingface_hub import login
from diffusers import StableDiffusionXLPipeline
from PIL import Image

# Force CPU usage if needed
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# More details about the environment
print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")

# Hugging Face API token: look for it as an environment variable first
# (Hugging Face Spaces exposes repository secrets this way).
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if hf_token:
    print("Found HUGGINGFACE_TOKEN in environment variables")
    # Log in with the token — presumably needed for gated/private model
    # access; the app still proceeds without it.
    login(token=hf_token)
    print("Logged in with Hugging Face token")
else:
    print("HUGGINGFACE_TOKEN not found in environment variables")

# Global pipeline handle; populated lazily by load_model().
pipe = None
31
def load_model():
    """Load the SDXL base pipeline and apply the 3D-render-style LoRA.

    Stores the pipeline in the module-level ``pipe`` global.

    Returns:
        bool: True on success (or if the pipeline is already loaded),
        False on failure. Errors are printed, never raised.
    """
    global pipe

    # Idempotence fix: repeated calls (startup + lazy retry in
    # generate_3d_icon) must not re-download and rebuild the pipeline.
    if pipe is not None:
        return True

    try:
        print("Loading SDXL pipeline with 3D render LoRA...")

        # Load SDXL base model; fp16 halves GPU memory, CPU needs fp32.
        pipe = StableDiffusionXLPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            use_safetensors=True,
        ).to(device)

        # Load the 3D render LoRA on top of the base weights.
        pipe.load_lora_weights("goofyai/3d_render_style_xl")

        print("Model loaded successfully!")
        return True

    except Exception as e:
        print(f"Error loading model: {str(e)}")
        # Robustness fix: if loading failed partway (e.g. after
        # from_pretrained but before the LoRA), don't leave a
        # half-initialized pipeline behind for callers to reuse.
        pipe = None
        return False
53
def generate_3d_icon(prompt):
    """Generate a 512x512 3D-style game icon from a text prompt.

    Lazily loads the model on first use. On any failure, returns a red
    placeholder image labelled "Generation Error" instead of raising, so
    the Gradio UI always has something to display.
    """
    global pipe
    try:
        print(f"Generating 3D icon with prompt: {prompt}")

        # Lazy initialization: load the pipeline if startup loading failed.
        if pipe is None:
            print("Model not loaded, attempting to load...")
            if not load_model():
                raise Exception("Failed to load model")

        # Wrap the user's text in style keywords that trigger the LoRA look.
        enhanced_prompt = f"3d style, 3d render, {prompt}, game icon, clean background, vibrant colors, high quality"

        # Inference only — no gradients needed.
        with torch.no_grad():
            result = pipe(
                enhanced_prompt,
                num_inference_steps=30,
                guidance_scale=7.5,
                height=512,
                width=512,
            )
        generated = result.images[0]

        print("Image generated successfully")
        return generated

    except Exception as e:
        print(f"Error generating icon: {str(e)}")

        # Fall back to a red placeholder with an error label.
        from PIL import ImageDraw
        fallback = Image.new('RGB', (512, 512), color='red')
        ImageDraw.Draw(fallback).text(
            (200, 250), "Generation Error", fill=(255, 255, 255)
        )
        return fallback
89
# Create Gradio interface
def create_interface():
    """Build and return the Gradio Interface for the icon generator."""
    # Best-effort startup load; generate_3d_icon retries lazily on failure.
    load_model()

    prompt_box = gr.Textbox(
        label="Prompt",
        placeholder="Describe your game icon",
        value="galatasaray",
    )
    return gr.Interface(
        fn=generate_3d_icon,
        inputs=[prompt_box],
        outputs=gr.Image(type="pil", label="Generated Game Icon"),
        title="3D Game Icon Generator",
        description="Generate 3D-style game icons using AI",
    )
106
# Launch the interface
if __name__ == "__main__":
    try:
        app = create_interface()
        print("Launching interface...")
        # Bind to all interfaces on port 7860 (the standard Spaces port);
        # surface server-side errors in the UI.
        app.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
        )
    except Exception as e:
        print(f"Error launching interface: {str(e)}")