import gradio as gr
import torch
import os
import sys

# Report the compute device (informational only; gr.load runs the model remotely).
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# More details about the environment, useful when debugging Space startup logs.
print(f"Gradio version: {gr.__version__}")
print(f"Python version: {sys.version}")

# Hugging Face token - read from the environment.
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if hf_token:
    print("Found HUGGINGFACE_TOKEN in environment variables")
else:
    print("HUGGINGFACE_TOKEN not found in environment variables")

# Load the model, staying as close as possible to the original code.
try:
    print("Loading 3D render style model...")
    # FIX: the token was read above but never used. Pass it to gr.load so
    # gated/private Spaces can actually be accessed when a token is provided
    # (hf_token=None is equivalent to the previous anonymous behavior).
    interface = gr.load("goofyai/3d_render_style_xl", src="spaces", hf_token=hf_token)

    # Start the interface.
    print("Model loaded successfully, launching interface...")
    interface.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )
except Exception as e:
    print(f"Error loading or launching model: {str(e)}")

    # On failure, build a simple fallback interface so the server still comes up.
    try:
        print("Creating a basic fallback interface...")

        def basic_render(prompt):
            """Placeholder handler used when the real model failed to load.

            Returns a fixed (Turkish) message telling the user to check their
            HuggingFace credentials; the prompt is intentionally ignored.
            """
            return "Model yüklenemedi. Lütfen HuggingFace kimlik bilgilerinizi kontrol edin."

        backup_interface = gr.Interface(
            fn=basic_render,
            inputs=gr.Textbox(label="Input", placeholder="Enter a prompt for 3D rendering"),
            outputs=gr.Textbox(label="Output"),
            title="3D Render Style XL (Fallback Mode)",
            description="Model currently unavailable. Please check your HuggingFace credentials."
        )

        backup_interface.launch(
            share=False,
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True
        )
    # FIX: use a distinct name so the outer exception `e` is not shadowed.
    except Exception as fallback_error:
        print(f"Error creating fallback interface: {str(fallback_error)}")