import gradio as gr
import torch
from transformers import AutoImageProcessor, AutoModelForDepthEstimation
import numpy as np
from PIL import Image
import cv2
# Device setup
device = "cpu"
print(f"Using device: {device}")

# Load the model
model_name = "depth-anything/Depth-Anything-V2-Small-hf"
processor = AutoImageProcessor.from_pretrained(model_name)
model = AutoModelForDepthEstimation.from_pretrained(model_name)
model.to(device)
model.eval()
print("Model loaded successfully")
def predict_depth(image):
    """Estimate depth for a single input image."""
    if image is None:
        return None, None
    try:
        # Ensure a 3-channel RGB image
        if hasattr(image, 'convert'):
            image = image.convert('RGB')

        # Downscale so the longest side is at most 256 px
        max_size = 256
        if max(image.size) > max_size:
            ratio = max_size / max(image.size)
            new_size = tuple(int(dim * ratio) for dim in image.size)
            image = image.resize(new_size, Image.Resampling.LANCZOS)

        # Depth estimation
        inputs = processor(images=image, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        depth = outputs.predicted_depth.squeeze().cpu().numpy()

        # Visualization: normalize to 0-255 (the epsilon guards against a
        # flat depth map dividing by zero) and apply a viridis colormap
        depth_norm = ((depth - depth.min()) /
                      (depth.max() - depth.min() + 1e-8) * 255).astype(np.uint8)
        depth_colored = cv2.applyColorMap(depth_norm, cv2.COLORMAP_VIRIDIS)
        depth_colored = cv2.cvtColor(depth_colored, cv2.COLOR_BGR2RGB)
        depth_image = Image.fromarray(depth_colored)

        return image, depth_image
    except Exception as e:
        print(f"Error: {e}")
        return image, None
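# Optional refinement (a sketch, not part of the original app): the raw depth
# comes back at the model's working resolution, so it could be resized to the
# input image's size before visualization, e.g.
#   depth = torch.nn.functional.interpolate(
#       outputs.predicted_depth.unsqueeze(1),   # (N, H, W) -> (N, 1, H, W)
#       size=image.size[::-1],                  # PIL size is (W, H)
#       mode="bicubic", align_corners=False,
#   ).squeeze().cpu().numpy()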
# Gradio interface
demo = gr.Interface(
    fn=predict_depth,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil", label="Original"),
        gr.Image(type="pil", label="Depth Map")
    ],
    title="Depth Estimation API",
    description="Depth estimation with Depth Anything V2"
)
if __name__ == "__main__":
    demo.launch()
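# Usage sketch (assumptions: the Space is deployed under the hypothetical id
# "user/depth-estimation-api" and gradio_client is installed; gr.Interface
# exposes the endpoint as "/predict"):
#   from gradio_client import Client, handle_file
#   client = Client("user/depth-estimation-api")
#   original, depth_map = client.predict(
#       handle_file("example.jpg"), api_name="/predict"
#   )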