import gradio as gr
import torch
from PIL import Image as PILImage
from transformers import AutoImageProcessor, SiglipForImageClassification

MODEL_IDENTIFIER = "Ateeqq/ai-vs-human-image-detector"

# Use the GPU when one is available; otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print(f"Loading model: {MODEL_IDENTIFIER} on device: {device}")
processor = AutoImageProcessor.from_pretrained(MODEL_IDENTIFIER)
model = SiglipForImageClassification.from_pretrained(MODEL_IDENTIFIER)
model.to(device)
model.eval()
print("Model loaded successfully.")


def predict(image):
    """
    Takes a PIL image, preprocesses it, and returns the prediction probabilities.
    """
    if image is None:
        return None

    # Preprocess the image and move the tensors to the model's device.
    inputs = processor(images=image, return_tensors="pt").to(device)

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Convert logits to probabilities for the single image in the batch.
    probabilities = torch.softmax(logits, dim=-1)[0]

    # Map each class label to its probability, in the format gr.Label expects.
    confidences = {model.config.id2label[i]: score.item() for i, score in enumerate(probabilities)}
    return confidences
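

# A quick way to sanity-check predict() outside the Gradio UI could look like
# the sketch below (kept as a comment so it never runs in the Space;
# "example.jpg" is a hypothetical local file used only for illustration):
#
#   img = PILImage.open("example.jpg")
#   print(predict(img))  # dict mapping each class label to its probability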

image_input = gr.Image(type="pil", label="Upload an Image")
label_output = gr.Label(num_top_classes=2, label="Prediction")

title = "AI vs Human Image Detector"
description = """
This Space uses the `Ateeqq/ai-vs-human-image-detector` model to classify an image as either AI-generated or Human-made.
Upload an image to see the prediction.
"""
article = "Model by [Ateeqq](https://huggingface.co/Ateeqq) | Gradio app created with AI"

gr.Interface(
    fn=predict,
    inputs=image_input,
    outputs=label_output,
    title=title,
    description=description,
    article=article,
).launch(share=True, server_name="0.0.0.0")