Ayan515 committed on
Commit e2ab2df · verified · 1 Parent(s): 20bd3b9

Upload 3 files

Files changed (3)
  1. Dockerfile +17 -0
  2. deepfake_api.py +136 -0
  3. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,17 @@
+ # Use an official Python runtime as a parent image
+ FROM python:3.9-slim
+
+ # Set the working directory in the container
+ WORKDIR /app
+
+ # Copy the requirements file into the container
+ COPY requirements.txt .
+
+ # Install any needed packages specified in requirements.txt
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the application's code to the working directory
+ COPY . .
+
+ # Command to run the application
+ CMD ["uvicorn", "deepfake_api:app", "--host", "0.0.0.0", "--port", "7860"]
deepfake_api.py ADDED
@@ -0,0 +1,136 @@
+ # === FASTAPI BACKEND (deepfake_api.py) ===
+
+ from fastapi import FastAPI, UploadFile, File, HTTPException
+ from fastapi.responses import JSONResponse
+ from fastapi.middleware.cors import CORSMiddleware
+ from transformers import pipeline
+ from PIL import Image
+ import io
+ import torch
+ import numpy as np
+ import cv2
+ import base64
+
+ app = FastAPI()
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ device = 0 if torch.cuda.is_available() else -1
+
+ MODELS_CONFIG = {
+     "SwinV2 Based": {"path": "haywoodsloan/ai-image-detector-deploy", "weight": 0.15},
+     "ViT Based": {"path": "Heem2/AI-vs-Real-Image-Detection", "weight": 0.15},
+     "SDXL Dataset": {"path": "Organika/sdxl-detector", "weight": 0.15},
+     "SDXL + FLUX": {"path": "cmckinle/sdxl-flux-detector_v1.1", "weight": 0.15},
+     "DeepFake v2": {"path": "prithivMLmods/Deep-Fake-Detector-v2-Model", "weight": 0.15},
+     "Midjourney/SDXL": {"path": "ideepankarsharma2003/AI_ImageClassification_MidjourneyV6_SDXL", "weight": 0.10},
+     "ViT v4": {"path": "date3k2/vit-real-fake-classification-v4", "weight": 0.15},
+ }
+
+ models = {}
+ for name, config in MODELS_CONFIG.items():
+     try:
+         models[name] = pipeline("image-classification", model=config["path"], device=device)
+     except Exception as e:
+         print(f"Failed to load model {name}: {e}")
+
+ def pil_to_base64(image):
+     buffered = io.BytesIO()
+     image.save(buffered, format="JPEG")
+     return "data:image/jpeg;base64," + base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+ def gen_ela(img_array, quality=90):
+     if img_array.shape[2] == 4:
+         img_array = cv2.cvtColor(img_array, cv2.COLOR_RGBA2RGB)
+     encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
+     _, buffer = cv2.imencode('.jpg', img_array, encode_param)
+     compressed_img = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
+     ela_img = cv2.absdiff(img_array, compressed_img)
+     ela_img = cv2.convertScaleAbs(ela_img, alpha=10)
+     return Image.fromarray(cv2.cvtColor(ela_img, cv2.COLOR_BGR2RGB))
+
+ def gradient_processing(image_array):
+     gray_img = cv2.cvtColor(image_array, cv2.COLOR_BGR2GRAY)
+     dx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=3)
+     dy = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=3)
+     gradient_magnitude = cv2.magnitude(dx, dy)
+     gradient_img = cv2.normalize(gradient_magnitude, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
+     return Image.fromarray(gradient_img)
+
+ @app.post("/detect")
+ async def detect(image: UploadFile = File(...)):
+     try:
+         import time
+         start_time = time.time()
+
+         image_bytes = await image.read()
+         input_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+
+         img_np = np.array(input_image)
+         img_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
+
+         individual_results = []
+         weighted_ai_score = 0
+         total_weight = 0
+
+         aiModels = []
+         colors = ["bg-red-500", "bg-orange-500", "bg-yellow-500", "bg-green-500", "bg-blue-500", "bg-purple-500", "bg-pink-500"]
+
+         for i, (name, model_pipeline) in enumerate(models.items()):
+             model_weight = MODELS_CONFIG[name]["weight"]
+             predictions = model_pipeline(input_image)
+             confidence = {p['label'].lower(): p['score'] for p in predictions}
+
+             artificial_score = (
+                 confidence.get('artificial', 0) or confidence.get('ai image', 0) or
+                 confidence.get('ai', 0) or confidence.get('deepfake', 0) or
+                 confidence.get('ai_gen', 0) or confidence.get('fake', 0)
+             )
+             real_score = (
+                 confidence.get('real', 0) or confidence.get('real image', 0) or
+                 confidence.get('human', 0) or confidence.get('realism', 0)
+             )
+
+             if artificial_score > 0 and real_score == 0:
+                 real_score = 1.0 - artificial_score
+             elif real_score > 0 and artificial_score == 0:
+                 artificial_score = 1.0 - real_score
+
+             weighted_ai_score += artificial_score * model_weight
+             total_weight += model_weight
+
+             aiModels.append({
+                 "name": name,
+                 "percentage": round(artificial_score * 100, 2),
+                 "color": colors[i % len(colors)]
+             })
+
+         final_score = (weighted_ai_score / total_weight) * 100 if total_weight > 0 else 0
+         verdict = final_score > 50
+         processing_time = int((time.time() - start_time) * 1000)
+
+         # Forensics
+         ela_img = gen_ela(img_bgr)
+         gradient_img = gradient_processing(img_bgr)
+
+         return JSONResponse({
+             "filename": image.filename,
+             "isDeepfake": verdict,
+             "confidence": round(final_score, 2),
+             "aiModels": aiModels,
+             "processingTime": processing_time,
+             "forensics": {
+                 "original": pil_to_base64(input_image),
+                 "ela": pil_to_base64(ela_img),
+                 "gradient": pil_to_base64(gradient_img)
+             },
+             "verdictMessage": f"Consensus: {'Likely AI-Generated' if verdict else 'Likely Human-Made (Real)'}"
+         })
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
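With the server listening on port 7860 (e.g., started via the Dockerfile above), the /detect endpoint accepts a single multipart file field named image and returns the consensus verdict plus base64-encoded forensic views. The client below is a sketch, not part of this commit: the URL and test.jpg path are placeholders, and it assumes the requests package is installed on the client side (it is absent from requirements.txt, which only covers the server).

# client_example.py (hypothetical, not part of this commit)
import base64
import requests

URL = "http://localhost:7860/detect"  # placeholder; adjust to wherever the container is exposed

# The multipart field must be named "image" to match the UploadFile parameter of detect()
with open("test.jpg", "rb") as f:
    resp = requests.post(URL, files={"image": ("test.jpg", f, "image/jpeg")})
resp.raise_for_status()
data = resp.json()

print(data["verdictMessage"], f'({data["confidence"]}% AI, {data["processingTime"]} ms)')
for m in data["aiModels"]:
    print(f'  {m["name"]}: {m["percentage"]}%')

# Forensic images come back as data URLs; strip the prefix to recover the JPEG bytes
ela_b64 = data["forensics"]["ela"].split(",", 1)[1]
with open("ela.jpg", "wb") as out:
    out.write(base64.b64decode(ela_b64))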
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ fastapi
+ uvicorn[standard]
+ python-multipart
+ transformers
+ torch
+ Pillow
+ numpy
+ opencv-python-headless
+ # base64 ships with the Python standard library; no extra package is required
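A quick way to confirm these dependencies resolve inside the built image is an import smoke test. The sketch below is hypothetical and not part of the commit; python-multipart is omitted from the loop because FastAPI only consumes it indirectly for UploadFile parsing.

# smoke_test.py (hypothetical): check that each runtime dependency imports and report its version
import fastapi, uvicorn, transformers, torch, numpy, cv2, PIL

for mod in (fastapi, uvicorn, transformers, torch, numpy, cv2, PIL):
    print(f"{mod.__name__:<14} {getattr(mod, '__version__', 'unknown')}")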