Spaces:
Sleeping
Sleeping
feat: added some required features
Browse files- app.py +4 -1
- features/image_classifier/__init__.py +0 -0
- features/image_classifier/controller.py +0 -0
- features/image_classifier/inferencer.py +0 -0
- features/image_classifier/model_loader.py +0 -0
- features/image_classifier/preprocess.py +9 -0
- features/image_classifier/routes.py +21 -0
- features/nepali_text_classifier/model_loader.py +1 -1
- features/text_classifier/model_loader.py +1 -1
- models/.gitattributes +35 -0
app.py
CHANGED
|
@@ -6,7 +6,9 @@ from slowapi.util import get_remote_address
|
|
| 6 |
from fastapi.responses import JSONResponse
|
| 7 |
from features.text_classifier.routes import router as text_classifier_router
|
| 8 |
from features.nepali_text_classifier.routes import router as nepali_text_classifier_router
|
|
|
|
| 9 |
from config import ACCESS_RATE
|
|
|
|
| 10 |
import requests
|
| 11 |
limiter = Limiter(key_func=get_remote_address, default_limits=[ACCESS_RATE])
|
| 12 |
|
|
@@ -27,11 +29,12 @@ app.add_middleware(SlowAPIMiddleware)
|
|
| 27 |
# Include your routes
|
| 28 |
app.include_router(text_classifier_router, prefix="/text")
|
| 29 |
app.include_router(nepali_text_classifier_router,prefix="/NP")
|
|
|
|
| 30 |
@app.get("/")
|
| 31 |
@limiter.limit(ACCESS_RATE)
|
| 32 |
async def root(request: Request):
|
| 33 |
return {
|
| 34 |
"message": "API is working",
|
| 35 |
-
"endpoints": ["/text/analyse", "/text/upload", "/text/analyse-sentences", "/text/analyse-sentance-file"]
|
| 36 |
}
|
| 37 |
|
|
|
|
| 6 |
from fastapi.responses import JSONResponse
|
| 7 |
from features.text_classifier.routes import router as text_classifier_router
|
| 8 |
from features.nepali_text_classifier.routes import router as nepali_text_classifier_router
|
| 9 |
+
from features.image_classifier.routes import router as image_classifier_router
|
| 10 |
from config import ACCESS_RATE
|
| 11 |
+
|
| 12 |
import requests
|
| 13 |
limiter = Limiter(key_func=get_remote_address, default_limits=[ACCESS_RATE])
|
| 14 |
|
|
|
|
| 29 |
# Include your routes
|
| 30 |
app.include_router(text_classifier_router, prefix="/text")
|
| 31 |
app.include_router(nepali_text_classifier_router,prefix="/NP")
|
| 32 |
+
app.include_router(image_classifier_router,prefix="/AI-image")
|
| 33 |
@app.get("/")
|
| 34 |
@limiter.limit(ACCESS_RATE)
|
| 35 |
async def root(request: Request):
|
| 36 |
return {
|
| 37 |
"message": "API is working",
|
| 38 |
+
"endpoints": ["/text/analyse", "/text/upload", "/text/analyse-sentences", "/text/analyse-sentance-file","/NP/analyse","/NP/upload","/NP/analyse-sentences","/NP/file-sentences-analyse","/AI-image/analyse"]
|
| 39 |
}
|
| 40 |
|
features/image_classifier/__init__.py
ADDED
|
File without changes
|
features/image_classifier/controller.py
ADDED
|
File without changes
|
features/image_classifier/inferencer.py
ADDED
|
File without changes
|
features/image_classifier/model_loader.py
ADDED
|
File without changes
|
features/image_classifier/preprocess.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import cv2
import numpy as np


def image_preprocessing(img_path):
    """Load an image from disk and prepare it for model inference.

    Reads the image at *img_path*, resizes it to 128x128, converts OpenCV's
    BGR channel order to RGB, scales pixel values into [0, 1], and prepends
    a batch dimension.

    Args:
        img_path: Filesystem path to the image file.

    Returns:
        A numpy array of shape (1, 128, 128, 3) with float values in [0, 1].

    Raises:
        ValueError: If the file cannot be read as an image.
    """
    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread signals failure by returning None (no exception);
        # fail loudly here instead of crashing later inside cv2.resize.
        raise ValueError(f"Could not read image: {img_path}")
    img = cv2.resize(img, (128, 128))
    # BUG FIX: the original used cv2.COLOR_BayerGR2RGB, which performs Bayer
    # demosaicing of raw sensor data. cv2.imread returns an ordinary BGR
    # image, so the correct conversion code is COLOR_BGR2RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255.0
    img = np.expand_dims(img, axis=0)
    return img
|
features/image_classifier/routes.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from config import ACCESS_RATE
from fastapi import APIRouter, File, Request, Depends, HTTPException, UploadFile
from fastapi.security import HTTPBearer
from slowapi import Limiter
from slowapi.util import get_remote_address

# Router for the AI-image feature; mounted in app.py under the
# "/AI-image" prefix.
router = APIRouter()
# Per-route rate limiter keyed by client IP; the shared ACCESS_RATE limit
# comes from the project config.
limiter = Limiter(key_func=get_remote_address)
# Bearer-token security dependency; token validation is not implemented yet.
security = HTTPBearer()


@router.post("/analyse")
@limiter.limit(ACCESS_RATE)
async def analyse(request: Request, file: UploadFile, token: str = Depends(security)):
    """Accept an uploaded image for analysis.

    Currently a stub: it echoes the uploaded file's name. Inference is not
    wired up yet.
    """
    # BUG FIX: the original returned the UploadFile object itself under
    # "filename"; return the actual name string so the response is a
    # meaningful, JSON-serializable payload.
    return {"filename": file.filename}


@router.get("/health")
@limiter.limit(ACCESS_RATE)
def health(request: Request):
    """Liveness probe for the image-classifier feature."""
    return {"status": "ok"}
|
features/nepali_text_classifier/model_loader.py
CHANGED
|
@@ -8,7 +8,7 @@ from huggingface_hub import snapshot_download
|
|
| 8 |
from transformers import AutoTokenizer, AutoModel
|
| 9 |
|
| 10 |
# Configs
|
| 11 |
-
REPO_ID = "
|
| 12 |
BASE_DIR = "./np_text_model"
|
| 13 |
TOKENIZER_DIR = os.path.join(BASE_DIR, "classifier") # <- update this to match your uploaded folder
|
| 14 |
WEIGHTS_PATH = os.path.join(BASE_DIR, "model_95_acc.pth") # <- change to match actual uploaded weight
|
|
|
|
| 8 |
from transformers import AutoTokenizer, AutoModel
|
| 9 |
|
| 10 |
# Configs
|
| 11 |
+
REPO_ID = "can-org/Nepali-AI-VS-HUMAN"
|
| 12 |
BASE_DIR = "./np_text_model"
|
| 13 |
TOKENIZER_DIR = os.path.join(BASE_DIR, "classifier") # <- update this to match your uploaded folder
|
| 14 |
WEIGHTS_PATH = os.path.join(BASE_DIR, "model_95_acc.pth") # <- change to match actual uploaded weight
|
features/text_classifier/model_loader.py
CHANGED
|
@@ -6,7 +6,7 @@ from huggingface_hub import snapshot_download
|
|
| 6 |
import torch
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
load_dotenv()
|
| 9 |
-
REPO_ID = "
|
| 10 |
MODEL_DIR = "./models"
|
| 11 |
TOKENIZER_DIR = os.path.join(MODEL_DIR, "model")
|
| 12 |
WEIGHTS_PATH = os.path.join(MODEL_DIR, "model_weights.pth")
|
|
|
|
| 6 |
import torch
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
load_dotenv()
|
| 9 |
+
REPO_ID = "can-org/AI-Content-Checker"
|
| 10 |
MODEL_DIR = "./models"
|
| 11 |
TOKENIZER_DIR = os.path.join(MODEL_DIR, "model")
|
| 12 |
WEIGHTS_PATH = os.path.join(MODEL_DIR, "model_weights.pth")
|
models/.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|