feat: added files, sentence-wise text detector for np language
.gitignore
CHANGED
@@ -59,3 +59,4 @@ model/
 models/.gitattributes #<-- This line can stay if you only want to ignore that file, not the whole folder
 
 todo.md
+np_text_model
features/nepali_text_classifier/controller.py
CHANGED
@@ -1,12 +1,22 @@
 import asyncio
-from
+from io import BytesIO
+from fastapi import HTTPException, UploadFile, status, Depends
 from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
 import os
+import logging
 
 from features.nepali_text_classifier.inferencer import classify_text
+from features.nepali_text_classifier.preprocess import *
+import re
 
 security = HTTPBearer()
 
+def contains_english(text: str) -> bool:
+    # Drop newlines and tabs before scanning for Latin letters
+    cleaned = text.replace("\n", "").replace("\t", "")
+    return bool(re.search(r'[a-zA-Z]', cleaned))
+
+
 async def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
     token = credentials.credentials
     expected_token = os.getenv("MY_SECRET_TOKEN")
@@ -18,18 +28,104 @@ async def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
     return token
 
 async def nepali_text_analysis(text: str):
-
+    text = end_symbol_for_NP_text(text)
     words = text.split()
     if len(words) < 10:
         raise HTTPException(status_code=400, detail="Text must contain at least 10 words")
     if len(text) > 10000:
         raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
 
-
-
-
-
-
+    result = await asyncio.to_thread(classify_text, text)
+
+    return result
+
+
+# Extract text from uploaded files (.docx, .pdf, .txt)
+async def extract_file_contents(file: UploadFile) -> str:
+    content = await file.read()
+    file_stream = BytesIO(content)
+    if file.content_type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
+        return parse_docx(file_stream)
+    elif file.content_type == "application/pdf":
+        return parse_pdf(file_stream)
+    elif file.content_type == "text/plain":
+        return parse_txt(file_stream)
+    else:
+        raise HTTPException(status_code=415, detail="Invalid file type. Only .docx, .pdf and .txt are allowed")
+
+async def handle_file_upload(file: UploadFile):
+    try:
+        file_contents = await extract_file_contents(file)
+        file_contents = end_symbol_for_NP_text(file_contents)
+        if len(file_contents) > 10000:
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
+
+        cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
+        if not cleaned_text:
+            raise HTTPException(status_code=404, detail="The file is empty or only contains whitespace.")
+
+        result = await asyncio.to_thread(classify_text, cleaned_text)
+        return result
+    except Exception as e:
+        logging.error(f"Error processing file: {e}")
+        raise HTTPException(status_code=500, detail="Error processing the file")
+
+
+
+async def handle_sentence_level_analysis(text: str):
+    text = text.strip()
+    if len(text) > 10000:
+        raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
+
+    # Ensure text ends with danda so the last sentence is included
+    text = end_symbol_for_NP_text(text)
+
+    # Split text into sentences
+    sentences = [s.strip() + "।" for s in text.split("।") if s.strip()]
+
+    results = []
+    for sentence in sentences:
+        sentence = end_symbol_for_NP_text(sentence)
+        result = await asyncio.to_thread(classify_text, sentence)
+        results.append({
+            "text": sentence,
+            "result": result["label"],
+            "likelihood": result["confidence"]
+        })
+
+    return {"analysis": results}
+
+
+async def handle_file_sentence(file: UploadFile):
+    try:
+        file_contents = await extract_file_contents(file)
+        if len(file_contents) > 10000:
+            raise HTTPException(status_code=413, detail="Text must be less than 10,000 characters")
+
+        cleaned_text = file_contents.replace("\n", " ").replace("\t", " ").strip()
+        if not cleaned_text:
+            raise HTTPException(status_code=404, detail="The file is empty or only contains whitespace.")
+        # Ensure text ends with danda so last sentence is included
+        cleaned_text = end_symbol_for_NP_text(cleaned_text)
+        # Split text into sentences
+        sentences = [s.strip() + "।" for s in cleaned_text.split("।") if s.strip()]
+
+        results = []
+        for sentence in sentences:
+            sentence = end_symbol_for_NP_text(sentence)
+
+            result = await asyncio.to_thread(classify_text, sentence)
+            results.append({
+                "text": sentence,
+                "result": result["label"],
+                "likelihood": result["confidence"]
+            })
+
+        return {"analysis": results}
+
+    except Exception as e:
+        logging.error(f"Error processing file: {e}")
+        raise HTTPException(status_code=500, detail="Error processing the file")
+
 
 def classify(text: str):
     return classify_text(text)
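The new handlers offload classify_text to a worker thread with asyncio.to_thread so the model's synchronous inference does not block the event loop. A minimal sketch of driving the sentence-level handler directly, outside FastAPI (assuming the model weights are available locally; the Nepali sample text is made up):

import asyncio

from features.nepali_text_classifier.controller import handle_sentence_level_analysis

async def main():
    # Two sentences separated by the danda (।) terminator
    text = "नेपाल एक सुन्दर देश हो। यहाँ धेरै हिमालहरू छन्।"
    report = await handle_sentence_level_analysis(text)
    for entry in report["analysis"]:
        print(entry["text"], entry["result"], entry["likelihood"])

asyncio.run(main())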
features/nepali_text_classifier/inferencer.py
CHANGED
@@ -19,3 +19,5 @@ def classify_text(text: str):
 
     return {"label": "Human" if pred == 0 else "AI", "confidence": round(prob_percent, 2)}
 
+
+
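For reference, classify_text returns a label/confidence pair that the sentence-level handlers remap into "result"/"likelihood". The values below are illustrative, not real model output:

from features.nepali_text_classifier.inferencer import classify_text

result = classify_text("नेपाल एक सुन्दर देश हो।")
# e.g. {"label": "Human", "confidence": 97.42}
# handle_sentence_level_analysis repackages this per sentence as:
# {"text": "नेपाल एक सुन्दर देश हो।", "result": "Human", "likelihood": 97.42}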
features/nepali_text_classifier/preprocess.py
CHANGED
@@ -30,3 +30,10 @@ def parse_pdf(file: BytesIO):
 def parse_txt(file: BytesIO):
     return file.read().decode("utf-8")
 
+
+def end_symbol_for_NP_text(text):
+    # Nepali sentences end with the danda (।); append it if missing.
+    # Strings are immutable, so the result must be returned and assigned by the caller.
+    if not text.endswith("।"):
+        text += "।"
+    return text
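A quick sanity check of the helper's intended behavior (the value-returning version above; the sample string is arbitrary):

from features.nepali_text_classifier.preprocess import end_symbol_for_NP_text

assert end_symbol_for_NP_text("नेपाल एक सुन्दर देश हो") == "नेपाल एक सुन्दर देश हो।"
# Text that already ends with the danda is returned unchanged
assert end_symbol_for_NP_text("नेपाल एक सुन्दर देश हो।") == "नेपाल एक सुन्दर देश हो।"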
features/nepali_text_classifier/routes.py
CHANGED
@@ -1,12 +1,13 @@
 from slowapi import Limiter
 from config import ACCESS_RATE
-from .controller import nepali_text_analysis
+from .controller import handle_file_sentence, handle_sentence_level_analysis, nepali_text_analysis
 from .inferencer import classify_text
-from fastapi import APIRouter, Request, Depends, HTTPException
+from fastapi import APIRouter, File, Request, Depends, HTTPException, UploadFile
 from fastapi.security import HTTPBearer
 from slowapi import Limiter
 from slowapi.util import get_remote_address
 from pydantic import BaseModel
+from .controller import handle_file_upload
 router = APIRouter()
 limiter = Limiter(key_func=get_remote_address)
 security = HTTPBearer()
@@ -18,10 +19,25 @@ class TextInput(BaseModel):
 @router.post("/analyse")
 @limiter.limit(ACCESS_RATE)
 async def analyse(request: Request, data: TextInput, token: str = Depends(security)):
-    # Token is available as `token.credentials`, add validation if needed
     result = classify_text(data.text)
     return result
 
+@router.post("/upload")
+@limiter.limit(ACCESS_RATE)
+async def upload_file(request: Request, file: UploadFile = File(...), token: str = Depends(security)):
+    return await handle_file_upload(file)
+
+@router.post("/analyse-sentences")
+@limiter.limit(ACCESS_RATE)
+async def analyse_sentences(request: Request, data: TextInput, token: str = Depends(security)):
+    return await handle_sentence_level_analysis(data.text)
+
+@router.post("/file-sentences-analyse")
+@limiter.limit(ACCESS_RATE)
+async def analyze_sentence_file(request: Request, file: UploadFile = File(...), token: str = Depends(security)):
+    return await handle_file_sentence(file)
+
+
 @router.get("/health")
 @limiter.limit(ACCESS_RATE)
 def health(request: Request):
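All three new endpoints sit behind the bearer-token check and the slowapi rate limit. A sketch of calling them with requests; the base URL, route prefix, and token are placeholders that depend on how the router is mounted and where the Space is deployed:

import requests

BASE = "http://localhost:8000"  # placeholder; use the deployed Space URL plus any router prefix
HEADERS = {"Authorization": "Bearer <MY_SECRET_TOKEN>"}

# Sentence-level analysis of raw text
resp = requests.post(
    f"{BASE}/analyse-sentences",
    json={"text": "नेपाल एक सुन्दर देश हो। यहाँ धेरै हिमालहरू छन्।"},
    headers=HEADERS,
)
print(resp.json())  # {"analysis": [{"text": ..., "result": ..., "likelihood": ...}, ...]}

# Whole-file classification (.docx, .pdf, or .txt); content type must match
with open("sample.txt", "rb") as f:
    resp = requests.post(f"{BASE}/upload", files={"file": ("sample.txt", f, "text/plain")}, headers=HEADERS)
print(resp.json())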
features/text_classifier/controller.py
CHANGED
@@ -52,7 +52,7 @@ async def extract_file_contents(file: UploadFile) -> str:
     else:
         raise HTTPException(
             status_code=415,
-            detail="Invalid file type. Only .docx, .pdf
+            detail="Invalid file type. Only .docx, .pdf and .txt are allowed."
         )
 
 # Classify text from uploaded file