import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
import torch

MODEL_NAME = "Devishetty100/redmoon-gibberishdetective"
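
# Load the classifier once at startup. Try the one-call pipeline helper first;
# if that fails (e.g. missing tokenizer files in the hosted repo), fall back to
# pairing the model weights with the base distilbert-base-uncased tokenizer,
# assumed here to be the checkpoint this model was fine-tuned from.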
try:
    classifier = pipeline(
        "text-classification",
        model=MODEL_NAME,
        device=0 if torch.cuda.is_available() else -1,
    )
except Exception:
    try:
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
        classifier = pipeline(
            "text-classification",
            model=model,
            tokenizer=tokenizer,
            device=0 if torch.cuda.is_available() else -1,
        )
    except Exception as e:
        raise gr.Error(
            f"Failed to load model: {e}\n\n"
            "Please ensure all model files exist in the repository."
        )
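
# Event handler shared by the textbox submit action and the Analyze button:
# returns the top predicted label and its confidence, shaped for gr.JSON.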
def predict(text):
    try:
        if not text.strip():
            return {"Error": "Please enter some text"}

        result = classifier(text)[0]
        return {
            "Prediction": result["label"],
            "Confidence": f"{result['score']:.2%}",
        }
    except Exception as e:
        return {"Error": str(e)}
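
# UI: a three-line input box next to a JSON results panel, plus canned
# examples (two plain-English, two gibberish) to try with one click.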
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(f"""
    # Gibberish Detective
    *Using model: {MODEL_NAME}*
    """)

    with gr.Row():
        input_text = gr.Textbox(label="Input Text", placeholder="Enter text here...", lines=3)
        output_json = gr.JSON(label="Results")

    gr.Examples(
        examples=[
            ["This is proper English"],
            ["Xkjsd hfkjshdf 9834 kjsdhf!"],
            ["Hello world"],
            ["Hfjsd kjsadf lkjsdf 1234!"],
        ],
        inputs=input_text,
    )

    # Run the classifier on Enter in the text box or on the Analyze button.
    input_text.submit(predict, inputs=input_text, outputs=output_json)
    gr.Button("Analyze").click(predict, inputs=input_text, outputs=output_json)
if __name__ == "__main__":
    demo.launch()