import gradio as gr
from transformers import pipeline
# Define model names
models = {
    "ModernBERT Slop Classifier v1": "underscore2/modernbert_base_slop_classifier",
    "ModernBERT Slop Classifier v2": "underscore2/modernbert_base_slop_classifier_v2",
    "ModernBERT Large Slop Classifier v3 (Best accuracy)": "underscore2/modernbert_large_slop_classifier_v3"
}

# Define the mapping for user-friendly labels
# Note: Transformers pipelines often output 'LABEL_0', 'LABEL_1'.
# We handle potential variations like just '0', '1'.
label_map = {
    "LABEL_0": "Human (0)",
    "0": "Human (0)",
    "LABEL_1": "LLM (1)",
    "1": "LLM (1)"
}

large_v3 = pipeline("text-classification", model="underscore2/modernbert_large_slop_classifier_v3", top_k=None)
# Function to load the selected model and classify text
def classify_text(model_name, text):
    try:
        if models[model_name] != "underscore2/modernbert_large_slop_classifier_v3":
            classifier = pipeline("text-classification", model=models[model_name], top_k=None)
        else:
            classifier = large_v3
        predictions = classifier(text)
        # Process predictions to use friendly labels
        processed_results = {}
        if predictions and isinstance(predictions, list) and predictions[0]:
            # predictions[0] should be a list of label dicts like [{'label': 'LABEL_1', 'score': 0.9...}, ...]
            for pred in predictions[0]:
                raw_label = pred["label"]
                score = pred["score"]
                # Use the map to get a friendly name, fallback to the raw label if not found
                friendly_label = label_map.get(raw_label, raw_label)
                processed_results[friendly_label] = score
        return processed_results
    except Exception as e:
        # Handle potential errors during model loading or inference
        print(f"Error: {e}")
        # gr.Label expects numeric confidences in a dict, but it can also render
        # a plain string, so return the error message directly
        return f"Error: failed to process input: {e}"
# Create the Gradio interface
interface = gr.Interface(
    fn=classify_text,
    inputs=[
        gr.Dropdown(
            list(models.keys()),
            label="Select Model",
            value=list(models.keys())[2]  # Default to the v3 (best accuracy) model
        ),
        gr.Textbox(
            lines=2,
            placeholder="Enter text to classify",
            value="This is an example sentence."
        )
    ],
    # The gr.Label component works well for showing classification scores
    outputs=gr.Label(num_top_classes=2),  # Show both classes explicitly
    title="ModernBERT Slop Classifier",
    description="Enter a sentence to see the slop classification and confidence scores",
)
# Launch the app
if __name__ == "__main__":
    interface.launch()
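# To try the app locally (assuming gradio, transformers and a backend such as
# torch are installed): run `python app.py` and open the local URL that Gradio
# prints (typically http://127.0.0.1:7860).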