# NOTE: "Spaces: Sleeping" status text from the Hugging Face Spaces page was
# accidentally captured above this file's contents; removed as non-code residue.
# handler.py
import requests

# Replace this with your actual model URL (must be PUBLIC!)
MODEL_URL = "https://api-inference.huggingface.co/models/CLASSIFIED-HEX/X"
def generate_text(prompt, max_tokens=250, temperature=0.7, top_p=0.95, top_k=50,
                  repetition_penalty=1.2, trim_output=False, timeout=60):
    """Send *prompt* to the Hugging Face Inference API and return generated text.

    Args:
        prompt: Input text for the model.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        top_k: Top-k sampling cutoff.
        repetition_penalty: Penalty applied to repeated tokens.
        trim_output: If True, strip the echoed prompt from the start of the output.
        timeout: Seconds to wait for the HTTP request before giving up
            (previously unbounded — a stalled endpoint would hang forever).

    Returns:
        The generated text, or a human-readable error string (this function
        never raises; callers rely on the string-error contract).
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
            "repetition_penalty": repetition_penalty
        }
    }
    try:
        # timeout prevents an indefinite hang when the endpoint is cold/unreachable
        response = requests.post(MODEL_URL, json=payload, timeout=timeout)
        if response.status_code != 200:
            return f"❌ Error {response.status_code}: {response.text}"
        result = response.json()
        # The API may return either a list of generations or a single dict;
        # an empty list previously raised IndexError inside the broad except.
        if isinstance(result, list):
            text = result[0].get("generated_text", "") if result else ""
        else:
            text = result.get("generated_text", "")
        if trim_output and text.startswith(prompt):
            # The API echoes the prompt at the start of the generation; drop it.
            text = text[len(prompt):].lstrip()
        return text or "⚠️ No output returned."
    except Exception as e:
        # Boundary handler: network errors, JSON decode errors, etc. are all
        # reported as strings per this function's contract.
        return f"🔥 Generation failed: {str(e)}"