from huggingface_hub import InferenceClient
from datetime import datetime
import os


class LLMHandler:
    """Wraps a Hugging Face InferenceClient for task deadline suggestions."""

    def __init__(self):
        self.client = InferenceClient(
            model="mistralai/Mistral-7B-Instruct-v0.3",  # Updated to v0.3
            token=os.getenv("HF_TOKEN")
        )

    def get_deadline_suggestion(self, task_description):
        # Include the current date/time so the model can compute an absolute
        # deadline; without a reference date, the requested YYYY-MM-DD output
        # would be a guess.
        now = datetime.now().strftime("%Y-%m-%d %H:%M")
        prompt = f"""You are a task management assistant. Analyze the task below and provide a realistic deadline suggestion.

Current date and time: {now}

Task Description: "{task_description}"

Follow this format:
1. **Estimated Hours**: [X]
2. **Recommended Deadline**: [YYYY-MM-DD HH:MM]
3. **Priority**: [High/Medium/Low]
4. **Notes**: [Brief explanation]

Example:
1. **Estimated Hours**: 8
2. **Recommended Deadline**: 2024-04-10 18:00
3. **Priority**: High
4. **Notes**: Research papers typically take 5–7 days for 5000 words.

Now analyze the task and return only the structured output."""
        try:
            response = self.client.chat.completions.create(
                messages=[{"role": "user", "content": prompt}],
                max_tokens=500,
                temperature=0.3  # Low temperature for consistent, structured output
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"LLM Error: {str(e)}. Please check HF_TOKEN or try again later."


# Singleton instance
llm = LLMHandler()
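
# Minimal usage sketch (illustrative, not part of the original module):
# run this file directly with HF_TOKEN set in the environment to verify
# the client end to end. The sample task string below is an assumption.
if __name__ == "__main__":
    sample_task = "Write a 5000-word research paper on transformer architectures"
    print(llm.get_deadline_suggestion(sample_task))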