File size: 949 Bytes
4f8d413
 
 
01f5c07
 
 
 
 
 
4f8d413
 
01f5c07
4f8d413
 
01f5c07
 
 
 
 
 
 
 
 
 
 
f01ed26
4f8d413
01f5c07
 
 
 
4f8d413
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import os
from huggingface_hub import InferenceClient

# Hugging Face inference client. No explicit provider is configured:
# routing to a backend is handled server-side by Hugging Face.
client = InferenceClient(
    model="Qwen/Qwen2.5-7B-Instruct",
    token=os.getenv("HF_TOKEN"),  # expects HF_TOKEN in the environment (e.g. Secrets)
)

def analyze_data(prompt, max_tokens=4096):
    """Generate insights from raw search data via the chat-completions API.

    Parameters
    ----------
    prompt : str
        The user prompt containing the data to analyze.
    max_tokens : int, optional
        Upper bound on the generated response length. Defaults to 4096
        (previously hard-coded inside the function).

    Returns
    -------
    str
        The model's reply text on success, or a human-readable error
        string of the form ``"LLM generation failed: ..."`` if the
        request raises — callers never see an exception (best-effort
        contract preserved from the original implementation).
    """
    # Wrap the raw prompt as a single-turn chat conversation; building
    # the list cannot fail, so it stays outside the try block.
    messages = [{"role": "user", "content": prompt}]

    try:
        # Network call to the LLM; any failure (HTTP, auth, timeout) is
        # converted to an error string below rather than propagated.
        completion = client.chat.completions.create(
            messages=messages,
            max_tokens=max_tokens,
        )
        # Return only the assistant's text content, not the full payload.
        return completion.choices[0].message.content
    except Exception as e:
        # NOTE(review): broad catch is deliberate — this function is a
        # top-level boundary that must always return a string.
        return f"LLM generation failed: {str(e)}"