// src/services/huggingface.js
// This service calls the Hugging Face Inference API.
// It works best with instruction-tuned models, such as Llama 3, that reliably follow prompts asking for JSON output.
export const callHuggingFaceAPI = async (prompt, apiKey, maxTokens = 2000) => {
  if (!apiKey) {
    throw new Error("Hugging Face API Key is required.");
  }

  // Recommended model: Meta's Llama 3 is excellent at following instructions.
  const API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct";

  const headers = {
    "Authorization": `Bearer ${apiKey}`,
    "Content-Type": "application/json",
  };

  const payload = {
    inputs: prompt,
    parameters: {
      max_new_tokens: maxTokens,
      return_full_text: false, // Important: return only the generated text, not the echoed prompt.
      temperature: 0.6, // A bit of creativity while staying largely factual.
      top_p: 0.9,
    },
  };

  try {
    const response = await fetch(API_URL, {
      method: "POST",
      headers: headers,
      body: JSON.stringify(payload),
    });

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`Hugging Face API request failed: ${response.status} - ${errorText}`);
    }

    const data = await response.json();

    // The response is an array; take the generated text from the first element.
    if (data && data[0] && data[0].generated_text) {
      return data[0].generated_text;
    } else {
      throw new Error("Invalid response structure from Hugging Face API.");
    }
  } catch (error) {
    console.error("Hugging Face API Error:", error);
    throw error;
  }
};
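
// Example usage (a minimal sketch, not part of the service above). It shows one way a
// caller might request JSON and parse the reply. The env-variable name VITE_HF_API_KEY
// (a Vite-style convention) and the brace-slicing step are illustrative assumptions,
// since the model may wrap its JSON in extra prose.
export const exampleGenerateJSON = async () => {
  const apiKey = import.meta.env.VITE_HF_API_KEY; // assumed Vite-style env variable
  const prompt = 'Return only a JSON object with keys "title" and "summary" about solar energy.';

  const raw = await callHuggingFaceAPI(prompt, apiKey, 1000);

  // Extract the outermost JSON object in case the model adds surrounding text.
  const start = raw.indexOf("{");
  const end = raw.lastIndexOf("}");
  if (start === -1 || end === -1) {
    throw new Error("No JSON object found in the model output.");
  }
  return JSON.parse(raw.slice(start, end + 1));
};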