import express from "express";
import path from "path";
import { fileURLToPath } from "url";
import dotenv from "dotenv";
import cookieParser from "cookie-parser";
import bodyParser from "body-parser";
import { createRepo, uploadFiles, whoAmI, spaceInfo } from "@huggingface/hub";
import { InferenceClient } from "@huggingface/inference";
import { pipeline } from "@xenova/transformers";

import checkUser from "./middlewares/checkUser.js";
import { PROVIDERS } from "./utils/providers.js";
import { COLORS } from "./utils/colors.js";

dotenv.config();

const app = express();
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const PORT = process.env.APP_PORT || 5173;
const REDIRECT_URI =
  process.env.REDIRECT_URI || `http://localhost:${PORT}/auth/login`;
const MODEL_ID = "deepseek-ai/DeepSeek-V3-0324";
const MAX_REQUESTS_PER_IP = 2;

// API keys that already failed with a quota error; skipped on later requests.
const invalidApiKeys = new Set();

// Local Transformers.js checkpoints, keyed by task type.
const LOCAL_MODELS = {
  CHAT: "Xenova/distilgpt2",
  CODE: "Xenova/codegen-350M-mono",
  INSTRUCTION: "Xenova/gpt2-imdb",
  QA: "Xenova/distilbert-base-uncased-distilled-squad",
  SUMMARIZATION: "Xenova/distilbart-cnn-6-6",
  HTML: "Xenova/codegen-350M-mono",
  PORTUGUESE: "Xenova/gpt2-imdb",
};

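// Pick a local model from keyword heuristics: Portuguese detection first, then
// code / HTML / summarization / question / instruction cues, defaulting to chat.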
function selectModelForPrompt(prompt) {
  const promptLower = prompt.toLowerCase();

  const portugueseWords = ["como", "está", "você", "olá", "por", "que", "para", "quero", "preciso", "site", "código", "crie"];
  const isPortuguese = portugueseWords.some((word) => promptLower.includes(word));

  if (isPortuguese && promptLower.includes("html")) {
    return LOCAL_MODELS.HTML;
  }

  if (
    promptLower.includes("código") ||
    promptLower.includes("code") ||
    promptLower.includes("programa") ||
    promptLower.includes("function") ||
    promptLower.includes("javascript") ||
    promptLower.includes("css")
  ) {
    return LOCAL_MODELS.CODE;
  }

  if (
    promptLower.includes("html") ||
    promptLower.includes("página") ||
    promptLower.includes("site") ||
    promptLower.includes("website") ||
    promptLower.includes("web")
  ) {
    return LOCAL_MODELS.HTML;
  }

  if (
    promptLower.includes("resumo") ||
    promptLower.includes("resume") ||
    promptLower.includes("sumarize")
  ) {
    return LOCAL_MODELS.SUMMARIZATION;
  }

  if (
    promptLower.includes("?") ||
    promptLower.startsWith("o que") ||
    promptLower.startsWith("como") ||
    promptLower.startsWith("por que") ||
    promptLower.startsWith("when") ||
    promptLower.startsWith("what") ||
    promptLower.startsWith("how")
  ) {
    return LOCAL_MODELS.QA;
  }

  if (
    promptLower.includes("crie") ||
    promptLower.includes("faça") ||
    promptLower.includes("implemente") ||
    promptLower.includes("desenvolva") ||
    promptLower.includes("gere")
  ) {
    return LOCAL_MODELS.INSTRUCTION;
  }

  return isPortuguese ? LOCAL_MODELS.PORTUGUESE : LOCAL_MODELS.CHAT;
}

// Cache of loaded pipelines so each model is downloaded and initialized only once.
const modelCache = {};

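// Per-task sampling presets: tighter temperature for code and QA, a longer
// budget for code and instruction following, and n-gram blocking for summaries.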
function getGenerationConfig(modelType, prompt) {
  const config = {
    max_new_tokens: 512,
    temperature: 0.7,
    do_sample: true,
    top_k: 50,
    top_p: 0.95,
  };

  switch (modelType) {
    case LOCAL_MODELS.CODE:
    case LOCAL_MODELS.HTML:
      return {
        ...config,
        max_new_tokens: 1024,
        temperature: 0.4,
        top_p: 0.85,
        repetition_penalty: 1.2,
      };
    case LOCAL_MODELS.QA:
      return {
        ...config,
        max_new_tokens: 300,
        temperature: 0.3,
        top_k: 20,
      };
    case LOCAL_MODELS.SUMMARIZATION:
      return {
        ...config,
        max_new_tokens: 400,
        temperature: 0.6,
        no_repeat_ngram_size: 3,
      };
    case LOCAL_MODELS.INSTRUCTION:
      return {
        ...config,
        max_new_tokens: 800,
        temperature: 0.8,
        top_p: 0.92,
      };
    default:
      return config;
  }
}

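// Lazy pipeline loading, keyed by checkpoint, with the task chosen to match the
// model. The fallback helpers live at module scope because both the agent route
// and /api/ask-ai use them.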
const TASK_FOR_MODEL = {
  [LOCAL_MODELS.QA]: "question-answering",
  [LOCAL_MODELS.SUMMARIZATION]: "summarization",
};

async function getModelPipeline(modelName) {
  if (!modelCache[modelName]) {
    console.log(`Carregando modelo local: ${modelName}`);
    const task = TASK_FOR_MODEL[modelName] || "text-generation";
    modelCache[modelName] = await pipeline(task, modelName);
  }
  return modelCache[modelName];
}

// Ordered alternatives to try when the primary model fails to load or generate.
function getFallbackModels(taskType) {
  const fallbacks = {
    [LOCAL_MODELS.HTML]: [LOCAL_MODELS.CODE, LOCAL_MODELS.INSTRUCTION],
    [LOCAL_MODELS.CODE]: [LOCAL_MODELS.INSTRUCTION, LOCAL_MODELS.CHAT],
    [LOCAL_MODELS.QA]: [LOCAL_MODELS.CHAT, LOCAL_MODELS.INSTRUCTION],
    [LOCAL_MODELS.INSTRUCTION]: [LOCAL_MODELS.CHAT],
    [LOCAL_MODELS.SUMMARIZATION]: [LOCAL_MODELS.CHAT, LOCAL_MODELS.INSTRUCTION],
    [LOCAL_MODELS.CHAT]: [LOCAL_MODELS.INSTRUCTION],
    [LOCAL_MODELS.PORTUGUESE]: [LOCAL_MODELS.CHAT, LOCAL_MODELS.INSTRUCTION],
  };
  return fallbacks[taskType] || [LOCAL_MODELS.CHAT];
}

async function generateWithFallback(prompt, primaryModel, agentId, log) {
  const fallbackModels = [primaryModel, ...getFallbackModels(primaryModel)];
  let lastError = null;

  for (const modelName of fallbackModels) {
    try {
      log(`[Agent:${agentId}] Tentando modelo: ${modelName}...`);
      const generator = await getModelPipeline(modelName);
      log(`[Agent:${agentId}] Modelo carregado, gerando resposta...`);

      const generationOptions = getGenerationConfig(modelName, prompt);
      const result = await generator(prompt, generationOptions);

      // Normalize the output shape across pipeline tasks.
      let text = result[0]?.generated_text ?? result[0]?.summary_text ?? result.answer ?? "";

      if (text.startsWith(prompt)) {
        text = text.substring(prompt.length).trim();
      }

      if (modelName === LOCAL_MODELS.HTML && text.includes("</html>")) {
        text = text.substring(0, text.indexOf("</html>") + 7);
      }

      log(`[Agent:${agentId}] Resposta gerada com sucesso usando ${modelName}!`);
      return { text, modelUsed: modelName };
    } catch (err) {
      log(`[Agent:${agentId}] Erro com modelo ${modelName}: ${err.message}`);
      lastError = err;
    }
  }

  throw lastError || new Error("Todos os modelos falharam");
}

app.use(cookieParser());
app.use(bodyParser.json());
app.use(express.static(path.join(__dirname, "dist")));

// Per-IP request counter used to rate-limit anonymous (token-less) usage.
const ipAddresses = new Map();

// Footer badge injected into deployed pages, linking back to DeepSite and a remix URL.
const getPTag = (repoId) =>
  `<p style="border-radius:8px;text-align:center;font-size:12px;color:#fff;margin-top:16px;position:fixed;left:8px;bottom:8px;z-index:10;background:rgba(0,0,0,0.8);padding:4px 8px;">
Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width:16px;height:16px;vertical-align:middle;margin-right:3px;filter:brightness(0) invert(1);">
<a href="https://enzostvs-deepsite.hf.space" target="_blank" style="color:#fff;text-decoration:underline;">DeepSite</a> -
🧬 <a href="https://enzostvs-deepsite.hf.space?remix=${repoId}" target="_blank" style="color:#fff;text-decoration:underline;">Remix</a>
</p>`;

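// Copy a stream to the response, supporting both async-iterable and
// EventEmitter-style streams. (Not currently referenced by the routes below.)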
async function pipeStream(stream, res) {
  // Async-iterable streams (Web streams, modern Node Readable).
  if (stream[Symbol.asyncIterator]) {
    for await (const chunk of stream) {
      res.write(typeof chunk === "string" ? chunk : chunk.toString());
    }
  }
  // Classic EventEmitter streams.
  else if (typeof stream.on === "function") {
    await new Promise((resolve, reject) => {
      stream.on("data", (c) => res.write(c));
      stream.on("end", resolve);
      stream.on("error", reject);
    });
  } else {
    throw new Error("Stream type not supported");
  }
}

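// OAuth flow: /api/login hands the client the Hugging Face authorize URL, and
// /auth/login exchanges the callback code for a token stored in a cookie.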
app.get("/api/login", (_req, res) => {
  const redirectUrl = `https://huggingface.co/oauth/authorize?client_id=${process.env.OAUTH_CLIENT_ID}&redirect_uri=${REDIRECT_URI}&response_type=code&scope=openid%20profile%20write-repos%20manage-repos%20inference-api&prompt=consent&state=1234567890`;
  res.send({ ok: true, redirectUrl });
});

app.get("/auth/login", async (req, res) => {
  const { code } = req.query;
  if (!code) return res.redirect(302, "/");

  const Authorization = `Basic ${Buffer.from(
    `${process.env.OAUTH_CLIENT_ID}:${process.env.OAUTH_CLIENT_SECRET}`
  ).toString("base64")}`;

  const tokenRes = await fetch("https://huggingface.co/oauth/token", {
    method: "POST",
    headers: { "Content-Type": "application/x-www-form-urlencoded", Authorization },
    body: new URLSearchParams({
      grant_type: "authorization_code",
      code,
      redirect_uri: REDIRECT_URI,
    }),
  });
  const { access_token } = await tokenRes.json();
  if (!access_token) return res.redirect(302, "/");

  res.cookie("hf_token", access_token, {
    httpOnly: false,
    secure: true,
    sameSite: "none",
    maxAge: 30 * 24 * 60 * 60 * 1000,
  });
  res.redirect(302, "/");
});

app.get("/auth/logout", (req, res) => {
  res.clearCookie("hf_token", { httpOnly: false, secure: true, sameSite: "none" });
  res.redirect(302, "/");
});

app.get("/api/@me", checkUser, async (req, res) => {
  const { hf_token } = req.cookies;
  if (process.env.HF_TOKEN) {
    return res.send({ preferred_username: "local-use", isLocalUse: true });
  }
  try {
    const userRes = await fetch("https://huggingface.co/oauth/userinfo", {
      headers: { Authorization: `Bearer ${hf_token}` },
    });
    const user = await userRes.json();
    res.send(user);
  } catch (err) {
    res.clearCookie("hf_token", { httpOnly: false, secure: true, sameSite: "none" });
    res.status(401).send({ ok: false, message: err.message });
  }
});

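// Deploy the generated page as a static Hugging Face Space: create the repo on
// first deploy (with README front matter), inject the footer tag, and upload
// index.html plus the prompt history.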
app.post("/api/deploy", checkUser, async (req, res) => {
  const { html, title, path: repoPath, prompts } = req.body;
  if (!html || (!repoPath && !title)) {
    return res.status(400).send({ ok: false, message: "Missing required fields" });
  }

  let hf_token = req.cookies.hf_token;
  if (process.env.HF_TOKEN) hf_token = process.env.HF_TOKEN;

  try {
    const repo = { type: "space", name: repoPath || "" };
    let readme;
    let newHtml = html;

    if (!repoPath) {
      const { name: username } = await whoAmI({ accessToken: hf_token });
      const slug = title
        .toLowerCase()
        .replace(/[^a-z0-9]+/g, "-")
        .split("-")
        .filter(Boolean)
        .join("-")
        .slice(0, 96);
      repo.name = `${username}/${slug}`;
      await createRepo({ repo, accessToken: hf_token });

      const [cFrom, cTo] = [
        COLORS[Math.floor(Math.random() * COLORS.length)],
        COLORS[Math.floor(Math.random() * COLORS.length)],
      ];
      readme = `---
title: ${slug}
emoji: 🐳
colorFrom: ${cFrom}
colorTo: ${cTo}
sdk: static
pinned: false
tags:
  - deepsite
---

See https://huggingface.co/docs/hub/spaces-config-reference`;
    }

    newHtml = html.replace(/<\/body>/, `${getPTag(repo.name)}</body>`);

    const htmlFile = new Blob([newHtml], { type: "text/html" });
    htmlFile.name = "index.html";
    const promptTxt = new Blob([(prompts || []).join("\n")], { type: "text/plain" });
    promptTxt.name = "prompts.txt";

    const files = [htmlFile, promptTxt];
    if (readme) {
      const md = new Blob([readme], { type: "text/markdown" });
      md.name = "README.md";
      files.push(md);
    }

    await uploadFiles({ repo, files, accessToken: hf_token });
    res.send({ ok: true, path: repo.name });
  } catch (err) {
    res.status(500).send({ ok: false, message: err.message });
  }
});

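// Agent endpoint: streams plain-text logs and results. The special
// "orchestrator" agent plans multi-step work via ChatGPT; all other agents try
// Gemini, ChatGPT, DeepSeek-on-HF, and local models in turn.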
app.post("/api/agent/:agentId/message", async (req, res) => {
  const { agentId } = req.params;
  const { prompt, html, previousPrompt, provider, gemini_api_key, chatgpt_api_key, context } = req.body;

  res.setHeader("Content-Type", "text/plain; charset=utf-8");
  res.setHeader("Cache-Control", "no-cache");
  res.setHeader("Connection", "keep-alive");

  // Mirror every log line to the console and to the streamed response.
  const log = (msg) => {
    console.log(msg);
    res.write(`[LOG] ${msg}\n`);
  };

  if (agentId === "orchestrator") {
    log(`[Agent:${agentId}] Modelo selecionado: ${provider || "auto"}`);

    if (gemini_api_key) log(`[Agent:${agentId}] Chave Gemini (primeiros 5 caracteres): ${gemini_api_key.substring(0, 5)}...`);
    if (chatgpt_api_key) log(`[Agent:${agentId}] Chave ChatGPT (primeiros 5 caracteres): ${chatgpt_api_key.substring(0, 5)}...`);
    if (req.body.hf_token) log(`[Agent:${agentId}] Token HF Body (primeiros 5 caracteres): ${req.body.hf_token.substring(0, 5)}...`);
    log(`[Agent:${agentId}] Orquestrador recebeu solicitação.`);
    log(`[Agent:${agentId}] prompt: ${prompt}`);

    try {
      const chatKey = chatgpt_api_key || process.env.CHATGPT_API_KEY;

      if (!chatKey) {
        res.write("❌ Orquestrador requer uma chave do ChatGPT para funcionar.\n");
        return res.end();
      }

      if (invalidApiKeys.has(chatKey)) {
        log(`[Agent:${agentId}] Chave ChatGPT inválida (quota excedida anteriormente)`);
        res.write("❌ A chave do ChatGPT não está válida (quota excedida). Por favor, use outra chave ou outro agente.\n");
        return res.end();
      }

      log(`[Agent:${agentId}] Criando plano de execução...`);

      const systemPrompt = `Você é um orquestrador de agentes de IA. Sua tarefa é:
1. Analisar a solicitação do usuário
2. Criar um plano de execução com passos específicos
3. Detalhar quais agentes devem executar cada passo

Responda APENAS no seguinte formato JSON:
{
  "plano": "Nome do plano",
  "descrição": "Descrição detalhada do que será feito",
  "passos": [
    {
      "id": 1,
      "descrição": "Descrição detalhada do passo",
      "agente": "ID do agente responsável",
      "prompt": "Instrução específica para este agente"
    }
  ]
}

Os agentes disponíveis são: mike, alex, david`;

      try {
        const response = await fetch("https://api.openai.com/v1/chat/completions", {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${chatKey}`,
          },
          body: JSON.stringify({
            model: "gpt-3.5-turbo",
            messages: [
              { role: "system", content: systemPrompt },
              { role: "user", content: prompt },
            ],
            max_tokens: 1000,
          }),
        });

        if (!response.ok) {
          const error = await response.text();
          throw new Error(`OpenAI API error: ${error}`);
        }

        const completion = await response.json();
        const planText = completion.choices[0].message.content;
        log(`[Agent:${agentId}] Plano criado.`);

        try {
          const plan = JSON.parse(planText);
          log(`[Agent:${agentId}] Plano estruturado: ${plan.plano}`);
          res.write(`# Plano: ${plan.plano}\n\n`);
          res.write(`${plan.descrição}\n\n`);
          res.write(`## Passos:\n`);

          plan.passos.forEach((step, index) => {
            res.write(`${index + 1}. ${step.descrição} (Agente: ${step.agente})\n`);
          });

          res.write(`\nVou iniciar a execução do plano passo a passo.`);
        } catch (parseError) {
          // The model did not return valid JSON; forward the raw plan text.
          log(`[Agent:${agentId}] Erro ao processar plano como JSON: ${parseError.message}`);
          res.write(planText);
        }
      } catch (apiError) {
        log(`[Agent:${agentId}] Erro na API: ${apiError.message}`);

        if (apiError.message && (apiError.message.includes("quota") || apiError.message.includes("insufficient_quota"))) {
          log(`[Agent:${agentId}] ChatGPT quota excedida, marcando chave como inválida`);
          invalidApiKeys.add(chatKey);
          res.write(`❌ Quota da API ChatGPT excedida. Por favor, use outra chave ou outro agente.\n`);
        } else {
          res.write(`❌ Erro ao consultar API: ${apiError.message}\n`);
        }
      }
    } catch (err) {
      log(`[Agent:${agentId}] Erro do orquestrador: ${err.message}`);
      res.write(`❌ Erro ao processar com o orquestrador: ${err.message}\n`);
    }

    return res.end();
  }

  // Non-orchestrator agents: register every available provider as a handler.
  log(`[Agent:${agentId}] Modelo selecionado: ${provider || "auto"}`);

  if (gemini_api_key) log(`[Agent:${agentId}] Chave Gemini (primeiros 5 caracteres): ${gemini_api_key.substring(0, 5)}...`);
  if (chatgpt_api_key) log(`[Agent:${agentId}] Chave ChatGPT (primeiros 5 caracteres): ${chatgpt_api_key.substring(0, 5)}...`);
  if (req.body.hf_token) log(`[Agent:${agentId}] Token HF Body (primeiros 5 caracteres): ${req.body.hf_token.substring(0, 5)}...`);
  log(`[Agent:${agentId}] received prompt.`);
  log(`[Agent:${agentId}] prompt: ${prompt}`);
  if (previousPrompt) log(`[Agent:${agentId}] previousPrompt: ${previousPrompt}`);
  if (html) log(`[Agent:${agentId}] html length: ${html.length}`);
  if (context) log(`[Agent:${agentId}] contexto recebido: ${context.substring(0, 100)}...`);

  const handlers = [];

  const gemKey = gemini_api_key || process.env.GEMINI_API_KEY;
  if (gemKey) {
    log(`[Agent:${agentId}] handler Gemini enabled.`);
    handlers.push({
      name: "Gemini",
      handler: async (p) => {
        log(`[Agent:${agentId}] calling Google Gemini...`);

        const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=${gemKey}`, {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            contents: [
              {
                parts: [{ text: p }],
              },
            ],
            generationConfig: {
              temperature: 0.7,
              maxOutputTokens: 1024,
            },
          }),
        });

        if (!response.ok) {
          const error = await response.json();
          log(`[Agent:${agentId}] Gemini error: ${JSON.stringify(error)}`);
          throw new Error(`Gemini API error: ${error.error?.message || "Unknown error"}`);
        }

        const data = await response.json();
        return data.candidates?.[0]?.content?.parts?.[0]?.text || "";
      },
    });
  }

  const chatKey = chatgpt_api_key || process.env.CHATGPT_API_KEY;
  if (chatKey) {
    log(`[Agent:${agentId}] handler ChatGPT enabled.`);

    if (invalidApiKeys.has(chatKey)) {
      log(`[Agent:${agentId}] Chave ChatGPT ignorada (quota excedida anteriormente)`);
    } else {
      handlers.push({
        name: "ChatGPT",
        handler: async (p) => {
          log(`[Agent:${agentId}] calling OpenAI ChatGPT...`);

          try {
            const response = await fetch("https://api.openai.com/v1/chat/completions", {
              method: "POST",
              headers: {
                "Content-Type": "application/json",
                Authorization: `Bearer ${chatKey}`,
              },
              body: JSON.stringify({
                model: "gpt-3.5-turbo",
                messages: [{ role: "user", content: p }],
                max_tokens: 1000,
              }),
            });

            if (!response.ok) {
              const error = await response.text();

              if (error.includes("quota") || error.includes("insufficient_quota")) {
                log(`[Agent:${agentId}] ChatGPT quota excedida, adicionando chave à lista de inválidas`);
                invalidApiKeys.add(chatKey);
              }

              log(`[Agent:${agentId}] OpenAI error: ${error}`);
              throw new Error(`OpenAI API error: ${error}`);
            }

            const result = await response.json();
            return result.choices[0].message.content;
          } catch (error) {
            if (error.message && (error.message.includes("quota") || error.message.includes("insufficient_quota"))) {
              log(`[Agent:${agentId}] ChatGPT quota excedida, adicionando chave à lista de inválidas`);
              invalidApiKeys.add(chatKey);
            }
            throw error;
          }
        },
      });
    }
  }

  // DeepSeek via Hugging Face Inference is always registered as a fallback.
  log(`[Agent:${agentId}] handler DeepSeek (HF) fallback enabled.`);

  const hfToken = req.body.hf_token || process.env.HF_TOKEN || req.cookies.hf_token || process.env.DEFAULT_HF_TOKEN;

  log(`[Agent:${agentId}] HF Token sources:`);
  log(`[Agent:${agentId}] - req.body.hf_token: ${req.body.hf_token ? "presente" : "ausente"}`);
  log(`[Agent:${agentId}] - process.env.HF_TOKEN: ${process.env.HF_TOKEN ? "presente" : "ausente"}`);
  log(`[Agent:${agentId}] - req.cookies.hf_token: ${req.cookies.hf_token ? "presente" : "ausente"}`);
  log(`[Agent:${agentId}] - process.env.DEFAULT_HF_TOKEN: ${process.env.DEFAULT_HF_TOKEN ? "presente" : "ausente"}`);

  log(`[Agent:${agentId}] HF token final length: ${hfToken ? hfToken.length : 0}`);
  if (hfToken && hfToken.startsWith("hf_")) {
    log(`[Agent:${agentId}] HF token parece válido (começa com 'hf_')`);
  } else {
    log(`[Agent:${agentId}] HF token parece inválido: ${hfToken ? "não começa com hf_" : "é nulo"}`);
  }

  handlers.push({
    name: "Hugging Face (DeepSeek)",
    handler: async (p) => {
      log(`[Agent:${agentId}] calling Hugging Face DeepSeek...`);
      const providerInfo = PROVIDERS[provider === "auto" ? "novita" : provider] || PROVIDERS.novita;

      // Chat-style messages go through the InferenceClient (the raw
      // api-inference endpoint expects a plain string for text generation).
      const client = new InferenceClient(hfToken);
      const completion = await client.chatCompletion({
        model: MODEL_ID,
        provider: providerInfo.id,
        messages: [{ role: "user", content: p }],
        max_tokens: providerInfo.max_tokens,
      });

      return completion.choices?.[0]?.message?.content || "Sem resposta do modelo";
    },
  });

  // Local Transformers.js pipelines are the last resort.
  handlers.push({
    name: "Modelo Local",
    handler: async (p) => {
      log(`[Agent:${agentId}] Tentando usar modelo localmente via Transformers.js...`);

      try {
        const modelName = selectModelForPrompt(p);
        log(`[Agent:${agentId}] Análise inicial sugere modelo: ${modelName}`);

        const { text, modelUsed } = await generateWithFallback(p, modelName, agentId, log);

        if (modelName !== modelUsed) {
          log(`[Agent:${agentId}] Modelo original falhou, usando alternativa: ${modelUsed}`);
        }

        return text;
      } catch (err) {
        log(`[Agent:${agentId}] Erro em todos os modelos locais: ${err.message}`);
        if (err.stack) {
          log(`[Agent:${agentId}] Stack: ${err.stack.split("\n")[0]}`);
        }
        throw new Error(`Erro ao usar modelos locais: ${err.message}`);
      }
    },
  });

  const results = [];
  let anySuccess = false;

  // Query every handler sequentially so the summary can report each provider.
  for (let i = 0; i < handlers.length; i++) {
    const handler = handlers[i];
    let success = false;
    let result = "";
    let error = "";

    try {
      log(`[Agent:${agentId}] Tentando modelo: ${handler.name}...`);
      result = await handler.handler(prompt);
      success = true;
      anySuccess = true;
      log(`[Agent:${agentId}] ✅ ${handler.name} respondeu com sucesso!`);
    } catch (e) {
      error = e.message;
      log(`[Agent:${agentId}] ❌ ${handler.name} falhou: ${error}`);
    }

    results.push({
      name: handler.name,
      success,
      result: success ? result : "",
      error: !success ? error : "",
    });
  }

  res.write("\n\n=== RESULTADOS DA CONSULTA ===\n\n");

  const firstSuccess = results.find((r) => r.success);
  if (firstSuccess) {
    res.write(`⭐ MELHOR RESPOSTA (${firstSuccess.name}):\n\n`);
    res.write(firstSuccess.result);
    res.write("\n\n");
  }

  res.write("📊 RESUMO:\n");
  results.forEach((r) => {
    if (r.success) {
      res.write(`✅ ${r.name}: Sucesso (${r.result.length} caracteres)\n`);
    } else {
      res.write(`❌ ${r.name}: Erro - ${r.error}\n`);
    }
  });

  if (!anySuccess) {
    res.write("\n❌ Todos os provedores falharam. Verifique as configurações das chaves de API.\n");
  }

  res.end();
});

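// Main generation endpoint: try local Transformers.js models first, then fall
// back to streaming DeepSeek through the Hugging Face Inference API.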
app.post("/api/ask-ai", async (req, res) => {
  const { prompt, html, previousPrompt, provider } = req.body;

  // Response headers are set once below, before any streaming starts.
  const log = (msg) => {
    console.log(msg);
  };

  if (!prompt) {
    return res.status(400).send({ ok: false, message: "Missing prompt" });
  }

  const { hf_token } = req.cookies;
  let token = hf_token;
  if (process.env.HF_TOKEN) token = process.env.HF_TOKEN;

  const ip = (
    req.headers["x-forwarded-for"]?.split(",")[0] ||
    req.socket.remoteAddress ||
    ""
  ).trim();
  if (!token) {
    ipAddresses.set(ip, (ipAddresses.get(ip) || 0) + 1);
    if (ipAddresses.get(ip) > MAX_REQUESTS_PER_IP) {
      return res.status(429).send({
        ok: false,
        openLogin: true,
        message: "Log In to continue using the service",
      });
    }
    token = process.env.DEFAULT_HF_TOKEN;
  }

  const DEFAULT = PROVIDERS.novita;
  const sel = provider === "auto" ? DEFAULT : PROVIDERS[provider] || DEFAULT;
  if (provider !== "auto" && prompt.length + (html?.length || 0) >= sel.max_tokens) {
    return res.status(400).send({
      ok: false,
      openSelectProvider: true,
      message: `Context too long. ${sel.name} allows ${sel.max_tokens} tokens.`,
    });
  }

  res.setHeader("Content-Type", "text/plain");
  res.setHeader("Cache-Control", "no-cache");
  res.setHeader("Connection", "keep-alive");

  const client = new InferenceClient(token);
  let buffer = "";

  try {
    log(`[ASK-AI] Tentando usar modelo local via Transformers.js...`);

    const fullPrompt = `Crie um documento HTML completo baseado na seguinte solicitação:
${prompt}
${html ? `\nEsta é a versão atual do código:\n${html}` : ""}
IMPORTANTE:
- Retorne APENAS o código HTML/CSS/JS sem qualquer comentário ou explicação.
- O documento deve ser um arquivo HTML completo com as tags <!DOCTYPE html>, <html>, <head> e <body>.
- Inclua estilos CSS diretamente na tag <style>.
- Inclua JavaScript diretamente na tag <script>.
- Não use bibliotecas externas a menos que seja especificado.
Código HTML:`;

    try {
      log(`[ASK-AI] Selecionando modelo para geração de HTML...`);

      const primaryModel = LOCAL_MODELS.HTML;

      log(`[ASK-AI] Iniciando geração com sistema de fallback...`);

      const fallbackModels = [primaryModel, ...getFallbackModels(primaryModel)];
      let generatedHtml = null;
      let modelUsed = null;
      let lastError = null;

      for (const modelName of fallbackModels) {
        try {
          log(`[ASK-AI] Tentando modelo: ${modelName}...`);
          const generator = await getModelPipeline(modelName);
          const modelConfig = getGenerationConfig(modelName, fullPrompt);

          log(`[ASK-AI] Gerando HTML com ${modelName}...`);
          const result = await generator(fullPrompt, modelConfig);
          generatedHtml = result[0].generated_text;
          modelUsed = modelName;

          log(`[ASK-AI] HTML gerado com sucesso usando ${modelName}`);
          break;
        } catch (modelErr) {
          log(`[ASK-AI] Erro com modelo ${modelName}: ${modelErr.message}`);
          lastError = modelErr;
        }
      }

      if (!generatedHtml) {
        throw lastError || new Error("Todos os modelos falharam");
      }

      // Strip the echoed prompt, if the model repeated it.
      if (generatedHtml.includes(fullPrompt)) {
        generatedHtml = generatedHtml.replace(fullPrompt, "").trim();
      }

      // Extract a complete HTML document, or wrap partial output in a shell.
      let finalHtml = "";
      const htmlMatch = generatedHtml.match(/<(!DOCTYPE|html)[\s\S]*<\/html>/i);

      if (htmlMatch) {
        finalHtml = htmlMatch[0];
        log(`[ASK-AI] HTML completo extraído da resposta`);
      } else if (generatedHtml.includes("<html")) {
        const startIdx = generatedHtml.indexOf("<html");
        const endIdx = generatedHtml.indexOf("</html>");

        if (endIdx > startIdx) {
          finalHtml = generatedHtml.substring(startIdx, endIdx + 7);
          log(`[ASK-AI] Parte HTML extraída da resposta`);
        } else {
          finalHtml = formatarComoHTML(generatedHtml);
          log(`[ASK-AI] HTML formatado a partir da resposta parcial`);
        }
      } else {
        finalHtml = formatarComoHTML(generatedHtml);
        log(`[ASK-AI] HTML criado a partir da resposta`);
      }

      log(`[ASK-AI] Enviando HTML gerado com ${modelUsed} (${finalHtml.length} bytes)`);
      res.write(finalHtml);
      return res.end();
    } catch (localErr) {
      log(`[ASK-AI] Erro ao usar modelos locais: ${localErr.message}. Tentando API remota...`);
    }

    // Remote fallback: stream DeepSeek output until the closing </html> tag.
    const stream = client.chatCompletionStream({
      model: MODEL_ID,
      provider: sel.id,
      messages: [
        {
          role: "system",
          content: `ONLY USE HTML/CSS/JS... ALWAYS RETURN A SINGLE HTML DOCUMENT`,
        },
        ...(previousPrompt ? [{ role: "user", content: previousPrompt }] : []),
        ...(html ? [{ role: "assistant", content: `Current code: ${html}` }] : []),
        { role: "user", content: prompt },
      ],
      ...(sel.id !== "sambanova" ? { max_tokens: sel.max_tokens } : {}),
    });

    for await (const v of stream) {
      const chunk = v.choices?.[0]?.delta?.content;
      if (chunk) {
        buffer += chunk;
        res.write(chunk);
        if (buffer.includes("</html>")) break;
      }
    }

    return res.end();
  } catch (err) {
    // Once streaming has started, a status code can no longer be sent.
    if (!res.headersSent) {
      if (err.message.includes("exceeded your monthly included credits")) {
        return res.status(402).send({
          ok: false,
          openProModal: true,
          message: err.message,
        });
      }
      return res.status(500).send({
        ok: false,
        message: err.message || "Error processing request",
      });
    }
    res.end();
  }
});

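// Remix: fetch an existing static Space's index.html so the user can edit a copy.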
app.get("/api/remix/:username/:repo", async (req, res) => {
  const { username, repo } = req.params;
  let token = req.cookies.hf_token || process.env.DEFAULT_HF_TOKEN;
  if (process.env.HF_TOKEN) token = process.env.HF_TOKEN;

  const repoId = `${username}/${repo}`;
  const url = `https://huggingface.co/spaces/${repoId}/raw/main/index.html`;

  try {
    const space = await spaceInfo({ name: repoId, accessToken: token, additionalFields: ["author"] });
    if (!space || space.sdk !== "static" || space.private) {
      return res.status(404).send({ ok: false, message: "Space not found" });
    }

    const resp = await fetch(url);
    if (!resp.ok) return res.status(404).send({ ok: false, message: "Space not found" });

    let htmlTxt = await resp.text();
    htmlTxt = htmlTxt.replace(getPTag(repoId), "");

    let user = null;
    try {
      const u = await fetch("https://huggingface.co/oauth/userinfo", {
        headers: { Authorization: `Bearer ${req.cookies.hf_token}` },
      });
      user = await u.json();
    } catch {}

    res.send({ ok: true, html: htmlTxt, isOwner: space.author === user?.preferred_username, path: repoId });
  } catch (e) {
    res.status(500).send({ ok: false, message: e.message });
  }
});

// Serve the SPA for all remaining routes.
app.get("*", (_req, res) => {
  res.sendFile(path.join(__dirname, "dist", "index.html"));
});

app.listen(PORT, () => {
  console.log(`Server is running on port ${PORT}`);
});

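// Wrap a model's partial output in a minimal styled HTML document.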
function formatarComoHTML(content) {
  return `<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Generated Page</title>
  <style>
    body {
      font-family: Arial, sans-serif;
      line-height: 1.6;
      margin: 0;
      padding: 20px;
      color: #333;
    }
    h1, h2, h3 { color: #2c3e50; }
    a { color: #3498db; text-decoration: none; }
    a:hover { text-decoration: underline; }
    .container { max-width: 1200px; margin: 0 auto; padding: 0 15px; }
  </style>
</head>
<body>
  <div class="container">
    ${content}
  </div>
</body>
</html>`;
}