import express from 'express'; import { fal } from '@fal-ai/client'; // --- Express App Setup --- const app = express(); app.use(express.json({ limit: '50mb' })); app.use(express.urlencoded({ extended: true, limit: '50mb' })); const PORT = process.env.PORT || 3000; // === 全局定义限制 === (Remains the same) const PROMPT_LIMIT = 4800; const SYSTEM_PROMPT_LIMIT = 4800; // === 限制定义结束 === // 定义 fal-ai/any-llm 支持的模型列表 (Remains the same) const FAL_SUPPORTED_MODELS = [ "anthropic/claude-3.7-sonnet", "anthropic/claude-3.5-sonnet", "anthropic/claude-3-5-haiku", "anthropic/claude-3-haiku", "google/gemini-pro-1.5", "google/gemini-flash-1.5", "google/gemini-flash-1.5-8b", "google/gemini-2.0-flash-001", "meta-llama/llama-3.2-1b-instruct", "meta-llama/llama-3.2-3b-instruct", "meta-llama/llama-3.1-8b-instruct", "meta-llama/llama-3.1-70b-instruct", "openai/gpt-4o-mini", "openai/gpt-4o", "deepseek/deepseek-r1", "meta-llama/llama-4-maverick", "meta-llama/llama-4-scout" ]; // Helper function to get owner from model ID (Remains the same) const getOwner = (modelId) => { if (modelId && modelId.includes('/')) { return modelId.split('/')[0]; } return 'fal-ai'; }; /** * Determines if an error likely indicates an API key issue (auth, quota, etc.). * @param {Error} error - The error object caught from the fal client. * @returns {boolean} - True if the error suggests a key failure, false otherwise. */ function isKeyRelatedError(error) { const errorMessage = error?.message?.toLowerCase() || ''; const errorStatus = error?.status; // Assuming the error object might have a status property // Check for common indicators of key issues if (errorStatus === 401 || errorStatus === 403 || // Unauthorized, Forbidden errorMessage.includes('authentication failed') || errorMessage.includes('invalid api key') || errorMessage.includes('permission denied')) { return true; } if (errorStatus === 429 || // Too Many Requests (Rate Limit / Quota) errorMessage.includes('rate limit exceeded') || errorMessage.includes('quota exceeded')) { return true; } return false; } // API Key 鉴权中间件 (Modified to extract FAL key directly) const apiKeyAuth = (req, res, next) => { const authHeader = req.headers['authorization']; if (!authHeader) { console.warn('Unauthorized: No Authorization header provided'); return res.status(401).json({ error: 'Unauthorized: No API Key provided' }); } const authParts = authHeader.split(' '); if (authParts.length !== 2 || authParts[0].toLowerCase() !== 'bearer') { console.warn('Unauthorized: Invalid Authorization header format'); return res.status(401).json({ error: 'Unauthorized: Invalid Authorization header format' }); } const providedKey = authParts[1]; if (!providedKey || providedKey.trim() === '') { console.warn('Unauthorized: Empty API Key'); return res.status(401).json({ error: 'Unauthorized: Empty API Key' }); } // Store the FAL key in the request object for later use req.falKey = providedKey; console.log(`Received request with FAL key ending in ...${providedKey.slice(-4)}`); next(); }; app.use(['/v1/models', '/v1/chat/completions'], apiKeyAuth); // GET /v1/models endpoint (Remains the same) app.get('/v1/models', (req, res) => { console.log("Received request for GET /v1/models"); try { const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({ id: modelId, object: "model", created: 1700000000, owned_by: getOwner(modelId) })); res.json({ object: "list", data: modelsData }); console.log("Successfully returned model list."); } catch (error) { console.error("Error processing GET /v1/models:", error); res.status(500).json({ error: "Failed to retrieve model list." }); } }); // === convertMessagesToFalPrompt 函数 (Remains the same) === function convertMessagesToFalPrompt(messages) { let fixed_system_prompt_content = ""; const conversation_message_blocks = []; // console.log(`Original messages count: ${messages.length}`); // Less verbose logging // 1. 分离 System 消息,格式化 User/Assistant 消息 for (const message of messages) { let content = (message.content === null || message.content === undefined) ? "" : String(message.content); switch (message.role) { case 'system': fixed_system_prompt_content += `System: ${content}\n\n`; break; case 'user': conversation_message_blocks.push(`Human: ${content}\n\n`); break; case 'assistant': conversation_message_blocks.push(`Assistant: ${content}\n\n`); break; default: console.warn(`Unsupported role: ${message.role}`); continue; } } // 2. 截断合并后的 system 消息(如果超长) if (fixed_system_prompt_content.length > SYSTEM_PROMPT_LIMIT) { const originalLength = fixed_system_prompt_content.length; fixed_system_prompt_content = fixed_system_prompt_content.substring(0, SYSTEM_PROMPT_LIMIT); console.warn(`Combined system messages truncated from ${originalLength} to ${SYSTEM_PROMPT_LIMIT}`); } fixed_system_prompt_content = fixed_system_prompt_content.trim(); // 3. 计算 system_prompt 中留给对话历史的剩余空间 let space_occupied_by_fixed_system = 0; if (fixed_system_prompt_content.length > 0) { space_occupied_by_fixed_system = fixed_system_prompt_content.length + 4; // 预留 \n\n...\n\n 的长度 } const remaining_system_limit = Math.max(0, SYSTEM_PROMPT_LIMIT - space_occupied_by_fixed_system); // console.log(`Trimmed fixed system prompt length: ${fixed_system_prompt_content.length}. Approx remaining system history limit: ${remaining_system_limit}`); // 4. 反向填充 User/Assistant 对话历史 const prompt_history_blocks = []; const system_prompt_history_blocks = []; let current_prompt_length = 0; let current_system_history_length = 0; let promptFull = false; let systemHistoryFull = (remaining_system_limit <= 0); // console.log(`Processing ${conversation_message_blocks.length} user/assistant messages for recency filling.`); for (let i = conversation_message_blocks.length - 1; i >= 0; i--) { const message_block = conversation_message_blocks[i]; const block_length = message_block.length; if (promptFull && systemHistoryFull) { // console.log(`Both prompt and system history slots full. Omitting older messages from index ${i}.`); break; } // 优先尝试放入 prompt if (!promptFull) { if (current_prompt_length + block_length <= PROMPT_LIMIT) { prompt_history_blocks.unshift(message_block); current_prompt_length += block_length; continue; } else { promptFull = true; // console.log(`Prompt limit (${PROMPT_LIMIT}) reached. Trying system history slot.`); } } // 如果 prompt 满了,尝试放入 system_prompt 的剩余空间 if (!systemHistoryFull) { if (current_system_history_length + block_length <= remaining_system_limit) { system_prompt_history_blocks.unshift(message_block); current_system_history_length += block_length; continue; } else { systemHistoryFull = true; // console.log(`System history limit (${remaining_system_limit}) reached.`); } } } // 5. *** 组合最终的 prompt 和 system_prompt (包含分隔符逻辑) *** const system_prompt_history_content = system_prompt_history_blocks.join('').trim(); const final_prompt = prompt_history_blocks.join('').trim(); // 定义分隔符 const SEPARATOR = "\n\n-------下面是比较早之前的对话内容-----\n\n"; let final_system_prompt = ""; const hasFixedSystem = fixed_system_prompt_content.length > 0; const hasSystemHistory = system_prompt_history_content.length > 0; if (hasFixedSystem && hasSystemHistory) { final_system_prompt = fixed_system_prompt_content + SEPARATOR + system_prompt_history_content; // console.log("Combining fixed system prompt and history with separator."); } else if (hasFixedSystem) { final_system_prompt = fixed_system_prompt_content; // console.log("Using only fixed system prompt."); } else if (hasSystemHistory) { final_system_prompt = system_prompt_history_content; // console.log("Using only history in system prompt slot."); } // 6. 返回结果 const result = { system_prompt: final_system_prompt, prompt: final_prompt }; console.log(`Final system_prompt length: ${result.system_prompt.length}, Final prompt length: ${result.prompt.length}`); return result; } // === convertMessagesToFalPrompt 函数结束 === /** * Makes a call to the fal.ai API using the provided key. * @param {'stream' | 'subscribe'} operation - The fal operation to perform. * @param {string} functionId - The fal function ID (e.g., "fal-ai/any-llm"). * @param {object} params - The parameters for the fal function call (input, logs, etc.). * @param {string} falKey - The FAL API key to use for this request. * @returns {Promise} - The result from the fal call (stream or subscription result). * @throws {Error} - Throws an error if the call fails. */ async function callFalApi(operation, functionId, params, falKey) { try { // Configure fal client with the provided key fal.config({ credentials: falKey }); if (operation === 'stream') { const streamResult = await fal.stream(functionId, params); console.log(`Successfully initiated stream with key ending in ...${falKey.slice(-4)}`); return streamResult; } else { // 'subscribe' (non-stream) const result = await fal.subscribe(functionId, params); console.log(`Successfully completed subscribe request with key ending in ...${falKey.slice(-4)}`); if (result && result.error) { console.warn(`Fal-ai returned an application error (non-stream) with key ...${falKey.slice(-4)}: ${JSON.stringify(result.error)}`); } return result; } } catch (error) { console.error(`Error using key ending in ...${falKey.slice(-4)}:`, error.message || error); if (isKeyRelatedError(error)) { console.error(`Key-related error detected with key ending in ...${falKey.slice(-4)}`); } throw error; } } // POST /v1/chat/completions endpoint (Modified to use client-provided key) app.post('/v1/chat/completions', async (req, res) => { const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body; const falKey = req.falKey; // Get the FAL key from the request object console.log(`Received chat completion request for model: ${model}, stream: ${stream}`); if (!FAL_SUPPORTED_MODELS.includes(model)) { console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`); } if (!model || !messages || !Array.isArray(messages) || messages.length === 0) { console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages }); return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' }); } try { const { prompt, system_prompt } = convertMessagesToFalPrompt(messages); const falInput = { model: model, prompt: prompt, ...(system_prompt && { system_prompt: system_prompt }), reasoning: !!reasoning, }; console.log("Prepared Fal Input (lengths):", { system_prompt: system_prompt?.length, prompt: prompt?.length }); if (stream) { res.setHeader('Content-Type', 'text/event-stream; charset=utf-8'); res.setHeader('Cache-Control', 'no-cache'); res.setHeader('Connection', 'keep-alive'); res.setHeader('Access-Control-Allow-Origin', '*'); res.flushHeaders(); let previousOutput = ''; let falStream; try { falStream = await callFalApi('stream', "fal-ai/any-llm", { input: falInput }, falKey); for await (const event of falStream) { const currentOutput = (event && typeof event.output === 'string') ? event.output : ''; const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true; const errorInfo = (event && event.error) ? event.error : null; if (errorInfo) { console.error("Error received *during* fal stream:", errorInfo); const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] }; res.write(`data: ${JSON.stringify(errorChunk)}\n\n`); break; } let deltaContent = ''; if (currentOutput.startsWith(previousOutput)) { deltaContent = currentOutput.substring(previousOutput.length); } else if (currentOutput.length > 0) { console.warn("Fal stream output mismatch detected. Sending full current output as delta.", { previousLength: previousOutput.length, currentLength: currentOutput.length }); deltaContent = currentOutput; previousOutput = ''; } previousOutput = currentOutput; if (deltaContent || !isPartial) { const openAIChunk = { id: `chatcmpl-${Date.now()}`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: { content: deltaContent }, finish_reason: isPartial === false ? "stop" : null }] }; res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`); } } res.write(`data: [DONE]\n\n`); res.end(); console.log("Stream finished successfully."); } catch (streamError) { console.error('Error during stream processing:', streamError); if (!res.writableEnded) { try { const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError); const finalErrorChunk = { error: { message: "Stream failed", type: "proxy_error", details: errorDetails } }; res.write(`data: ${JSON.stringify(finalErrorChunk)}\n\n`); res.write(`data: [DONE]\n\n`); res.end(); } catch (finalError) { console.error('Error sending final stream error message to client:', finalError); if (!res.writableEnded) { res.end(); } } } } } else { // Non-stream console.log("Executing non-stream request..."); const result = await callFalApi('subscribe', "fal-ai/any-llm", { input: falInput, logs: true }, falKey); console.log("Received non-stream result from fal-ai."); if (result && result.error) { console.error("Fal-ai returned an application error in non-stream mode (after successful API call):", result.error); return res.status(500).json({ object: "error", message: `Fal-ai application error: ${JSON.stringify(result.error)}`, type: "fal_ai_error", param: null, code: result.error.code || null }); } const openAIResponse = { id: `chatcmpl-${result?.requestId || Date.now()}`, object: "chat.completion", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, message: { role: "assistant", content: result?.output || "" }, finish_reason: "stop" }], usage: { prompt_tokens: null, completion_tokens: null, total_tokens: null }, system_fingerprint: null, ...(result?.reasoning && { fal_reasoning: result.reasoning }), }; res.json(openAIResponse); console.log("Returned non-stream response successfully."); } } catch (error) { console.error('Unhandled error in /v1/chat/completions:', error); if (!res.headersSent) { const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error); const errorType = isKeyRelatedError(error) ? "api_key_error" : "proxy_internal_error"; res.status(500).json({ error: { message: `Internal Server Error in Proxy: ${errorMessage}`, type: errorType, details: error.stack // Optional: include stack in dev/debug mode } }); } else if (!res.writableEnded) { console.error("Headers already sent, attempting to end response after error."); res.end(); } } }); // --- Server Start --- app.listen(PORT, () => { console.log(`===========================================================`); console.log(` Fal OpenAI Proxy Server (Direct Key Mode)`); console.log(` Listening on port: ${PORT}`); console.log(` Using client-provided FAL API keys directly`); console.log(` Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`); console.log(` Chat Completions: POST http://localhost:${PORT}/v1/chat/completions`); console.log(` Models Endpoint: GET http://localhost:${PORT}/v1/models`); console.log(`===========================================================`); }); // Root path response app.get('/', (req, res) => { res.send('Fal OpenAI Proxy (Direct Key Mode) is running.'); });