import express from 'express';
import { fal } from '@fal-ai/client';

// FAL_KEY holds one or more fal.ai keys, comma-separated, used for failover.
const rawFalKeys = process.env.FAL_KEY;
// API_KEY is the Bearer token clients must present to this proxy.
const API_KEY = process.env.API_KEY;

if (!rawFalKeys) {
    console.error("Error: FAL_KEY environment variable is not set (should be comma-separated).");
    process.exit(1);
}

if (!API_KEY) {
    console.error("Error: API_KEY environment variable is not set.");
    process.exit(1);
}

// Parse FAL_KEY into per-key state objects for rotation and cooldown tracking.
let falKeys = rawFalKeys.split(',')
    .map(key => key.trim())
    .filter(key => key.length > 0)
    .map(key => ({
        key: key,
        failed: false,
        failedTimestamp: 0
    }));

if (falKeys.length === 0) {
    console.error("Error: No valid keys found in FAL_KEY after processing the environment variable.");
    process.exit(1);
}

let currentKeyIndex = 0;
const failedKeyCooldown = 60 * 1000; // 60s before a failed key becomes eligible again

console.log(`Loaded ${falKeys.length} FAL API Key(s) from FAL_KEY environment variable.`);
console.log(`Failed key cooldown period: ${failedKeyCooldown / 1000} seconds.`);
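
// Example environment configuration (hypothetical key values):
//
//   FAL_KEY="fal-key-aaaa,fal-key-bbbb,fal-key-cccc"
//   API_KEY="my-proxy-secret"
//   PORT=3000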

// Round-robin key selection. Keys marked as failed are skipped while their
// cooldown is active and automatically reinstated once it expires. Returns
// null when every key is failed and still cooling down.
function getNextKey() {
    const totalKeys = falKeys.length;
    if (totalKeys === 0) return null;

    let attempts = 0;
    while (attempts < totalKeys) {
        const keyIndex = currentKeyIndex % totalKeys;
        const keyInfo = falKeys[keyIndex];
        currentKeyIndex = (currentKeyIndex + 1) % totalKeys;

        if (keyInfo.failed) {
            const now = Date.now();
            if (now - keyInfo.failedTimestamp < failedKeyCooldown) {
                attempts++;
                continue;
            } else {
                console.log(`Cooldown finished for key index ${keyIndex}. Resetting failure status.`);
                keyInfo.failed = false;
                keyInfo.failedTimestamp = 0;
            }
        }

        return keyInfo;
    }

    console.warn("All FAL keys are currently marked as failed and in cooldown.");
    return null;
}
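
// A minimal sketch of the rotation behavior, assuming three keys were loaded:
//
//   getNextKey(); // -> key 0
//   getNextKey(); // -> key 1
//   markKeyFailed(falKeys[2]);
//   getNextKey(); // -> key 0 (key 2 is skipped while in cooldown)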

// Flag a key as failed so rotation skips it until the cooldown expires.
function markKeyFailed(keyInfo) {
    if (keyInfo && !keyInfo.failed) {
        keyInfo.failed = true;
        keyInfo.failedTimestamp = Date.now();
        const keyIndex = falKeys.findIndex(k => k.key === keyInfo.key);
        console.warn(`Marking key index ${keyIndex} (ending ...${keyInfo.key.slice(-4)}) as failed.`);
    }
}

// Heuristic classification: authentication failures (401/403) and rate/quota
// errors (429) are treated as key-specific, so the caller rotates to the next
// key. All other errors abort the request instead of burning through keys.
function isKeyRelatedError(error) {
    const errorMessage = error?.message?.toLowerCase() || '';
    const errorStatus = error?.status;

    if (errorStatus === 401 || errorStatus === 403 ||
        errorMessage.includes('authentication failed') ||
        errorMessage.includes('invalid api key') ||
        errorMessage.includes('permission denied')) {
        return true;
    }
    if (errorStatus === 429 ||
        errorMessage.includes('rate limit exceeded') ||
        errorMessage.includes('quota exceeded')) {
        return true;
    }

    return false;
}
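
// Classification sketch (hypothetical error objects):
//
//   isKeyRelatedError({ status: 429, message: 'Rate limit exceeded' }); // true  -> rotate keys
//   isKeyRelatedError({ status: 500, message: 'Internal error' });      // false -> abort request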

const app = express();
app.use(express.json({ limit: '50mb' }));
app.use(express.urlencoded({ extended: true, limit: '50mb' }));

const PORT = process.env.PORT || 3000;

// Bearer-token authentication for the proxy's own endpoints.
const apiKeyAuth = (req, res, next) => {
    const authHeader = req.headers['authorization'];

    if (!authHeader) {
        console.warn('Unauthorized: No Authorization header provided');
        return res.status(401).json({ error: 'Unauthorized: No API Key provided' });
    }

    const authParts = authHeader.split(' ');
    if (authParts.length !== 2 || authParts[0].toLowerCase() !== 'bearer') {
        console.warn('Unauthorized: Invalid Authorization header format');
        return res.status(401).json({ error: 'Unauthorized: Invalid Authorization header format' });
    }

    const providedKey = authParts[1];
    if (providedKey !== API_KEY) {
        console.warn('Unauthorized: Invalid API Key');
        return res.status(401).json({ error: 'Unauthorized: Invalid API Key' });
    }

    next();
};

app.use(['/v1/models', '/v1/chat/completions'], apiKeyAuth);
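
// A request therefore needs the proxy key in the Authorization header, e.g.
// (assuming the proxy runs on localhost:3000 with API_KEY="my-proxy-secret"):
//
//   fetch('http://localhost:3000/v1/models', {
//     headers: { 'Authorization': 'Bearer my-proxy-secret' }
//   });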

// Character budgets for the prompt and system_prompt fields sent to fal-ai/any-llm.
const PROMPT_LIMIT = 4800;
const SYSTEM_PROMPT_LIMIT = 4800;

// Models advertised via /v1/models. Requests for other models are allowed but
// logged with a warning.
const FAL_SUPPORTED_MODELS = [
    "anthropic/claude-3.7-sonnet",
    "anthropic/claude-3.5-sonnet",
    "anthropic/claude-3-5-haiku",
    "anthropic/claude-3-haiku",
    "google/gemini-pro-1.5",
    "google/gemini-flash-1.5",
    "google/gemini-flash-1.5-8b",
    "google/gemini-2.0-flash-001",
    "meta-llama/llama-3.2-1b-instruct",
    "meta-llama/llama-3.2-3b-instruct",
    "meta-llama/llama-3.1-8b-instruct",
    "meta-llama/llama-3.1-70b-instruct",
    "openai/gpt-4o-mini",
    "openai/gpt-4o",
    "deepseek/deepseek-r1",
    "meta-llama/llama-4-maverick",
    "meta-llama/llama-4-scout"
];

// Derive the "owned_by" field from the vendor prefix of the model ID.
const getOwner = (modelId) => {
    if (modelId && modelId.includes('/')) {
        return modelId.split('/')[0];
    }
    return 'fal-ai';
};

app.get('/v1/models', (req, res) => {
    console.log("Received request for GET /v1/models");
    try {
        const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({
            id: modelId, object: "model", created: 1700000000, owned_by: getOwner(modelId)
        }));
        res.json({ object: "list", data: modelsData });
        console.log("Successfully returned model list.");
    } catch (error) {
        console.error("Error processing GET /v1/models:", error);
        res.status(500).json({ error: "Failed to retrieve model list." });
    }
});

// Convert an OpenAI-style messages array into the { system_prompt, prompt }
// pair expected by fal-ai/any-llm. System messages form a fixed block; the
// most recent conversation turns fill the prompt up to PROMPT_LIMIT; older
// turns that no longer fit spill into leftover system_prompt space.
function convertMessagesToFalPrompt(messages) {
    let fixed_system_prompt_content = "";
    const conversation_message_blocks = [];

    // Render each message as a labeled block.
    for (const message of messages) {
        let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
        switch (message.role) {
            case 'system':
                fixed_system_prompt_content += `System: ${content}\n\n`;
                break;
            case 'user':
                conversation_message_blocks.push(`Human: ${content}\n\n`);
                break;
            case 'assistant':
                conversation_message_blocks.push(`Assistant: ${content}\n\n`);
                break;
            default:
                console.warn(`Unsupported role: ${message.role}`);
                continue;
        }
    }

    // Truncate the combined system messages if they exceed the budget.
    if (fixed_system_prompt_content.length > SYSTEM_PROMPT_LIMIT) {
        const originalLength = fixed_system_prompt_content.length;
        fixed_system_prompt_content = fixed_system_prompt_content.substring(0, SYSTEM_PROMPT_LIMIT);
        console.warn(`Combined system messages truncated from ${originalLength} to ${SYSTEM_PROMPT_LIMIT}`);
    }
    fixed_system_prompt_content = fixed_system_prompt_content.trim();

    // Work out how much system_prompt space remains for overflow history.
    let space_occupied_by_fixed_system = 0;
    if (fixed_system_prompt_content.length > 0) {
        space_occupied_by_fixed_system = fixed_system_prompt_content.length + 4; // +4 leaves headroom for joining newlines
    }
    const remaining_system_limit = Math.max(0, SYSTEM_PROMPT_LIMIT - space_occupied_by_fixed_system);

    // Walk the conversation backwards (newest first): fill the prompt budget,
    // then spill older turns into the remaining system budget.
    const prompt_history_blocks = [];
    const system_prompt_history_blocks = [];
    let current_prompt_length = 0;
    let current_system_history_length = 0;
    let promptFull = false;
    let systemHistoryFull = (remaining_system_limit <= 0);

    for (let i = conversation_message_blocks.length - 1; i >= 0; i--) {
        const message_block = conversation_message_blocks[i];
        const block_length = message_block.length;

        if (promptFull && systemHistoryFull) {
            break;
        }

        if (!promptFull) {
            if (current_prompt_length + block_length <= PROMPT_LIMIT) {
                prompt_history_blocks.unshift(message_block);
                current_prompt_length += block_length;
                continue;
            } else {
                promptFull = true;
            }
        }

        if (!systemHistoryFull) {
            if (current_system_history_length + block_length <= remaining_system_limit) {
                system_prompt_history_blocks.unshift(message_block);
                current_system_history_length += block_length;
                continue;
            } else {
                systemHistoryFull = true;
            }
        }
    }

    const system_prompt_history_content = system_prompt_history_blocks.join('').trim();
    const final_prompt = prompt_history_blocks.join('').trim();

    // Separator marking overflow history appended after the fixed system block.
    const SEPARATOR = "\n\n------- The following is earlier conversation content -----\n\n";

    let final_system_prompt = "";

    const hasFixedSystem = fixed_system_prompt_content.length > 0;
    const hasSystemHistory = system_prompt_history_content.length > 0;

    if (hasFixedSystem && hasSystemHistory) {
        final_system_prompt = fixed_system_prompt_content + SEPARATOR + system_prompt_history_content;
    } else if (hasFixedSystem) {
        final_system_prompt = fixed_system_prompt_content;
    } else if (hasSystemHistory) {
        final_system_prompt = system_prompt_history_content;
    }

    const result = {
        system_prompt: final_system_prompt,
        prompt: final_prompt
    };

    console.log(`Final system_prompt length: ${result.system_prompt.length}, Final prompt length: ${result.prompt.length}`);

    return result;
}
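
// Worked example: with everything inside the limits,
//
//   convertMessagesToFalPrompt([
//     { role: 'system', content: 'You are concise.' },
//     { role: 'user', content: 'Hi' },
//     { role: 'assistant', content: 'Hello!' },
//     { role: 'user', content: 'What is 2+2?' }
//   ]);
//
// yields:
//
//   {
//     system_prompt: 'System: You are concise.',
//     prompt: 'Human: Hi\n\nAssistant: Hello!\n\nHuman: What is 2+2?'
//   }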

// Run a fal-ai call with automatic key failover: try each key at most once,
// marking key-related failures (auth, rate limit) and rotating; any other
// error aborts immediately. Note that fal.config() sets process-wide
// credentials, so concurrent requests may race on the active key.
async function tryFalCallWithFailover(operation, functionId, params) {
    const maxRetries = falKeys.length;
    let lastError = null;

    for (let i = 0; i < maxRetries; i++) {
        const keyInfo = getNextKey();
        if (!keyInfo) {
            throw new Error(lastError ? `All FAL keys failed. Last error: ${lastError.message}` : "All FAL keys are currently unavailable (failed or in cooldown).");
        }

        const currentFalKey = keyInfo.key;
        console.log(`Attempt ${i + 1}/${maxRetries}: Using key ending in ...${currentFalKey.slice(-4)}`);

        try {
            fal.config({ credentials: currentFalKey });

            if (operation === 'stream') {
                const streamResult = await fal.stream(functionId, params);
                console.log(`Successfully initiated stream with key ending in ...${currentFalKey.slice(-4)}`);
                return streamResult;
            } else {
                const result = await fal.subscribe(functionId, params);
                console.log(`Successfully completed subscribe request with key ending in ...${currentFalKey.slice(-4)}`);

                if (result && result.error) {
                    console.warn(`Fal-ai returned an application error (non-stream) with key ...${currentFalKey.slice(-4)}: ${JSON.stringify(result.error)}`);
                }
                return result;
            }
        } catch (error) {
            console.error(`Error using key ending in ...${currentFalKey.slice(-4)}:`, error.message || error);
            lastError = error;

            if (isKeyRelatedError(error)) {
                markKeyFailed(keyInfo);
                console.log(`Key marked as failed. Trying next key if available...`);
            } else {
                console.error("Non-key related error occurred. Aborting retries.");
                throw error;
            }
        }
    }

    console.error("All FAL keys failed after attempting each one.");
    throw new Error(lastError ? `All FAL keys failed. Last error: ${lastError.message}` : "All FAL API keys failed.");
}
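
// A minimal direct-call sketch (parameter shape as used by the route below):
//
//   const result = await tryFalCallWithFailover('subscribe', 'fal-ai/any-llm', {
//     input: { model: 'openai/gpt-4o-mini', prompt: 'Human: Hi\n\n' }
//   });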

// OpenAI-compatible chat completions endpoint, backed by fal-ai/any-llm.
app.post('/v1/chat/completions', async (req, res) => {
    const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;

    console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);

    if (!FAL_SUPPORTED_MODELS.includes(model)) {
        console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`);
    }
    if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
        console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
        return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' });
    }

    try {
        const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);

        const falInput = {
            model: model,
            prompt: prompt,
            ...(system_prompt && { system_prompt: system_prompt }),
            reasoning: !!reasoning,
        };

        console.log("Prepared Fal Input (lengths):", { system_prompt: system_prompt?.length, prompt: prompt?.length });

        if (stream) {
            // Server-Sent Events headers for streaming responses.
            res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
            res.setHeader('Cache-Control', 'no-cache');
            res.setHeader('Connection', 'keep-alive');
            res.setHeader('Access-Control-Allow-Origin', '*');
            res.flushHeaders();

            let previousOutput = '';
            let falStream;

            try {
                falStream = await tryFalCallWithFailover('stream', "fal-ai/any-llm", { input: falInput });

                for await (const event of falStream) {
                    const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
                    const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
                    const errorInfo = (event && event.error) ? event.error : null;

                    if (errorInfo) {
                        console.error("Error received *during* fal stream:", errorInfo);
                        const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
                        res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
                        break;
                    }

                    // The fal stream reports cumulative output; compute the delta
                    // since the previous event to emit OpenAI-style chunks.
                    let deltaContent = '';
                    if (currentOutput.startsWith(previousOutput)) {
                        deltaContent = currentOutput.substring(previousOutput.length);
                    } else if (currentOutput.length > 0) {
                        console.warn("Fal stream output mismatch detected. Sending full current output as delta.", { previousLength: previousOutput.length, currentLength: currentOutput.length });
                        deltaContent = currentOutput;
                    }
                    previousOutput = currentOutput;

                    if (deltaContent || !isPartial) {
                        const openAIChunk = { id: `chatcmpl-${Date.now()}`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: { content: deltaContent }, finish_reason: isPartial === false ? "stop" : null }] };
                        res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`);
                    }
                }
                res.write(`data: [DONE]\n\n`);
                res.end();
                console.log("Stream finished successfully.");

            } catch (streamError) {
                console.error('Error during stream processing:', streamError);
                if (!res.writableEnded) {
                    try {
                        const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
                        const finalErrorChunk = { error: { message: "Stream failed", type: "proxy_error", details: errorDetails } };
                        res.write(`data: ${JSON.stringify(finalErrorChunk)}\n\n`);
                        res.write(`data: [DONE]\n\n`);
                        res.end();
                    } catch (finalError) {
                        console.error('Error sending final stream error message to client:', finalError);
                        if (!res.writableEnded) { res.end(); }
                    }
                }
            }

        } else {
            console.log("Executing non-stream request with failover...");
            const result = await tryFalCallWithFailover('subscribe', "fal-ai/any-llm", { input: falInput, logs: true });

            console.log("Received non-stream result from fal-ai via failover wrapper.");

            if (result && result.error) {
                console.error("Fal-ai returned an application error in non-stream mode (after successful API call):", result.error);
                return res.status(500).json({
                    object: "error",
                    message: `Fal-ai application error: ${JSON.stringify(result.error)}`,
                    type: "fal_ai_error",
                    param: null,
                    code: result.error.code || null
                });
            }

            // Wrap the fal result in an OpenAI chat.completion envelope. Usage
            // fields are left null: token counts are not mapped from the fal result.
            const openAIResponse = {
                id: `chatcmpl-${result?.requestId || Date.now()}`,
                object: "chat.completion",
                created: Math.floor(Date.now() / 1000),
                model: model,
                choices: [{
                    index: 0,
                    message: {
                        role: "assistant",
                        content: result?.output || ""
                    },
                    finish_reason: "stop"
                }],
                usage: {
                    prompt_tokens: null,
                    completion_tokens: null,
                    total_tokens: null
                },
                system_fingerprint: null,
                ...(result?.reasoning && { fal_reasoning: result.reasoning }),
            };
            res.json(openAIResponse);
            console.log("Returned non-stream response successfully.");
        }

    } catch (error) {
        console.error('Unhandled error in /v1/chat/completions:', error);
        if (!res.headersSent) {
            const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
            const errorType = error.message?.includes("All FAL keys failed") ? "api_key_error" : "proxy_internal_error";
            res.status(500).json({
                error: {
                    message: `Internal Server Error in Proxy: ${errorMessage}`,
                    type: errorType,
                    details: error.stack
                }
            });
        } else if (!res.writableEnded) {
            console.error("Headers already sent, attempting to end response after error.");
            res.end();
        }
    }
});

app.listen(PORT, () => {
    console.log(`===========================================================`);
    console.log(` Fal OpenAI Proxy Server (Multi-Key Failover)`);
    console.log(` Listening on port: ${PORT}`);
    console.log(` Loaded ${falKeys.length} FAL API Key(s) from FAL_KEY.`);
    console.log(` API Key Auth Enabled: ${API_KEY ? 'Yes' : 'No'}`);
    console.log(` Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
    console.log(` Chat Completions: POST http://localhost:${PORT}/v1/chat/completions`);
    console.log(` Models Endpoint: GET http://localhost:${PORT}/v1/models`);
    console.log(`===========================================================`);
});

// Simple unauthenticated health-check route.
app.get('/', (req, res) => {
    res.send('Fal OpenAI Proxy (Multi-Key Failover) is running.');
});
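
// End-to-end usage sketch (assuming the server runs locally with
// API_KEY="my-proxy-secret"):
//
//   const response = await fetch('http://localhost:3000/v1/chat/completions', {
//     method: 'POST',
//     headers: {
//       'Content-Type': 'application/json',
//       'Authorization': 'Bearer my-proxy-secret'
//     },
//     body: JSON.stringify({
//       model: 'openai/gpt-4o-mini',
//       messages: [{ role: 'user', content: 'Hello!' }],
//       stream: false
//     })
//   });
//   const data = await response.json();
//   console.log(data.choices[0].message.content);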