Update server.js
server.js
CHANGED
@@ -1,75 +1,53 @@
- import { createServerAdapter } from '@whatwg-node/server'
- import { AutoRouter, json, error, cors } from 'itty-router'
- import { createServer } from 'http'
- import dotenv from 'dotenv'
-
- dotenv.config()
-
  class Config {
    constructor() {
-     this.PORT = process.env.PORT || 8787
-     this.API_PREFIX = process.env.API_PREFIX || '/'
-     this.API_KEY = process.env.API_KEY || ''
-     this.MAX_RETRY_COUNT = process.env.MAX_RETRY_COUNT || 3
-     this.RETRY_DELAY = process.env.RETRY_DELAY || 5000
-     this.FAKE_HEADERS = {
-       …
-       Origin: 'https://duckduckgo.com',
-       …
-       Referer: 'https://duckduckgo.com/',
-       'Sec-Ch-Ua': '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
-       'Sec-Ch-Ua-Mobile': '?0',
-       'Sec-Ch-Ua-Platform': '"Windows"',
-       'Sec-Fetch-Dest': 'empty',
-       'Sec-Fetch-Mode': 'cors',
-       'Sec-Fetch-Site': 'same-origin',
-       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
-     }
    }
  }

- const config = new Config()

  const { preflight, corsify } = cors({
    origin: '*',
    allowMethods: '*',
    exposeHeaders: '*',
- })

  const withBenchmarking = (request) => {
-   request.start = Date.now()
- }
-
- const withAuth = (request) => {
-   if (config.API_KEY) {
-     const authHeader = request.headers.get('Authorization')
-     if (!authHeader || !authHeader.startsWith('Bearer ')) {
-       return error(401, 'Unauthorized: Missing or invalid Authorization header')
-     }
-     const token = authHeader.substring(7)
-     if (token !== config.API_KEY) {
-       return error(403, 'Forbidden: Invalid API key')
-     }
-   }
- }

  const logger = (res, req) => {
-   console.log(req.method, res.status, req.url, Date.now() - req.start, 'ms')
- }

  const router = AutoRouter({
-   before: [withBenchmarking, preflight, withAuth],
    missing: () => error(404, '404 Not Found. Please check whether the calling URL is correct.'),
    finally: [corsify, logger],
- })

- router.get('/', () => json({ message: 'API Service Running!' }))
- router.get('/ping', () => json({ message: 'pong' }))
- router.get(config.API_PREFIX + '/v1/models', () =>
    json({
      object: 'list',
      data: [

+ import { createServerAdapter } from '@whatwg-node/server';
+ import { AutoRouter, json, error, cors } from 'itty-router';
+ import { createServer } from 'http';
+ import dotenv from 'dotenv';
+
+ dotenv.config();
+
  class Config {
    constructor() {
+     this.PORT = process.env.PORT || 8787;
+     this.API_PREFIX = process.env.API_PREFIX || '/';
+     this.MAX_RETRY_COUNT = parseInt(process.env.MAX_RETRY_COUNT) || 3;
+     this.RETRY_DELAY = parseInt(process.env.RETRY_DELAY) || 5000;
+     this.FAKE_HEADERS = {
+       'Accept': 'application/json',
+       'Content-Type': 'application/json',
+       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+       'Referer': 'https://duckduckgo.com/',
+       'Origin': 'https://duckduckgo.com',
+       'x-vqd-accept': '1',
+       ...JSON.parse(process.env.FAKE_HEADERS || '{}'),
+     };
    }
  }

+ const config = new Config();

  const { preflight, corsify } = cors({
    origin: '*',
    allowMethods: '*',
    exposeHeaders: '*',
+ });

  const withBenchmarking = (request) => {
+   request.start = Date.now();
+ };

  const logger = (res, req) => {
+   console.log(req.method, res.status, req.url, Date.now() - req.start, 'ms');
+ };

  const router = AutoRouter({
+   before: [withBenchmarking, preflight],
    missing: () => error(404, '404 Not Found. Please check whether the calling URL is correct.'),
    finally: [corsify, logger],
+ });

+ router.get('/', () => json({ message: 'API Service Running!' }));
+ router.get('/ping', () => json({ message: 'pong' }));
+ router.get(config.API_PREFIX + 'v1/models', () =>
    json({
      object: 'list',
      data: [

@@ -80,251 +58,186 @@ router.get(config.API_PREFIX + '/v1/models', () =>
      { id: 'o3-mini', object: 'model', owned_by: 'ddg' },
    ],
  })
- )

- router.post(config.API_PREFIX + '/v1/chat/completions', (req) => handleCompletion(req))

  async function handleCompletion(request) {
    try {
-     const { model: inputModel, messages, stream: returnStream } = await request.json()
-     const model = convertModel(inputModel)
-     const content = messagesPrepare(messages)
-     return createCompletion(model, content, returnStream)
    } catch (err) {
-     return error(500, err.message)
    }
  }

  async function createCompletion(model, content, returnStream, retryCount = 0) {
-   const token = await requestToken()
    try {
-     const response = await fetch('https://duckduckgo.com/duckchat/v1/chat', {
        method: 'POST',
        headers: {
          ...config.FAKE_HEADERS,
-         Accept: 'text/event-stream',
-         'Content-Type': 'application/json',
          'x-vqd-4': token,
        },
        body: JSON.stringify({
-         model: model,
-         messages: [
-           {
-             role: 'user',
-             content: content,
-           },
-         ],
        }),
-     })

      if (!response.ok) {
-       throw new Error(`Create Completion error! status: ${response.status}`)
      }
-     return handlerStream(model, response.body, returnStream)
    } catch (err) {
-     console.error(err)
-     if (retryCount < config.MAX_RETRY_COUNT) {
-       console.log('Retrying... count', ++retryCount)
-       await new Promise((resolve) => setTimeout(resolve, config.RETRY_DELAY))
-       return createCompletion(model, content, returnStream, retryCount)
-     }
-     throw err
    }
  }

- async function handlerStream(model, rb, returnStream) {
-   let bwzChunk = ''
-   let previousText = ''
-   const handChunkData = (chunk) => {
-     chunk = chunk.trim()
-     if (bwzChunk != '') {
-       chunk = bwzChunk + chunk
-       bwzChunk = ''
-     }

-     if (chunk.includes('[DONE]')) {
-       return chunk
-     }

-     if (chunk.slice(-2) !== '"}') {
-       bwzChunk = chunk
-     }
-     return chunk
-   }
-   const reader = rb.getReader()
-   const decoder = new TextDecoder()
-   const encoder = new TextEncoder()
    const stream = new ReadableStream({
      async start(controller) {
        while (true) {
-         const { done, value } = await reader.read()
          if (done) {
-           return controller.close()
-         }
-         const chunkStr = handChunkData(decoder.decode(value))
-         if (bwzChunk !== '') {
-           continue
          }

-         chunkStr.split('\n').forEach((line) => {
-           if (line.length < 6) {
-             return
-           }
-           line = line.slice(6)
-           if (line !== '[DONE]') {
-             const originReq = JSON.parse(line)

-             if (originReq.action !== 'success') {
-               return controller.error(new Error('Error: originReq stream chunk is not success'))
-             }

-             if (originReq.message) {
-               previousText += originReq.message
                if (returnStream) {
                  controller.enqueue(
-                   encoder.encode(`data: ${JSON.stringify(newChatCompletionChunkWithModel(originReq.message, model))}\n\n`)
-                 )
                }
              }
-           } else {
-             if (returnStream) {
-               controller.enqueue(encoder.encode(`data: ${JSON.stringify(newStopChunkWithModel('stop', model))}\n\n`))
-             } else {
-               controller.enqueue(encoder.encode(JSON.stringify(newChatCompletionWithModel(previousText, model))))
-             }
-             return controller.close()
            }
-         })
-         continue
        }
      },
-   })

    return new Response(stream, {
      headers: {
        'Content-Type': returnStream ? 'text/event-stream' : 'application/json',
      },
-   })
  }

  function messagesPrepare(messages) {
-   let content = ''
-   for (const message of messages) {
-     const role = message.role === 'system' ? 'user' : message.role
-     if (['user', 'assistant'].includes(role)) {
-       const contentStr = Array.isArray(message.content)
-         ? message.content
-             .filter((item) => item.text)
-             .map((item) => item.text)
-             .join('') || ''
-         : message.content
-       content += `${role}:${contentStr};\r\n`
-     }
-   }
-   return content
  }

  async function requestToken() {
    try {
-     const response = await fetch('https://duckduckgo.com/duckchat/v1/status', {
        method: 'GET',
-       headers: {
-         ...config.FAKE_HEADERS,
-         'x-vqd-accept': '1',
-       },
-     })
-     const token = response.headers.get('x-vqd-4')
-     return token
-   } catch (err) {
-     console.log('Request token error: ', err)
    }
  }

  function convertModel(inputModel) {
-   let model
-   switch (inputModel.toLowerCase()) {
-     case 'claude-3-haiku':
-       model = 'claude-3-haiku-20240307'
-       break
-     case 'llama-3.1-70b':
-       model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
-       break
-     case 'mixtral-8x7b':
-       model = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
-       break
-     case 'o3-mini':
-       model = 'o3-mini'
-       break
-   }
-   return model || 'gpt-4o-mini'
  }

  function newChatCompletionChunkWithModel(text, model) {
    return {
-     id: 'chatcmpl-…',
      object: 'chat.completion.chunk',
-     created: Math.floor(Date.now() / 1000),
      model,
-     choices: [
-       {
-         index: 0,
-         delta: {
-           content: text,
-         },
-         finish_reason: null,
-       },
-     ],
-   }
  }

  function newStopChunkWithModel(reason, model) {
    return {
-     id: 'chatcmpl-…',
      object: 'chat.completion.chunk',
-     created: Math.floor(Date.now() / 1000),
      model,
-     choices: [
-       {
-         index: 0,
-         finish_reason: reason,
-       },
-     ],
-   }
  }

  function newChatCompletionWithModel(text, model) {
    return {
-     id: 'chatcmpl-…',
      object: 'chat.completion',
-     created: Math.floor(Date.now() / 1000),
      model,
-     usage: {
-       prompt_tokens: 0,
-       completion_tokens: 0,
-       total_tokens: 0,
-     },
-     choices: [
-       {
-         message: {
-           content: text,
-           role: 'assistant',
-         },
-         index: 0,
-       },
-     ],
-   }
  }

- // Serverless Service
-
  (async () => {
-   if (typeof addEventListener === 'function') return
-   const ittyServer = createServerAdapter(router.fetch)
-   console.log(`Listening on http://localhost:${config.PORT}`)
-   const httpServer = createServer(ittyServer)
-   httpServer.listen(7860, '0.0.0.0') // Force binding to 0.0.0.0 on port 7860
- })()
- // export default router

      { id: 'o3-mini', object: 'model', owned_by: 'ddg' },
    ],
  })
+ );

+ router.post(config.API_PREFIX + 'v1/chat/completions', (req) => handleCompletion(req));

  async function handleCompletion(request) {
    try {
+     const { model: inputModel, messages, stream: returnStream } = await request.json();
+     const model = convertModel(inputModel);
+     const content = messagesPrepare(messages);
+     return createCompletion(model, content, returnStream);
    } catch (err) {
+     console.error('Handle Completion Error:', err);
+     return error(500, err.message);
    }
  }

  async function createCompletion(model, content, returnStream, retryCount = 0) {
    try {
+     const token = await requestToken();
+     const response = await fetch('https://duckduckgo.com/duckchat/v1/chat', {
        method: 'POST',
        headers: {
          ...config.FAKE_HEADERS,
          'x-vqd-4': token,
        },
        body: JSON.stringify({
+         model,
+         messages: [{ role: 'user', content }],
        }),
+     });

      if (!response.ok) {
+       if (response.status === 418) {
+         console.warn('Rate limit hit (418), retrying...');
+         throw new Error('Rate limit exceeded');
+       }
+       throw new Error(`Create Completion error! status: ${response.status}, message: ${await response.text()}`);
      }
+
+     return handlerStream(model, response.body, returnStream);
    } catch (err) {
+     console.error('Create Completion Error:', err.message);
+     if (retryCount < config.MAX_RETRY_COUNT && (err.message.includes('Rate limit') || err.message.includes('418'))) {
+       console.log('Retrying... count', ++retryCount);
+       await new Promise((resolve) => setTimeout(resolve, config.RETRY_DELAY));
+       return createCompletion(model, content, returnStream, retryCount);
      }
+     throw err;
    }
  }

+ async function handlerStream(model, body, returnStream) {
+   const reader = body.getReader();
+   const decoder = new TextDecoder();
+   const encoder = new TextEncoder();
+   let previousText = '';

    const stream = new ReadableStream({
      async start(controller) {
        while (true) {
+         const { done, value } = await reader.read();
          if (done) {
+           if (!returnStream) {
+             controller.enqueue(encoder.encode(JSON.stringify(newChatCompletionWithModel(previousText, model))));
+           }
+           return controller.close();
          }

+         const chunk = decoder.decode(value).trim();
+         const lines = chunk.split('\n').filter((line) => line.startsWith('data: '));

+         for (const line of lines) {
+           const data = line.slice(6);
+           if (data === '[DONE]') {
+             if (returnStream) {
+               controller.enqueue(encoder.encode(`data: ${JSON.stringify(newStopChunkWithModel('stop', model))}\n\n`));
              }
+             return controller.close();
+           }

+           try {
+             const parsed = JSON.parse(data);
+             if (parsed.message) {
+               previousText += parsed.message;
                if (returnStream) {
                  controller.enqueue(
+                   encoder.encode(`data: ${JSON.stringify(newChatCompletionChunkWithModel(parsed.message, model))}\n\n`)
+                 );
                }
              }
+           } catch (err) {
+             console.error('Stream parse error:', err);
            }
+         }
        }
      },
+   });

    return new Response(stream, {
      headers: {
        'Content-Type': returnStream ? 'text/event-stream' : 'application/json',
+       'Cache-Control': 'no-cache',
+       'Connection': 'keep-alive',
      },
+   });
  }

  function messagesPrepare(messages) {
+   return messages
+     .filter((msg) => ['user', 'assistant'].includes(msg.role))
+     .map((msg) => msg.content)
+     .join('\n');
  }

  async function requestToken() {
    try {
+     const response = await fetch('https://duckduckgo.com/duckchat/v1/status', {
        method: 'GET',
+       headers: config.FAKE_HEADERS,
+     });
+     const token = response.headers.get('x-vqd-4');
+     if (!token) {
+       console.error('No x-vqd-4 token found in response headers');
+       throw new Error('Failed to retrieve x-vqd-4 token');
+     }
+     console.log('Token retrieved:', token);
+     return token;
+   } catch (err) {
+     console.error('Request token error:', err);
+     throw err;
    }
  }

  function convertModel(inputModel) {
+   const modelMap = {
+     'claude-3-haiku': 'claude-3-haiku-20240307',
+     'llama-3.1-70b': 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
+     'mixtral-8x7b': 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+     'o3-mini': 'o3-mini', // Fallback to default if unsupported
+   };
+   const selectedModel = modelMap[inputModel.toLowerCase()] || 'gpt-4o-mini';
+   console.log(`Converted model: ${inputModel} -> ${selectedModel}`);
+   return selectedModel;
  }

  function newChatCompletionChunkWithModel(text, model) {
    return {
+     id: 'chatcmpl-' + Math.random().toString(36).slice(2),
      object: 'chat.completion.chunk',
+     created: Math.floor(Date.now() / 1000),
      model,
+     choices: [{ index: 0, delta: { content: text }, finish_reason: null }],
+   };
  }

  function newStopChunkWithModel(reason, model) {
    return {
+     id: 'chatcmpl-' + Math.random().toString(36).slice(2),
      object: 'chat.completion.chunk',
+     created: Math.floor(Date.now() / 1000),
      model,
+     choices: [{ index: 0, finish_reason: reason }],
+   };
  }

  function newChatCompletionWithModel(text, model) {
    return {
+     id: 'chatcmpl-' + Math.random().toString(36).slice(2),
      object: 'chat.completion',
+     created: Math.floor(Date.now() / 1000),
      model,
+     usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
+     choices: [{ message: { content: text, role: 'assistant' }, index: 0 }],
+   };
  }

  (async () => {
+   if (typeof addEventListener === 'function') return;
+   const ittyServer = createServerAdapter(router.fetch);
+   console.log(`Listening on http://0.0.0.0:${config.PORT}`);
+   const httpServer = createServer(ittyServer);
+   httpServer.listen(config.PORT, '0.0.0.0');
+ })();
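
For quick verification of the updated routes, a minimal client sketch follows. It is not part of the commit; it assumes a locally running instance with the Config defaults (PORT=8787, API_PREFIX='/'), and the model name and prompt are illustrative.

// Minimal smoke test for the updated server (Node 18+, built-in fetch).
// Assumes the defaults from Config: PORT=8787 and API_PREFIX='/'.
const BASE = 'http://127.0.0.1:8787';

async function main() {
  // Liveness check against the /ping route.
  console.log(await (await fetch(`${BASE}/ping`)).json()); // { message: 'pong' }

  // Non-streaming completion: handlerStream buffers the upstream SSE
  // into a single OpenAI-style chat.completion JSON body.
  const res = await fetch(`${BASE}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'claude-3-haiku', // mapped by convertModel()
      messages: [{ role: 'user', content: 'Say hello in five words.' }],
      stream: false,
    }),
  });
  const completion = await res.json();
  console.log(completion.choices[0].message.content);
}

main().catch(console.error);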
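A companion sketch for the streaming path, under the same local-default assumptions: with stream: true the response body is a text/event-stream of chat.completion.chunk objects, ended by a chunk whose finish_reason is 'stop'.

// Streaming consumer sketch: reads the text/event-stream body and prints
// each chunk's delta.content as it arrives. For brevity this does not
// buffer SSE lines split across reads, which a robust client should do.
const BASE = 'http://127.0.0.1:8787'; // assumed local default

async function streamDemo() {
  const res = await fetch(`${BASE}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'mixtral-8x7b',
      messages: [{ role: 'user', content: 'Count to three.' }],
      stream: true,
    }),
  });

  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    for (const line of decoder.decode(value).split('\n')) {
      if (!line.startsWith('data: ')) continue;
      const chunk = JSON.parse(line.slice(6));
      const { delta, finish_reason } = chunk.choices[0];
      if (finish_reason === 'stop') return; // stop chunk ends the stream
      if (delta?.content) process.stdout.write(delta.content);
    }
  }
}

streamDemo().catch(console.error);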