Upload folder using huggingface_hub
- README.md +11 -7
- src/routes/landingPageHtml.ts +4 -4
- src/schemas.ts +2 -2
README.md
CHANGED
@@ -13,19 +13,24 @@ app_port: 3000
 
 # responses.js
 
-A lightweight Express.js server that implements …
+A lightweight Express.js server that implements a translation layer between the two main LLM APIs currently available. Works with any Chat Completion API, whether it's a local LLM or the cloud provider of your choice.
+
+## 🎮 Live Demo
+
+[**Try responses.js right now, no installation needed!**](https://huggingface.co/spaces/Wauplin/responses.js)
 
 ## ✨ Features
 
 - **ResponsesAPI**: Partial implementation of [OpenAI's Responses API](https://platform.openai.com/docs/api-reference/responses), on top of Chat Completion API
-- **…
+- **Provider Agnostic**: Works with any Chat Completion API (local or remote)
 - **Streaming Support**: Support for streamed responses
 - **Structured Output**: Support for structured data responses (e.g. jsonschema)
 - **Function Calling**: Tool and function calling capabilities
 - **Multi-modal Input**: Text and image input support
+- **Remote MCP**: Execute MCP tool calls remotely
 - **Demo UI**: Interactive web interface for testing
 
-Not implemented: remote function calling, …
+Not implemented: remote function calling, file upload, stateful API, etc.
 
 ## 🚀 Quick Start
 
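In practice, the new feature list maps directly onto the official `openai` client (the same nodejs client the project is built on) pointed at the local server. A minimal sketch of a streamed call through the translation layer, assuming the server from the Quick Start is running on `http://localhost:3000` and that the partial implementation emits the standard `response.output_text.delta` stream events:

```ts
// Sketch only: assumes responses.js is running locally on port 3000
// and HF_TOKEN holds a valid Hugging Face token.
import OpenAI from "openai";

const client = new OpenAI({
  baseURL: "http://localhost:3000/v1",
  apiKey: process.env.HF_TOKEN,
});

// stream: true exercises the "Streaming Support" feature above.
const stream = await client.responses.create({
  model: "moonshotai/Kimi-K2-Instruct:groq", // same model as the demo .env below
  input: "Say hello in one short sentence.",
  stream: true,
});

for await (const event of stream) {
  // Print text deltas as they arrive; ignore other event types.
  if (event.type === "response.output_text.delta") {
    process.stdout.write(event.delta);
  }
}
```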
@@ -82,8 +87,8 @@ pnpm run example function_streaming
 ### Important Notes
 
 - Server must be running (`pnpm dev`) on `http://localhost:3000`
-- `…
-- Tests use real inference providers and …
+- `API_KEY` environment variable set with your LLM provider's API key
+- Tests use real inference providers and may incur costs
 - Tests are not run in CI due to billing requirements
 
 ### Running Tests
@@ -112,7 +117,7 @@ Experience the API through our interactive web interface, adapted from the [open
 ```bash
 # Create demo/.env
 cat > demo/.env << EOF
-MODEL="…
+MODEL="moonshotai/Kimi-K2-Instruct:groq"
 OPENAI_BASE_URL=http://localhost:3000/v1
 OPENAI_API_KEY=${HF_TOKEN:-<your-huggingface-token>}
 EOF
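The `MODEL` value added here appears to follow the Hugging Face Inference Providers `model:provider` convention (`moonshotai/Kimi-K2-Instruct` routed through `groq`). The same `.env` values also work for direct API calls; a hedged sketch of a structured-output request, assuming the server mirrors OpenAI's `text.format` JSON-schema shape:

```ts
// Sketch: structured output through the same local endpoint as demo/.env.
// The text.format shape below follows OpenAI's Responses API; whether
// responses.js accepts every field here is an assumption.
import OpenAI from "openai";

const client = new OpenAI({
  baseURL: "http://localhost:3000/v1",
  apiKey: process.env.HF_TOKEN,
});

const response = await client.responses.create({
  model: "moonshotai/Kimi-K2-Instruct:groq",
  input: "Extract the city from: 'I flew to Paris last week.'",
  text: {
    format: {
      type: "json_schema",
      name: "city_extraction",
      strict: true,
      schema: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
        additionalProperties: false,
      },
    },
  },
});

console.log(response.output_text); // e.g. {"city":"Paris"}
```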
@@ -200,4 +205,3 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
 - Based on OpenAI's [Responses API specification](https://platform.openai.com/docs/api-reference/responses)
 - Built on top of [OpenAI's nodejs client](https://github.com/openai/openai-node)
 - Demo UI adapted from [openai-responses-starter-app](https://github.com/openai/openai-responses-starter-app)
-- Built on top of [Hugging Face Inference Providers](https://huggingface.co/docs/inference-providers/index)
src/routes/landingPageHtml.ts
CHANGED
@@ -464,7 +464,7 @@ export function getLandingPageHtml(req: Request, res: Response): void {
 <main>
 <section class="hero">
 <h2>OpenAI-compatible Responses API</h2>
-<p><b>responses.js</b> is an open-source, lightweight …
+<p><b>responses.js</b> is an open-source, lightweight translation layer between the two main LLM APIs currently available. Works with any Chat Completion API, whether it's a local LLM or the cloud provider of your choice.</p>
 <div class="api-endpoint-box">
 <button class="copy-endpoint-btn" onclick="copyEndpointUrl(this)">Copy</button>
 <div><b>API Endpoint:</b></div>
@@ -479,13 +479,13 @@ export function getLandingPageHtml(req: Request, res: Response): void {
 <b>OpenAI-compatible</b><br>Stateless implementation of the <a href="https://platform.openai.com/docs/api-reference/responses" target="_blank">Responses API</a>
 </div>
 <div class="feature-card">
-<b>…
+<b>Provider Agnostic</b><br>Works with any Chat Completion API (local or remote).
 </div>
 <div class="feature-card">
-<b>Multi-modal</b><br>…
+<b>Multi-modal, streaming, structured output</b><br>Supports text and image inputs, streaming output, JSON schema, and function calling.
 </div>
 <div class="feature-card">
-<b>…
+<b>Remote MCP</b><br>Server-side MCP tool execution.
 </div>
 </div>
 </section>
src/schemas.ts
CHANGED
@@ -193,7 +193,7 @@ export const createResponseParamsSchema = z.object({
 .union([
 z.enum(["auto", "none", "required"]),
 z.object({
-type: z.…
+type: z.literal("function"),
 name: z.string(),
 }),
 // TODO: also hosted tool and MCP tool
@@ -206,7 +206,7 @@ export const createResponseParamsSchema = z.object({
 name: z.string(),
 parameters: z.record(z.any()),
 strict: z.boolean().default(true),
-type: z.…
+type: z.literal("function"),
 description: z.string().optional(),
 }),
 mcpServerParamsSchema,
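To see what the switch to `z.literal("function")` buys, here is a standalone sketch of the tool-choice validation, mirroring the union in the diff above rather than the actual export from `src/schemas.ts` (the deleted side is truncated in the diff, so the previous validator is unknown):

```ts
// Standalone sketch mirroring the tool_choice union shown in the diff;
// the real schema lives in src/schemas.ts.
import { z } from "zod";

const toolChoiceSchema = z.union([
  z.enum(["auto", "none", "required"]),
  z.object({
    type: z.literal("function"), // exact-match discriminator added by this commit
    name: z.string(),
  }),
]);

console.log(toolChoiceSchema.safeParse("auto").success); // true
console.log(
  toolChoiceSchema.safeParse({ type: "function", name: "get_weather" }).success
); // true
console.log(
  toolChoiceSchema.safeParse({ type: "web_search", name: "get_weather" }).success
); // false: type must be exactly "function"
```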