@ai-sdk-tool/proxy 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +172 -0
- package/dist/index.cjs +927 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +183 -0
- package/dist/index.d.ts +183 -0
- package/dist/index.js +882 -0
- package/dist/index.js.map +1 -0
- package/package.json +62 -0
package/README.md
ADDED
@@ -0,0 +1,172 @@

# @ai-sdk-tool/proxy

OpenAI-compatible proxy server for AI SDK tool middleware. This package allows you to expose AI SDK middleware-wrapped language models as standard OpenAI API endpoints, enabling tool calling capabilities for models that don't natively support them.

## Features

- 🔄 OpenAI-compatible `/v1/chat/completions` endpoint
- 🌊 Streaming and non-streaming responses
- 🛠️ Tool calling support for non-native models
- ⚡ Fast and lightweight Fastify server
- 🔧 Configurable CORS and server options
- 📦 Easy integration with existing AI SDK middleware

## Installation

```bash
pnpm add @ai-sdk-tool/proxy
```

## Quick Start

```typescript
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
import { gemmaToolMiddleware } from "@ai-sdk-tool/parser";
import { wrapLanguageModel } from "ai";
import { OpenAIProxyServer } from "@ai-sdk-tool/proxy";
import { z } from "zod";

// Create your language model with middleware
const baseModel = createOpenAICompatible({
  name: "openrouter",
  apiKey: process.env.OPENROUTER_API_KEY,
  baseURL: "https://openrouter.ai/api/v1",
});

const wrappedModel = wrapLanguageModel({
  model: baseModel("google/gemma-3-27b-it"),
  middleware: gemmaToolMiddleware,
});

// Configure tools
const tools = {
  get_weather: {
    description: "Get the weather for a given city",
    inputSchema: z.object({ city: z.string() }),
    execute: ({ city }) => {
      // Your weather API logic here
      return { city, temperature: 22, condition: "sunny" };
    },
  },
};

// Start the proxy server
const server = new OpenAIProxyServer({
  model: wrappedModel,
  port: 3000,
  tools,
});

await server.start();
```

## Usage

Once the server is running, you can make standard OpenAI API calls to `http://localhost:3000/v1/chat/completions`:

```bash
curl -X POST http://localhost:3000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "wrapped-model",
    "messages": [
      {"role": "user", "content": "What is the weather in New York?"}
    ],
    "tools": [
      {
        "type": "function",
        "function": {
          "name": "get_weather",
          "description": "Get the weather for a given city",
          "parameters": {
            "type": "object",
            "properties": {
              "city": {"type": "string"}
            },
            "required": ["city"]
          }
        }
      }
    ],
    "stream": false
  }'
```
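
Any OpenAI-compatible client can target the proxy as well. A minimal sketch using the official `openai` npm package (assumed to be installed separately; the `apiKey` is a placeholder, since the proxy holds the real provider credentials):

```typescript
import OpenAI from "openai";

const client = new OpenAI({
  baseURL: "http://localhost:3000/v1",
  apiKey: "not-needed", // placeholder; the proxy talks to the upstream provider
});

const completion = await client.chat.completions.create({
  model: "wrapped-model",
  messages: [{ role: "user", content: "What is the weather in New York?" }],
});

console.log(completion.choices[0].message);
```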

### Streaming

Enable streaming by setting `"stream": true` in your request:

```bash
curl -X POST http://localhost:3000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "wrapped-model",
    "messages": [
      {"role": "user", "content": "Tell me a story"}
    ],
    "stream": true
  }'
```
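
Because the stream is delivered as OpenAI-style SSE chunks, standard streaming clients work unchanged. A sketch with the `openai` npm package (assumed, as above):

```typescript
import OpenAI from "openai";

const client = new OpenAI({
  baseURL: "http://localhost:3000/v1",
  apiKey: "not-needed", // placeholder
});

const stream = await client.chat.completions.create({
  model: "wrapped-model",
  messages: [{ role: "user", content: "Tell me a story" }],
  stream: true,
});

// Print content deltas as they arrive.
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```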

## API Endpoints

- `POST /v1/chat/completions` - OpenAI-compatible chat completions
- `GET /v1/models` - List available models
- `GET /health` - Health check endpoint
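
For a quick smoke test of the other two endpoints (plain `fetch`; JSON response bodies are assumed):

```typescript
const health = await fetch("http://localhost:3000/health");
console.log(await health.json()); // assumed JSON health payload

const models = await fetch("http://localhost:3000/v1/models");
console.log(await models.json()); // OpenAI-style model list
```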

## Configuration

```typescript
interface ProxyConfig {
  model: LanguageModel; // Wrapped language model with middleware
  port?: number; // Server port (default: 3000)
  host?: string; // Server host (default: 'localhost')
  cors?: boolean; // Enable CORS (default: true)
  tools?: Record<string, AISDKTool>; // Available tools (server-registered)
  maxSteps?: number; // Optional: maximum tool-calling steps (experimental)
  logger?: { debug: Function; info: Function; warn: Function; error: Function }; // Optional structured logger
}
```
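
For example, a non-default setup might look like this (values are illustrative, not additional defaults):

```typescript
const server = new OpenAIProxyServer({
  model: wrappedModel,
  host: "0.0.0.0", // listen on all interfaces
  port: 8080,
  cors: false,
  tools,
  logger: console, // console satisfies the debug/info/warn/error shape
});

await server.start();
```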

## Tool Definition

```typescript
interface AISDKTool {
  description: string;
  inputSchema: z.ZodTypeAny; // Zod schema for tool input
  execute?: (params: unknown) => unknown | Promise<unknown>;
}
```

Notes:

- The server merges request-provided tools (schema-only) with server-registered tools (schema plus optional `execute`); server tools take precedence on a name collision. Zod schemas are wrapped internally for provider compatibility. See the sketch below.
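
A sketch of that precedence rule (tool names are illustrative, reusing the Quick Start setup):

```typescript
// Server-side: get_weather is registered with an execute handler.
const server = new OpenAIProxyServer({
  model: wrappedModel,
  tools: {
    get_weather: {
      description: "Get the weather for a given city",
      inputSchema: z.object({ city: z.string() }),
      execute: ({ city }) => ({ city, temperature: 22 }),
    },
  },
});

// If a request body also declares a "get_weather" tool (schema only),
// the server-registered definition above wins on the name collision.
```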

## Testing

This package uses unit tests colocated with the source files. Key areas covered:

- Request conversion (OpenAI → AI SDK): `openai-request-converter.test.ts`, `openai-request-converter.normalize.test.ts`
- Result conversion (AI SDK → OpenAI): `response-converter.result.test.ts`
- Streaming conversion with stateful handling: `response-converter.stream.test.ts`
- SSE formatting: `response-converter.sse.test.ts`

Run tests:

```bash
pnpm --filter @ai-sdk-tool/proxy test
```

The Vitest config includes `src/**/*.test.ts` and excludes `src/test/**` (legacy).

## Internals (Overview)

- OpenAI request → AI SDK params: `src/openai-request-converter.ts`
- AI SDK result/stream → OpenAI response/SSE: `src/response-converter.ts`
- SSE helpers: `createSSEResponse`, `createOpenAIStreamConverter(model)`

Each streaming request creates a single converter instance to maintain per-request state for correct `finish_reason` and tool-call handling.
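
A hypothetical illustration of that pattern (not the package's actual code; the real helpers are the ones named above):

```typescript
// Hypothetical: the converter closes over per-request state, so one
// instance is created per request and never shared between requests.
function makeStreamConverter(model: string) {
  let sawToolCall = false; // per-request state

  return (part: { type: string; text?: string }) => {
    if (part.type === "tool-call") sawToolCall = true;
    if (part.type === "finish") {
      return { model, finish_reason: sawToolCall ? "tool_calls" : "stop" };
    }
    return { model, delta: { content: part.text ?? "" } };
  };
}

const convert = makeStreamConverter("wrapped-model"); // once per request
```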

## License

Apache-2.0