@agentloop/openai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +85 -0
- package/dist/index.cjs +364 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +13 -0
- package/dist/index.d.mts +13 -0
- package/dist/index.mjs +340 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +54 -0
package/README.md
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
# @agentloop/openai
|
|
2
|
+
|
|
3
|
+
OpenAI provider for [`@agentloop/core`](../core).
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @agentloop/core @agentloop/openai openai
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```ts
|
|
14
|
+
import { defineAgent } from "@agentloop/core";
|
|
15
|
+
import { createOpenAI } from "@agentloop/openai";
|
|
16
|
+
|
|
17
|
+
const provider = createOpenAI();
|
|
18
|
+
const model = provider.model("gpt-4o", { maxTokens: 4096 });
|
|
19
|
+
|
|
20
|
+
const agent = defineAgent({ model });
|
|
21
|
+
const result = await agent.run("Hello!");
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
### Provider configuration
|
|
25
|
+
|
|
26
|
+
```ts
|
|
27
|
+
const provider = createOpenAI({
|
|
28
|
+
apiKey: process.env.OPENAI_API_KEY, // defaults to OPENAI_API_KEY env var
|
|
29
|
+
baseUrl: "https://custom-endpoint.example.com",
|
|
30
|
+
timeout: 30_000,
|
|
31
|
+
maxRetries: 3,
|
|
32
|
+
headers: { "X-Custom": "value" },
|
|
33
|
+
});
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
### Reasoning models
|
|
37
|
+
|
|
38
|
+
Configure reasoning effort for o1/o3/gpt-5 models:
|
|
39
|
+
|
|
40
|
+
```ts
|
|
41
|
+
import type { OpenAIModelConfig } from "@agentloop/openai";
|
|
42
|
+
|
|
43
|
+
const model = provider.model("o3", {
|
|
44
|
+
maxTokens: 16_000,
|
|
45
|
+
reasoningEffort: "high",
|
|
46
|
+
} satisfies OpenAIModelConfig);
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### Structured output
|
|
50
|
+
|
|
51
|
+
Structured output works via OpenAI's strict JSON Schema mode:
|
|
52
|
+
|
|
53
|
+
```ts
|
|
54
|
+
import { z } from "zod";
|
|
55
|
+
|
|
56
|
+
const result = await agent.run("Summarize this text.", {
|
|
57
|
+
output: z.object({
|
|
58
|
+
summary: z.string(),
|
|
59
|
+
topics: z.array(z.string()),
|
|
60
|
+
}),
|
|
61
|
+
});
|
|
62
|
+
console.log(result.object);
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
## Features
|
|
66
|
+
|
|
67
|
+
- Full streaming with usage tracking
|
|
68
|
+
- Reasoning model support with configurable effort
|
|
69
|
+
- Tool calling with schema validation
|
|
70
|
+
- Structured output via strict JSON Schema
|
|
71
|
+
- Token usage aggregation
|
|
72
|
+
- Cooperative cancellation via AbortSignal
|
|
73
|
+
|
|
74
|
+
## Exports
|
|
75
|
+
|
|
76
|
+
- `createOpenAI(config?: ProviderConfig): Provider` — create an OpenAI provider
|
|
77
|
+
- `OpenAIModelConfig` — model config type with `reasoningEffort` option
|
|
78
|
+
|
|
79
|
+
## Peer dependencies
|
|
80
|
+
|
|
81
|
+
- `openai` ^6.32.0
|
|
82
|
+
|
|
83
|
+
## License
|
|
84
|
+
|
|
85
|
+
MIT
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,364 @@
|
|
|
1
|
+
// Mark the CJS exports object so `Object.prototype.toString` reports "Module".
Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
//#region \0rolldown/runtime.js
// Bundler-generated (rolldown) ESM/CJS interop helpers — do not edit by hand.
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Copy own properties of `from` onto `to` as live getters, skipping `except`
// and keys already present on `to`, preserving each key's enumerability.
var __copyProps = (to, from, except, desc) => {
	if (from && typeof from === "object" || typeof from === "function") for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
		key = keys[i];
		if (!__hasOwnProp.call(to, key) && key !== except) __defProp(to, key, {
			get: ((k) => from[k]).bind(null, key),
			enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
		});
	}
	return to;
};
// Wrap a CJS module so it can be consumed like an ES module namespace:
// modules without `__esModule` get the whole module object as `default`.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", {
value: mod,
enumerable: true
}) : target, mod));
//#endregion
// Runtime dependencies: the OpenAI SDK (interop-wrapped) and the core package.
let openai = require("openai");
openai = __toESM(openai);
let _agentloop_core = require("@agentloop/core");
|
|
27
|
+
//#region src/convert.ts
|
|
28
|
+
/**
 * Translate an OpenAI finish reason into the core {@link FinishReason}
 * vocabulary. Unrecognized reasons (including `null`) map to "unknown".
 */
function mapFinishReason(reason) {
	const MAPPING = {
		stop: "stop",
		length: "length",
		tool_calls: "tool_call",
		content_filter: "refused",
		function_call: "tool_call"
	};
	if (reason != null && Object.hasOwn(MAPPING, reason)) return MAPPING[reason];
	return "unknown";
}
|
|
39
|
+
/**
 * Translate OpenAI token usage into a core {@link Usage} record.
 * A missing usage object yields all-zero counts.
 */
function mapUsage(usage) {
	if (usage == null) {
		return {
			inputTokens: 0,
			outputTokens: 0,
			totalTokens: 0
		};
	}
	// `cacheReadTokens` stays present (as undefined) when no cache detail exists,
	// mirroring the shape consumers of this record see.
	const cacheReadTokens = usage.prompt_tokens_details?.cached_tokens ?? undefined;
	return {
		inputTokens: usage.prompt_tokens,
		outputTokens: usage.completion_tokens,
		totalTokens: usage.total_tokens,
		cacheReadTokens
	};
}
|
|
53
|
+
/**
 * Convert a core {@link ToolDefinition} array into OpenAI's function-tool
 * format, serializing each tool's schema via the core JSON Schema converter.
 */
function convertToolDefs(tools) {
	const converted = [];
	for (const tool of tools) {
		converted.push({
			type: "function",
			function: {
				name: tool.name,
				description: tool.description,
				parameters: (0, _agentloop_core.schemaToJsonSchema)(tool.schema)
			}
		});
	}
	return converted;
}
|
|
64
|
+
// MIME types that OpenAI's Chat Completions API accepts as inline image
// content; everything else is sent as audio or a generic file part.
const IMAGE_TYPES = new Set([
	"image/jpeg",
	"image/png",
	"image/gif",
	"image/webp"
]);
|
|
70
|
+
/**
 * Convert a core {@link UserContent} part to an OpenAI content part.
 * JSON parts are serialized to text; blob/url parts are delegated to
 * their dedicated converters.
 */
function convertUserPart(part) {
	if (part.type === "text") {
		return { type: "text", text: part.text };
	}
	if (part.type === "json") {
		return { type: "text", text: JSON.stringify(part.json) };
	}
	if (part.type === "blob") {
		return convertBlob(part);
	}
	if (part.type === "url") {
		return convertUrl(part);
	}
}
|
|
85
|
+
/**
 * Convert a core {@link BlobPart} to an OpenAI content part.
 * Images become data-URL `image_url` parts, wav/mp3 become `input_audio`,
 * and everything else is attached as a generic data-URL `file` part.
 */
function convertBlob(part) {
	const data = typeof part.data === "string" ? part.data : uint8ToBase64(part.data);
	if (IMAGE_TYPES.has(part.mediaType)) {
		return {
			type: "image_url",
			image_url: { url: `data:${part.mediaType};base64,${data}` }
		};
	}
	const isWav = part.mediaType === "audio/wav";
	if (isWav || part.mediaType === "audio/mp3") {
		return {
			type: "input_audio",
			input_audio: {
				data,
				format: isWav ? "wav" : "mp3"
			}
		};
	}
	return {
		type: "file",
		file: { file_data: `data:${part.mediaType};base64,${data}` }
	};
}
|
|
104
|
+
/**
 * Convert a core {@link URLPart} to an OpenAI content part.
 * Uses the declared media type when present; otherwise sniffs the URL's
 * file extension to decide between an image part and a file part.
 */
function convertUrl(part) {
	const isImage = part.mediaType !== undefined
		? IMAGE_TYPES.has(part.mediaType)
		: /\.(jpe?g|png|gif|webp)(\?|$)/i.test(part.url);
	if (isImage) {
		return { type: "image_url", image_url: { url: part.url } };
	}
	return { type: "file", file: { file_data: part.url } };
}
|
|
115
|
+
/**
 * Convert a core {@link Message} array into OpenAI's Chat Completions format.
 *
 * System messages become `{ role: "system" }`. Tool messages are split into
 * one `{ role: "tool" }` message per tool result (OpenAI requires one per
 * `tool_call_id`).
 */
function convertMessages(messages) {
	const out = [];
	for (const msg of messages) {
		if (msg.role === "system") {
			out.push({
				role: "system",
				content: msg.content.map((p) => ({ type: "text", text: p.text }))
			});
		} else if (msg.role === "user") {
			out.push({
				role: "user",
				content: msg.content.map(convertUserPart)
			});
		} else if (msg.role === "assistant") {
			out.push(convertAssistantMessage(msg.content));
		} else if (msg.role === "tool") {
			// One OpenAI tool message per tool result (per tool_call_id).
			for (const result of msg.content) {
				out.push(convertToolResult(result));
			}
		}
	}
	return out;
}
|
|
149
|
+
/**
 * Convert core assistant content into an OpenAI assistant message.
 * Text parts are joined with newlines, tool calls become `tool_calls`
 * entries, and thinking parts are dropped (Chat Completions has no
 * thinking blocks). Empty text yields `content: null`.
 */
function convertAssistantMessage(content) {
	let text = "";
	const toolCalls = [];
	for (const part of content) {
		if (part.type === "text") {
			// Separate consecutive text parts with a newline.
			text = text ? `${text}\n${part.text}` : part.text;
		} else if (part.type === "tool_call") {
			toolCalls.push({
				id: part.id,
				type: "function",
				function: {
					name: part.name,
					arguments: JSON.stringify(part.arguments)
				}
			});
		}
	}
	const msg = {
		role: "assistant",
		content: text || null
	};
	if (toolCalls.length > 0) {
		msg.tool_calls = toolCalls;
	}
	return msg;
}
|
|
177
|
+
/**
 * Convert a core tool result into an OpenAI tool message.
 * Text and JSON outputs are serialized and concatenated; any other output
 * kind is summarized as a bracketed type tag (e.g. "[blob]").
 */
function convertToolResult(part) {
	const pieces = part.output.map((item) => {
		if (item.type === "text") return item.text;
		if (item.type === "json") return JSON.stringify(item.json);
		return `[${item.type}]`;
	});
	return {
		role: "tool",
		tool_call_id: part.id,
		content: pieces.join("")
	};
}
|
|
197
|
+
/**
 * Create a fresh {@link StreamState}: no text block open yet and no
 * in-flight tool calls (keyed by the chunk's tool-call index).
 */
function createStreamState() {
	const toolCalls = new Map();
	return { textStarted: false, toolCalls };
}
|
|
204
|
+
/**
 * Map a single OpenAI {@link ChatCompletionChunk} to zero or more core
 * {@link StreamPart} values.
 *
 * OpenAI chunks don't have explicit start/end events. We infer them:
 * - First text delta → emit `text_start` then `text_delta`
 * - First delta for a tool call index → emit `tool_call_start`
 * - `finish_reason` present → emit `text_end`/`tool_call_end` + `finish`
 *
 * With `stream_options.include_usage` (which this provider always sets),
 * OpenAI sends one final chunk with an empty `choices` array carrying the
 * usage totals. The mapped finish reason is remembered on `state` so that
 * this trailing chunk reports the real reason (e.g. "tool_call") instead
 * of unconditionally claiming "stop".
 */
function mapChunk(chunk, state) {
	const parts = [];
	const choice = chunk.choices[0];
	if (choice === void 0) {
		// Usage-only trailing chunk: reuse the previously recorded finish reason.
		if (chunk.usage) parts.push({
			type: "finish",
			finishReason: state.finishReason ?? "stop",
			usage: mapUsage(chunk.usage)
		});
		return parts;
	}
	const delta = choice.delta;
	if (delta.content) {
		if (!state.textStarted) {
			state.textStarted = true;
			parts.push({ type: "text_start" });
		}
		parts.push({
			type: "text_delta",
			text: delta.content
		});
	}
	if (delta.tool_calls != null) for (const tc of delta.tool_calls) {
		if (state.toolCalls.get(tc.index) === void 0) {
			// First delta for this index: synthesize the start event.
			const id = tc.id ?? "";
			const name = tc.function?.name ?? "";
			state.toolCalls.set(tc.index, { id, name });
			parts.push({
				type: "tool_call_start",
				id,
				name
			});
		}
		if (tc.function?.arguments != null) {
			const info = state.toolCalls.get(tc.index);
			parts.push({
				type: "tool_call_delta",
				id: info.id,
				args: tc.function.arguments
			});
		}
	}
	if (choice.finish_reason != null) {
		if (state.textStarted) {
			parts.push({ type: "text_end" });
			state.textStarted = false;
		}
		for (const [, info] of state.toolCalls) parts.push({
			type: "tool_call_end",
			id: info.id
		});
		state.toolCalls.clear();
		const finishReason = mapFinishReason(choice.finish_reason);
		// Remember the reason for the trailing usage-only chunk (see doc above).
		state.finishReason = finishReason;
		parts.push({
			type: "finish",
			finishReason,
			usage: mapUsage(chunk.usage)
		});
	}
	return parts;
}
|
|
276
|
+
/**
 * Encode a Uint8Array as a base64 string.
 * Wraps the array's underlying buffer (no copy) before encoding.
 */
function uint8ToBase64(data) {
	const view = Buffer.from(data.buffer, data.byteOffset, data.byteLength);
	return view.toString("base64");
}
|
|
280
|
+
//#endregion
|
|
281
|
+
//#region src/provider.ts
|
|
282
|
+
/**
 * Create an OpenAI {@link Provider}.
 *
 * The returned provider builds {@link Model} objects whose `stream` method
 * issues a streaming Chat Completions request (always with usage reporting
 * enabled). Per-call config overrides the model's defaults; when a
 * `reasoningEffort` is set, sampling knobs (temperature/topP) are omitted.
 */
function createOpenAI(config) {
	const client = new openai.default({
		apiKey: config?.apiKey,
		baseURL: config?.baseUrl,
		timeout: config?.timeout,
		maxRetries: config?.maxRetries,
		defaultHeaders: config?.headers
	});
	return {
		model(name, defaults) {
			const modelDefaults = defaults;
			return {
				name,
				stream(options) {
					const callConfig = options.config;
					// Per-call config wins; fall back to the model's defaults.
					const pick = (key) => callConfig?.[key] ?? modelDefaults?.[key];
					const params = {
						model: name,
						messages: convertMessages(options.messages),
						stream: true,
						stream_options: { include_usage: true }
					};
					const maxTokens = pick("maxTokens");
					if (maxTokens !== undefined) params.max_completion_tokens = maxTokens;
					const reasoningEffort = pick("reasoningEffort");
					if (reasoningEffort !== undefined) {
						params.reasoning_effort = reasoningEffort;
					} else {
						// Sampling parameters only apply to non-reasoning requests.
						const temperature = pick("temperature");
						if (temperature !== undefined) params.temperature = temperature;
						const topP = pick("topP");
						if (topP !== undefined) params.top_p = topP;
					}
					const stopSequences = pick("stopSequences");
					if (stopSequences !== undefined) params.stop = stopSequences;
					if (options.tools !== undefined && options.tools.length > 0) {
						params.tools = convertToolDefs(options.tools);
					}
					if (options.output !== undefined) {
						// Strict JSON Schema structured output.
						params.response_format = {
							type: "json_schema",
							json_schema: {
								name: "output",
								schema: ensureStrictObjects((0, _agentloop_core.schemaToJsonSchema)(options.output)),
								strict: true
							}
						};
					}
					return streamOpenAI(client, params, options.signal);
				}
			};
		}
	};
}
|
|
329
|
+
/**
 * Stream an OpenAI chat completion, converting each SDK chunk into core
 * {@link StreamPart} values. Cancellation is forwarded via `signal`.
 */
async function* streamOpenAI(client, params, signal) {
	const state = createStreamState();
	const stream = await client.chat.completions.create(params, { signal });
	for await (const chunk of stream) {
		yield* mapChunk(chunk, state);
	}
}
|
|
340
|
+
/**
 * Recursively add `additionalProperties: false` to all object schemas.
 * OpenAI's structured output requires this on every object when
 * `strict: true` — including objects nested inside arrays and combinators.
 *
 * Walks `properties`, array `items`/`prefixItems`, `anyOf`/`oneOf`/`allOf`,
 * and `$defs`. Mutates `schema` in place and returns it.
 */
function ensureStrictObjects(schema) {
	if (schema.type === "object") {
		schema.additionalProperties = false;
		if (schema.properties !== null && typeof schema.properties === "object") {
			for (const value of Object.values(schema.properties)) {
				if (value !== null && typeof value === "object") ensureStrictObjects(value);
			}
		}
	}
	// Array element schemas: objects inside arrays must also be strict,
	// otherwise the OpenAI API rejects the schema.
	for (const key of ["items", "prefixItems"]) {
		const sub = schema[key];
		if (Array.isArray(sub)) {
			for (const item of sub) {
				if (item !== null && typeof item === "object") ensureStrictObjects(item);
			}
		} else if (sub !== null && typeof sub === "object") {
			ensureStrictObjects(sub);
		}
	}
	for (const key of [
		"anyOf",
		"oneOf",
		"allOf"
	]) if (Array.isArray(schema[key])) for (const item of schema[key]) ensureStrictObjects(item);
	if (schema.$defs !== null && typeof schema.$defs === "object") {
		for (const value of Object.values(schema.$defs)) {
			if (value !== null && typeof value === "object") ensureStrictObjects(value);
		}
	}
	return schema;
}
|
|
361
|
+
//#endregion
|
|
362
|
+
exports.createOpenAI = createOpenAI;
|
|
363
|
+
|
|
364
|
+
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.cjs","names":["OpenAI"],"sources":["../src/convert.ts","../src/provider.ts"],"sourcesContent":["import type OpenAI from \"openai\";\nimport type {\n BlobPart,\n URLPart,\n AssistantContent,\n UserContent,\n ToolResultPart,\n Message,\n FinishReason,\n Usage,\n ToolDefinition,\n StreamPart,\n} from \"@agentloop/core\";\nimport { schemaToJsonSchema } from \"@agentloop/core\";\n\ntype OAIMessage = OpenAI.ChatCompletionMessageParam;\ntype OAITool = OpenAI.ChatCompletionTool;\ntype OAIChunk = OpenAI.ChatCompletionChunk;\ntype OAIContentPart = OpenAI.ChatCompletionContentPart;\n\n/** Map an OpenAI finish reason to a core {@link FinishReason}. */\nexport function mapFinishReason(\n reason: \"stop\" | \"length\" | \"tool_calls\" | \"content_filter\" | \"function_call\" | null,\n): FinishReason {\n switch (reason) {\n case \"stop\":\n return \"stop\";\n case \"length\":\n return \"length\";\n case \"tool_calls\":\n return \"tool_call\";\n case \"content_filter\":\n return \"refused\";\n case \"function_call\":\n return \"tool_call\";\n case null:\n default:\n return \"unknown\";\n }\n}\n\n/** Map OpenAI usage to a core {@link Usage}. */\nexport function mapUsage(usage: OpenAI.CompletionUsage | undefined | null): Usage {\n if (usage === undefined || usage === null) {\n return { inputTokens: 0, outputTokens: 0, totalTokens: 0 };\n }\n return {\n inputTokens: usage.prompt_tokens,\n outputTokens: usage.completion_tokens,\n totalTokens: usage.total_tokens,\n cacheReadTokens: usage.prompt_tokens_details?.cached_tokens ?? undefined,\n };\n}\n\n/** Convert core {@link ToolDefinition} array to OpenAI tool format. 
*/\nexport function convertToolDefs(tools: ToolDefinition[]): OAITool[] {\n return tools.map((t) => ({\n type: \"function\" as const,\n function: {\n name: t.name,\n description: t.description,\n parameters: schemaToJsonSchema(t.schema) as OpenAI.FunctionParameters,\n },\n }));\n}\n\nconst IMAGE_TYPES = new Set([\"image/jpeg\", \"image/png\", \"image/gif\", \"image/webp\"]);\n\n/** Convert a core {@link UserContent} part to an OpenAI content part. */\nfunction convertUserPart(part: UserContent): OAIContentPart {\n switch (part.type) {\n case \"text\":\n return { type: \"text\", text: part.text };\n\n case \"json\":\n return { type: \"text\", text: JSON.stringify(part.json) };\n\n case \"blob\":\n return convertBlob(part);\n\n case \"url\":\n return convertUrl(part);\n }\n}\n\n/** Convert a core {@link BlobPart} to an OpenAI content part. */\nfunction convertBlob(part: BlobPart): OAIContentPart {\n const data = typeof part.data === \"string\" ? part.data : uint8ToBase64(part.data);\n\n if (IMAGE_TYPES.has(part.mediaType)) {\n return {\n type: \"image_url\",\n image_url: { url: `data:${part.mediaType};base64,${data}` },\n };\n }\n\n if (part.mediaType === \"audio/wav\" || part.mediaType === \"audio/mp3\") {\n const format = part.mediaType === \"audio/wav\" ? \"wav\" : \"mp3\";\n return { type: \"input_audio\", input_audio: { data, format } };\n }\n\n return {\n type: \"file\",\n file: { file_data: `data:${part.mediaType};base64,${data}` },\n } as OAIContentPart;\n}\n\n/** Convert a core {@link URLPart} to an OpenAI content part. */\nfunction convertUrl(part: URLPart): OAIContentPart {\n const isImage =\n part.mediaType !== undefined\n ? 
IMAGE_TYPES.has(part.mediaType)\n : /\\.(jpe?g|png|gif|webp)(\\?|$)/i.test(part.url);\n\n if (isImage) {\n return { type: \"image_url\", image_url: { url: part.url } };\n }\n\n return {\n type: \"file\",\n file: { file_data: part.url },\n } as OAIContentPart;\n}\n\n/**\n * Convert a core {@link Message} array into OpenAI's Chat Completions format.\n *\n * System messages become `{ role: \"system\" }`. Tool messages are split into\n * one `{ role: \"tool\" }` message per tool result (OpenAI requires one per\n * `tool_call_id`).\n */\nexport function convertMessages(messages: Message[]): OAIMessage[] {\n const out: OAIMessage[] = [];\n\n for (const msg of messages) {\n switch (msg.role) {\n case \"system\":\n out.push({\n role: \"system\",\n content: msg.content.map((p) => ({ type: \"text\" as const, text: p.text })),\n });\n break;\n\n case \"user\":\n out.push({\n role: \"user\",\n content: msg.content.map(convertUserPart),\n });\n break;\n\n case \"assistant\":\n out.push(convertAssistantMessage(msg.content));\n break;\n\n case \"tool\":\n // OpenAI requires one tool message per tool_call_id.\n for (const result of msg.content) {\n out.push(convertToolResult(result));\n }\n break;\n }\n }\n\n return out;\n}\n\n/** Convert core assistant content to an OpenAI assistant message. 
*/\nfunction convertAssistantMessage(\n content: AssistantContent[],\n): OpenAI.ChatCompletionAssistantMessageParam {\n let textContent = \"\";\n const toolCalls: OpenAI.ChatCompletionMessageToolCall[] = [];\n\n for (const part of content) {\n switch (part.type) {\n case \"text\":\n if (textContent) textContent += \"\\n\";\n textContent += part.text;\n break;\n\n case \"thinking\":\n // OpenAI Chat Completions doesn't support thinking blocks — skip.\n break;\n\n case \"tool_call\":\n toolCalls.push({\n id: part.id,\n type: \"function\",\n function: {\n name: part.name,\n arguments: JSON.stringify(part.arguments),\n },\n });\n break;\n }\n }\n\n const msg: OpenAI.ChatCompletionAssistantMessageParam = {\n role: \"assistant\",\n content: textContent || null,\n };\n\n if (toolCalls.length > 0) msg.tool_calls = toolCalls;\n\n return msg;\n}\n\n/** Convert a core tool result to an OpenAI tool message. */\nfunction convertToolResult(part: ToolResultPart): OpenAI.ChatCompletionToolMessageParam {\n let content = \"\";\n for (const item of part.output) {\n switch (item.type) {\n case \"text\":\n content += item.text;\n break;\n case \"json\":\n content += JSON.stringify(item.json);\n break;\n default:\n content += `[${item.type}]`;\n break;\n }\n }\n return { role: \"tool\", tool_call_id: part.id, content };\n}\n\n/**\n * Tracks state during streaming to synthesize start/end events that\n * OpenAI's chunk format doesn't provide explicitly.\n */\nexport interface StreamState {\n textStarted: boolean;\n toolCalls: Map<number, { id: string; name: string }>;\n}\n\n/** Create a fresh {@link StreamState}. */\nexport function createStreamState(): StreamState {\n return { textStarted: false, toolCalls: new Map() };\n}\n\n/**\n * Map a single OpenAI {@link ChatCompletionChunk} to zero or more core\n * {@link StreamPart} values.\n *\n * OpenAI chunks don't have explicit start/end events. 
We infer them:\n * - First text delta → emit `text_start` then `text_delta`\n * - First delta for a tool call index → emit `tool_call_start`\n * - `finish_reason` present → emit `text_end`/`tool_call_end` + `finish`\n */\nexport function mapChunk(chunk: OAIChunk, state: StreamState): StreamPart[] {\n const parts: StreamPart[] = [];\n const choice = chunk.choices[0];\n\n if (choice === undefined) {\n if (chunk.usage) {\n parts.push({\n type: \"finish\",\n finishReason: \"stop\",\n usage: mapUsage(chunk.usage),\n });\n }\n return parts;\n }\n\n const delta = choice.delta;\n\n if (delta.content) {\n if (!state.textStarted) {\n state.textStarted = true;\n parts.push({ type: \"text_start\" });\n }\n parts.push({ type: \"text_delta\", text: delta.content });\n }\n\n if (delta.tool_calls != null) {\n for (const tc of delta.tool_calls) {\n const existing = state.toolCalls.get(tc.index);\n\n if (existing === undefined) {\n const id = tc.id ?? \"\";\n const name = tc.function?.name ?? \"\";\n state.toolCalls.set(tc.index, { id, name });\n parts.push({ type: \"tool_call_start\", id, name });\n }\n\n if (tc.function?.arguments != null) {\n const info = state.toolCalls.get(tc.index)!;\n parts.push({\n type: \"tool_call_delta\",\n id: info.id,\n args: tc.function.arguments,\n });\n }\n }\n }\n\n if (choice.finish_reason != null) {\n if (state.textStarted) {\n parts.push({ type: \"text_end\" });\n state.textStarted = false;\n }\n\n for (const [, info] of state.toolCalls) {\n parts.push({ type: \"tool_call_end\", id: info.id });\n }\n state.toolCalls.clear();\n\n parts.push({\n type: \"finish\",\n finishReason: mapFinishReason(choice.finish_reason),\n usage: mapUsage(chunk.usage),\n });\n }\n\n return parts;\n}\n\n/** Convert a Uint8Array to a base64 string. 
*/\nfunction uint8ToBase64(data: Uint8Array): string {\n return Buffer.from(data).toString(\"base64\");\n}\n","import OpenAI from \"openai\";\nimport type {\n Model,\n ModelConfig,\n Provider,\n ProviderConfig,\n StreamPart,\n Schema,\n Message,\n ToolDefinition,\n} from \"@agentloop/core\";\nimport { schemaToJsonSchema } from \"@agentloop/core\";\nimport { convertMessages, convertToolDefs, createStreamState, mapChunk } from \"./convert.ts\";\n\n/** OpenAI-specific model configuration, extending core {@link ModelConfig}. */\nexport interface OpenAIModelConfig extends ModelConfig {\n /** Reasoning effort for o1/o3/gpt-5 models. */\n reasoningEffort?: \"none\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\n/** Create an OpenAI {@link Provider}. */\nexport function createOpenAI(config?: ProviderConfig): Provider {\n const client = new OpenAI({\n apiKey: config?.apiKey,\n baseURL: config?.baseUrl,\n timeout: config?.timeout,\n maxRetries: config?.maxRetries,\n defaultHeaders: config?.headers,\n });\n\n return {\n model(name: string, defaults?: ModelConfig): Model {\n const modelDefaults = defaults as OpenAIModelConfig | undefined;\n\n return {\n name,\n stream(options: {\n messages: Message[];\n tools?: ToolDefinition[];\n config?: ModelConfig;\n output?: Schema;\n signal?: AbortSignal;\n }): AsyncIterable<StreamPart> {\n const callConfig = options.config as OpenAIModelConfig | undefined;\n\n const messages = convertMessages(options.messages);\n\n const params: OpenAI.ChatCompletionCreateParamsStreaming = {\n model: name,\n messages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = callConfig?.maxTokens ?? modelDefaults?.maxTokens;\n if (maxTokens !== undefined) params.max_completion_tokens = maxTokens;\n\n const reasoningEffort = callConfig?.reasoningEffort ?? modelDefaults?.reasoningEffort;\n if (reasoningEffort === undefined) {\n const temperature = callConfig?.temperature ?? 
modelDefaults?.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const topP = callConfig?.topP ?? modelDefaults?.topP;\n if (topP !== undefined) params.top_p = topP;\n }\n\n if (reasoningEffort !== undefined) {\n params.reasoning_effort = reasoningEffort;\n }\n\n const stopSequences = callConfig?.stopSequences ?? modelDefaults?.stopSequences;\n if (stopSequences !== undefined) params.stop = stopSequences;\n\n if (options.tools !== undefined && options.tools.length > 0) {\n params.tools = convertToolDefs(options.tools);\n }\n\n if (options.output !== undefined) {\n params.response_format = {\n type: \"json_schema\",\n json_schema: {\n name: \"output\",\n schema: ensureStrictObjects(schemaToJsonSchema(options.output)),\n strict: true,\n },\n };\n }\n\n return streamOpenAI(client, params, options.signal);\n },\n };\n },\n };\n}\n\n/**\n * Stream an OpenAI chat completion, converting chunks to core {@link StreamPart} values.\n */\nasync function* streamOpenAI(\n client: OpenAI,\n params: OpenAI.ChatCompletionCreateParamsStreaming,\n signal?: AbortSignal,\n): AsyncGenerator<StreamPart> {\n const state = createStreamState();\n\n const response = await client.chat.completions.create(params, {\n signal,\n });\n\n for await (const chunk of response) {\n const parts = mapChunk(chunk, state);\n for (const part of parts) {\n yield part;\n }\n }\n}\n\n/**\n * Recursively add `additionalProperties: false` to all object schemas.\n * OpenAI's structured output requires this when `strict: true`.\n */\nfunction ensureStrictObjects(schema: Record<string, unknown>): Record<string, unknown> {\n if (schema.type === \"object\") {\n schema.additionalProperties = false;\n if (schema.properties !== null && typeof schema.properties === \"object\") {\n for (const value of Object.values(schema.properties as Record<string, unknown>)) {\n if (value !== null && typeof value === \"object\") {\n ensureStrictObjects(value as Record<string, unknown>);\n }\n }\n }\n 
}\n for (const key of [\"anyOf\", \"oneOf\", \"allOf\"] as const) {\n if (Array.isArray(schema[key])) {\n for (const item of schema[key] as Record<string, unknown>[]) {\n ensureStrictObjects(item);\n }\n }\n }\n if (schema.$defs !== null && typeof schema.$defs === \"object\") {\n for (const value of Object.values(schema.$defs as Record<string, unknown>)) {\n if (value !== null && typeof value === \"object\") {\n ensureStrictObjects(value as Record<string, unknown>);\n }\n }\n }\n return schema;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqBA,SAAgB,gBACd,QACc;AACd,SAAQ,QAAR;EACE,KAAK,OACH,QAAO;EACT,KAAK,SACH,QAAO;EACT,KAAK,aACH,QAAO;EACT,KAAK,iBACH,QAAO;EACT,KAAK,gBACH,QAAO;EAET,QACE,QAAO;;;;AAKb,SAAgB,SAAS,OAAyD;AAChF,KAAI,UAAU,KAAA,KAAa,UAAU,KACnC,QAAO;EAAE,aAAa;EAAG,cAAc;EAAG,aAAa;EAAG;AAE5D,QAAO;EACL,aAAa,MAAM;EACnB,cAAc,MAAM;EACpB,aAAa,MAAM;EACnB,iBAAiB,MAAM,uBAAuB,iBAAiB,KAAA;EAChE;;;AAIH,SAAgB,gBAAgB,OAAoC;AAClE,QAAO,MAAM,KAAK,OAAO;EACvB,MAAM;EACN,UAAU;GACR,MAAM,EAAE;GACR,aAAa,EAAE;GACf,aAAA,GAAA,gBAAA,oBAA+B,EAAE,OAAO;GACzC;EACF,EAAE;;AAGL,MAAM,cAAc,IAAI,IAAI;CAAC;CAAc;CAAa;CAAa;CAAa,CAAC;;AAGnF,SAAS,gBAAgB,MAAmC;AAC1D,SAAQ,KAAK,MAAb;EACE,KAAK,OACH,QAAO;GAAE,MAAM;GAAQ,MAAM,KAAK;GAAM;EAE1C,KAAK,OACH,QAAO;GAAE,MAAM;GAAQ,MAAM,KAAK,UAAU,KAAK,KAAK;GAAE;EAE1D,KAAK,OACH,QAAO,YAAY,KAAK;EAE1B,KAAK,MACH,QAAO,WAAW,KAAK;;;;AAK7B,SAAS,YAAY,MAAgC;CACnD,MAAM,OAAO,OAAO,KAAK,SAAS,WAAW,KAAK,OAAO,cAAc,KAAK,KAAK;AAEjF,KAAI,YAAY,IAAI,KAAK,UAAU,CACjC,QAAO;EACL,MAAM;EACN,WAAW,EAAE,KAAK,QAAQ,KAAK,UAAU,UAAU,QAAQ;EAC5D;AAGH,KAAI,KAAK,cAAc,eAAe,KAAK,cAAc,YAEvD,QAAO;EAAE,MAAM;EAAe,aAAa;GAAE;GAAM,QADpC,KAAK,cAAc,cAAc,QAAQ;GACG;EAAE;AAG/D,QAAO;EACL,MAAM;EACN,MAAM,EAAE,WAAW,QAAQ,KAAK,UAAU,UAAU,QAAQ;EAC7D;;;AAIH,SAAS,WAAW,MAA+B;AAMjD,KAJE,KAAK,cAAc,KAAA,IACf,YAAY,IAAI,KAAK,UAAU,GAC/B,gCAAgC,KAAK,KAAK,IAAI,CAGlD,QAAO;EAAE,MAAM;EAAa,WAAW,EAAE,KAAK,KAAK,KAAK;EAAE;AAG5D,QAAO;EACL,MAAM;EACN,MAAM,EAAE,WAAW,KAAK,KAAK;EAC9B;;;;;;;;;AAUH,SAAgB,gBAAgB,UAAmC;CACjE,MAAM,MAAoB,EAAE;AAE5B,MAAK,MAAM,
OAAO,SAChB,SAAQ,IAAI,MAAZ;EACE,KAAK;AACH,OAAI,KAAK;IACP,MAAM;IACN,SAAS,IAAI,QAAQ,KAAK,OAAO;KAAE,MAAM;KAAiB,MAAM,EAAE;KAAM,EAAE;IAC3E,CAAC;AACF;EAEF,KAAK;AACH,OAAI,KAAK;IACP,MAAM;IACN,SAAS,IAAI,QAAQ,IAAI,gBAAgB;IAC1C,CAAC;AACF;EAEF,KAAK;AACH,OAAI,KAAK,wBAAwB,IAAI,QAAQ,CAAC;AAC9C;EAEF,KAAK;AAEH,QAAK,MAAM,UAAU,IAAI,QACvB,KAAI,KAAK,kBAAkB,OAAO,CAAC;AAErC;;AAIN,QAAO;;;AAIT,SAAS,wBACP,SAC4C;CAC5C,IAAI,cAAc;CAClB,MAAM,YAAoD,EAAE;AAE5D,MAAK,MAAM,QAAQ,QACjB,SAAQ,KAAK,MAAb;EACE,KAAK;AACH,OAAI,YAAa,gBAAe;AAChC,kBAAe,KAAK;AACpB;EAEF,KAAK,WAEH;EAEF,KAAK;AACH,aAAU,KAAK;IACb,IAAI,KAAK;IACT,MAAM;IACN,UAAU;KACR,MAAM,KAAK;KACX,WAAW,KAAK,UAAU,KAAK,UAAU;KAC1C;IACF,CAAC;AACF;;CAIN,MAAM,MAAkD;EACtD,MAAM;EACN,SAAS,eAAe;EACzB;AAED,KAAI,UAAU,SAAS,EAAG,KAAI,aAAa;AAE3C,QAAO;;;AAIT,SAAS,kBAAkB,MAA6D;CACtF,IAAI,UAAU;AACd,MAAK,MAAM,QAAQ,KAAK,OACtB,SAAQ,KAAK,MAAb;EACE,KAAK;AACH,cAAW,KAAK;AAChB;EACF,KAAK;AACH,cAAW,KAAK,UAAU,KAAK,KAAK;AACpC;EACF;AACE,cAAW,IAAI,KAAK,KAAK;AACzB;;AAGN,QAAO;EAAE,MAAM;EAAQ,cAAc,KAAK;EAAI;EAAS;;;AAazD,SAAgB,oBAAiC;AAC/C,QAAO;EAAE,aAAa;EAAO,2BAAW,IAAI,KAAK;EAAE;;;;;;;;;;;AAYrD,SAAgB,SAAS,OAAiB,OAAkC;CAC1E,MAAM,QAAsB,EAAE;CAC9B,MAAM,SAAS,MAAM,QAAQ;AAE7B,KAAI,WAAW,KAAA,GAAW;AACxB,MAAI,MAAM,MACR,OAAM,KAAK;GACT,MAAM;GACN,cAAc;GACd,OAAO,SAAS,MAAM,MAAM;GAC7B,CAAC;AAEJ,SAAO;;CAGT,MAAM,QAAQ,OAAO;AAErB,KAAI,MAAM,SAAS;AACjB,MAAI,CAAC,MAAM,aAAa;AACtB,SAAM,cAAc;AACpB,SAAM,KAAK,EAAE,MAAM,cAAc,CAAC;;AAEpC,QAAM,KAAK;GAAE,MAAM;GAAc,MAAM,MAAM;GAAS,CAAC;;AAGzD,KAAI,MAAM,cAAc,KACtB,MAAK,MAAM,MAAM,MAAM,YAAY;AAGjC,MAFiB,MAAM,UAAU,IAAI,GAAG,MAAM,KAE7B,KAAA,GAAW;GAC1B,MAAM,KAAK,GAAG,MAAM;GACpB,MAAM,OAAO,GAAG,UAAU,QAAQ;AAClC,SAAM,UAAU,IAAI,GAAG,OAAO;IAAE;IAAI;IAAM,CAAC;AAC3C,SAAM,KAAK;IAAE,MAAM;IAAmB;IAAI;IAAM,CAAC;;AAGnD,MAAI,GAAG,UAAU,aAAa,MAAM;GAClC,MAAM,OAAO,MAAM,UAAU,IAAI,GAAG,MAAM;AAC1C,SAAM,KAAK;IACT,MAAM;IACN,IAAI,KAAK;IACT,MAAM,GAAG,SAAS;IACnB,CAAC;;;AAKR,KAAI,OAAO,iBAAiB,MAAM;AAChC,MAAI,MAAM,aAAa;AACrB,SAAM,KAAK,EAAE,MAAM,YAAY,CAAC;AAChC,SAAM,cAAc;;AAGtB,OAAK,MAAM,GAAG,SAAS,MAAM,UAC3B,
OAAM,KAAK;GAAE,MAAM;GAAiB,IAAI,KAAK;GAAI,CAAC;AAEpD,QAAM,UAAU,OAAO;AAEvB,QAAM,KAAK;GACT,MAAM;GACN,cAAc,gBAAgB,OAAO,cAAc;GACnD,OAAO,SAAS,MAAM,MAAM;GAC7B,CAAC;;AAGJ,QAAO;;;AAIT,SAAS,cAAc,MAA0B;AAC/C,QAAO,OAAO,KAAK,KAAK,CAAC,SAAS,SAAS;;;;;AC1S7C,SAAgB,aAAa,QAAmC;CAC9D,MAAM,SAAS,IAAIA,OAAAA,QAAO;EACxB,QAAQ,QAAQ;EAChB,SAAS,QAAQ;EACjB,SAAS,QAAQ;EACjB,YAAY,QAAQ;EACpB,gBAAgB,QAAQ;EACzB,CAAC;AAEF,QAAO,EACL,MAAM,MAAc,UAA+B;EACjD,MAAM,gBAAgB;AAEtB,SAAO;GACL;GACA,OAAO,SAMuB;IAC5B,MAAM,aAAa,QAAQ;IAI3B,MAAM,SAAqD;KACzD,OAAO;KACP,UAJe,gBAAgB,QAAQ,SAAS;KAKhD,QAAQ;KACR,gBAAgB,EAAE,eAAe,MAAM;KACxC;IAED,MAAM,YAAY,YAAY,aAAa,eAAe;AAC1D,QAAI,cAAc,KAAA,EAAW,QAAO,wBAAwB;IAE5D,MAAM,kBAAkB,YAAY,mBAAmB,eAAe;AACtE,QAAI,oBAAoB,KAAA,GAAW;KACjC,MAAM,cAAc,YAAY,eAAe,eAAe;AAC9D,SAAI,gBAAgB,KAAA,EAAW,QAAO,cAAc;KAEpD,MAAM,OAAO,YAAY,QAAQ,eAAe;AAChD,SAAI,SAAS,KAAA,EAAW,QAAO,QAAQ;;AAGzC,QAAI,oBAAoB,KAAA,EACtB,QAAO,mBAAmB;IAG5B,MAAM,gBAAgB,YAAY,iBAAiB,eAAe;AAClE,QAAI,kBAAkB,KAAA,EAAW,QAAO,OAAO;AAE/C,QAAI,QAAQ,UAAU,KAAA,KAAa,QAAQ,MAAM,SAAS,EACxD,QAAO,QAAQ,gBAAgB,QAAQ,MAAM;AAG/C,QAAI,QAAQ,WAAW,KAAA,EACrB,QAAO,kBAAkB;KACvB,MAAM;KACN,aAAa;MACX,MAAM;MACN,QAAQ,qBAAA,GAAA,gBAAA,oBAAuC,QAAQ,OAAO,CAAC;MAC/D,QAAQ;MACT;KACF;AAGH,WAAO,aAAa,QAAQ,QAAQ,QAAQ,OAAO;;GAEtD;IAEJ;;;;;AAMH,gBAAgB,aACd,QACA,QACA,QAC4B;CAC5B,MAAM,QAAQ,mBAAmB;CAEjC,MAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ,EAC5D,QACD,CAAC;AAEF,YAAW,MAAM,SAAS,UAAU;EAClC,MAAM,QAAQ,SAAS,OAAO,MAAM;AACpC,OAAK,MAAM,QAAQ,MACjB,OAAM;;;;;;;AASZ,SAAS,oBAAoB,QAA0D;AACrF,KAAI,OAAO,SAAS,UAAU;AAC5B,SAAO,uBAAuB;AAC9B,MAAI,OAAO,eAAe,QAAQ,OAAO,OAAO,eAAe;QACxD,MAAM,SAAS,OAAO,OAAO,OAAO,WAAsC,CAC7E,KAAI,UAAU,QAAQ,OAAO,UAAU,SACrC,qBAAoB,MAAiC;;;AAK7D,MAAK,MAAM,OAAO;EAAC;EAAS;EAAS;EAAQ,CAC3C,KAAI,MAAM,QAAQ,OAAO,KAAK,CAC5B,MAAK,MAAM,QAAQ,OAAO,KACxB,qBAAoB,KAAK;AAI/B,KAAI,OAAO,UAAU,QAAQ,OAAO,OAAO,UAAU;OAC9C,MAAM,SAAS,OAAO,OAAO,OAAO,MAAiC,CACxE,KAAI,UAAU,QAAQ,OAAO,UAAU,SACrC,qBAAoB,MAAiC;;AAI3D,QAAO"}
|
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { ModelConfig, Provider, ProviderConfig } from "@agentloop/core";
|
|
2
|
+
|
|
3
|
+
//#region src/provider.d.ts
|
|
4
|
+
/** OpenAI-specific model configuration, extending core {@link ModelConfig}. */
|
|
5
|
+
interface OpenAIModelConfig extends ModelConfig {
|
|
6
|
+
/** Reasoning effort for o1/o3/gpt-5 models. */
|
|
7
|
+
reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
|
|
8
|
+
}
|
|
9
|
+
/** Create an OpenAI {@link Provider}. */
|
|
10
|
+
declare function createOpenAI(config?: ProviderConfig): Provider;
|
|
11
|
+
//#endregion
|
|
12
|
+
export { type OpenAIModelConfig, createOpenAI };
|
|
13
|
+
//# sourceMappingURL=index.d.cts.map
|
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { ModelConfig, Provider, ProviderConfig } from "@agentloop/core";
|
|
2
|
+
|
|
3
|
+
//#region src/provider.d.ts
|
|
4
|
+
/** OpenAI-specific model configuration, extending core {@link ModelConfig}. */
|
|
5
|
+
interface OpenAIModelConfig extends ModelConfig {
|
|
6
|
+
/** Reasoning effort for o1/o3/gpt-5 models. */
|
|
7
|
+
reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
|
|
8
|
+
}
|
|
9
|
+
/** Create an OpenAI {@link Provider}. */
|
|
10
|
+
declare function createOpenAI(config?: ProviderConfig): Provider;
|
|
11
|
+
//#endregion
|
|
12
|
+
export { type OpenAIModelConfig, createOpenAI };
|
|
13
|
+
//# sourceMappingURL=index.d.mts.map
|
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
1
|
+
import OpenAI from "openai";
|
|
2
|
+
import { schemaToJsonSchema } from "@agentloop/core";
|
|
3
|
+
//#region src/convert.ts
|
|
4
|
+
/** Map an OpenAI finish reason to a core {@link FinishReason}. */
|
|
5
|
+
function mapFinishReason(reason) {
|
|
6
|
+
switch (reason) {
|
|
7
|
+
case "stop": return "stop";
|
|
8
|
+
case "length": return "length";
|
|
9
|
+
case "tool_calls": return "tool_call";
|
|
10
|
+
case "content_filter": return "refused";
|
|
11
|
+
case "function_call": return "tool_call";
|
|
12
|
+
default: return "unknown";
|
|
13
|
+
}
|
|
14
|
+
}
|
|
15
|
+
/** Map OpenAI usage to a core {@link Usage}. */
|
|
16
|
+
function mapUsage(usage) {
|
|
17
|
+
if (usage === void 0 || usage === null) return {
|
|
18
|
+
inputTokens: 0,
|
|
19
|
+
outputTokens: 0,
|
|
20
|
+
totalTokens: 0
|
|
21
|
+
};
|
|
22
|
+
return {
|
|
23
|
+
inputTokens: usage.prompt_tokens,
|
|
24
|
+
outputTokens: usage.completion_tokens,
|
|
25
|
+
totalTokens: usage.total_tokens,
|
|
26
|
+
cacheReadTokens: usage.prompt_tokens_details?.cached_tokens ?? void 0
|
|
27
|
+
};
|
|
28
|
+
}
|
|
29
|
+
/** Convert core {@link ToolDefinition} array to OpenAI tool format. */
|
|
30
|
+
function convertToolDefs(tools) {
|
|
31
|
+
return tools.map((t) => ({
|
|
32
|
+
type: "function",
|
|
33
|
+
function: {
|
|
34
|
+
name: t.name,
|
|
35
|
+
description: t.description,
|
|
36
|
+
parameters: schemaToJsonSchema(t.schema)
|
|
37
|
+
}
|
|
38
|
+
}));
|
|
39
|
+
}
|
|
40
|
+
const IMAGE_TYPES = new Set([
|
|
41
|
+
"image/jpeg",
|
|
42
|
+
"image/png",
|
|
43
|
+
"image/gif",
|
|
44
|
+
"image/webp"
|
|
45
|
+
]);
|
|
46
|
+
/** Convert a core {@link UserContent} part to an OpenAI content part. */
|
|
47
|
+
function convertUserPart(part) {
|
|
48
|
+
switch (part.type) {
|
|
49
|
+
case "text": return {
|
|
50
|
+
type: "text",
|
|
51
|
+
text: part.text
|
|
52
|
+
};
|
|
53
|
+
case "json": return {
|
|
54
|
+
type: "text",
|
|
55
|
+
text: JSON.stringify(part.json)
|
|
56
|
+
};
|
|
57
|
+
case "blob": return convertBlob(part);
|
|
58
|
+
case "url": return convertUrl(part);
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
/** Convert a core {@link BlobPart} to an OpenAI content part. */
|
|
62
|
+
function convertBlob(part) {
|
|
63
|
+
const data = typeof part.data === "string" ? part.data : uint8ToBase64(part.data);
|
|
64
|
+
if (IMAGE_TYPES.has(part.mediaType)) return {
|
|
65
|
+
type: "image_url",
|
|
66
|
+
image_url: { url: `data:${part.mediaType};base64,${data}` }
|
|
67
|
+
};
|
|
68
|
+
if (part.mediaType === "audio/wav" || part.mediaType === "audio/mp3") return {
|
|
69
|
+
type: "input_audio",
|
|
70
|
+
input_audio: {
|
|
71
|
+
data,
|
|
72
|
+
format: part.mediaType === "audio/wav" ? "wav" : "mp3"
|
|
73
|
+
}
|
|
74
|
+
};
|
|
75
|
+
return {
|
|
76
|
+
type: "file",
|
|
77
|
+
file: { file_data: `data:${part.mediaType};base64,${data}` }
|
|
78
|
+
};
|
|
79
|
+
}
|
|
80
|
+
/** Convert a core {@link URLPart} to an OpenAI content part. */
|
|
81
|
+
function convertUrl(part) {
|
|
82
|
+
if (part.mediaType !== void 0 ? IMAGE_TYPES.has(part.mediaType) : /\.(jpe?g|png|gif|webp)(\?|$)/i.test(part.url)) return {
|
|
83
|
+
type: "image_url",
|
|
84
|
+
image_url: { url: part.url }
|
|
85
|
+
};
|
|
86
|
+
return {
|
|
87
|
+
type: "file",
|
|
88
|
+
file: { file_data: part.url }
|
|
89
|
+
};
|
|
90
|
+
}
|
|
91
|
+
/**
|
|
92
|
+
* Convert a core {@link Message} array into OpenAI's Chat Completions format.
|
|
93
|
+
*
|
|
94
|
+
* System messages become `{ role: "system" }`. Tool messages are split into
|
|
95
|
+
* one `{ role: "tool" }` message per tool result (OpenAI requires one per
|
|
96
|
+
* `tool_call_id`).
|
|
97
|
+
*/
|
|
98
|
+
function convertMessages(messages) {
|
|
99
|
+
const out = [];
|
|
100
|
+
for (const msg of messages) switch (msg.role) {
|
|
101
|
+
case "system":
|
|
102
|
+
out.push({
|
|
103
|
+
role: "system",
|
|
104
|
+
content: msg.content.map((p) => ({
|
|
105
|
+
type: "text",
|
|
106
|
+
text: p.text
|
|
107
|
+
}))
|
|
108
|
+
});
|
|
109
|
+
break;
|
|
110
|
+
case "user":
|
|
111
|
+
out.push({
|
|
112
|
+
role: "user",
|
|
113
|
+
content: msg.content.map(convertUserPart)
|
|
114
|
+
});
|
|
115
|
+
break;
|
|
116
|
+
case "assistant":
|
|
117
|
+
out.push(convertAssistantMessage(msg.content));
|
|
118
|
+
break;
|
|
119
|
+
case "tool":
|
|
120
|
+
for (const result of msg.content) out.push(convertToolResult(result));
|
|
121
|
+
break;
|
|
122
|
+
}
|
|
123
|
+
return out;
|
|
124
|
+
}
|
|
125
|
+
/** Convert core assistant content to an OpenAI assistant message. */
|
|
126
|
+
function convertAssistantMessage(content) {
|
|
127
|
+
let textContent = "";
|
|
128
|
+
const toolCalls = [];
|
|
129
|
+
for (const part of content) switch (part.type) {
|
|
130
|
+
case "text":
|
|
131
|
+
if (textContent) textContent += "\n";
|
|
132
|
+
textContent += part.text;
|
|
133
|
+
break;
|
|
134
|
+
case "thinking": break;
|
|
135
|
+
case "tool_call":
|
|
136
|
+
toolCalls.push({
|
|
137
|
+
id: part.id,
|
|
138
|
+
type: "function",
|
|
139
|
+
function: {
|
|
140
|
+
name: part.name,
|
|
141
|
+
arguments: JSON.stringify(part.arguments)
|
|
142
|
+
}
|
|
143
|
+
});
|
|
144
|
+
break;
|
|
145
|
+
}
|
|
146
|
+
const msg = {
|
|
147
|
+
role: "assistant",
|
|
148
|
+
content: textContent || null
|
|
149
|
+
};
|
|
150
|
+
if (toolCalls.length > 0) msg.tool_calls = toolCalls;
|
|
151
|
+
return msg;
|
|
152
|
+
}
|
|
153
|
+
/** Convert a core tool result to an OpenAI tool message. */
|
|
154
|
+
function convertToolResult(part) {
|
|
155
|
+
let content = "";
|
|
156
|
+
for (const item of part.output) switch (item.type) {
|
|
157
|
+
case "text":
|
|
158
|
+
content += item.text;
|
|
159
|
+
break;
|
|
160
|
+
case "json":
|
|
161
|
+
content += JSON.stringify(item.json);
|
|
162
|
+
break;
|
|
163
|
+
default:
|
|
164
|
+
content += `[${item.type}]`;
|
|
165
|
+
break;
|
|
166
|
+
}
|
|
167
|
+
return {
|
|
168
|
+
role: "tool",
|
|
169
|
+
tool_call_id: part.id,
|
|
170
|
+
content
|
|
171
|
+
};
|
|
172
|
+
}
|
|
173
|
+
/** Create a fresh {@link StreamState}. */
|
|
174
|
+
function createStreamState() {
|
|
175
|
+
return {
|
|
176
|
+
textStarted: false,
|
|
177
|
+
toolCalls: /* @__PURE__ */ new Map()
|
|
178
|
+
};
|
|
179
|
+
}
|
|
180
|
+
/**
|
|
181
|
+
* Map a single OpenAI {@link ChatCompletionChunk} to zero or more core
|
|
182
|
+
* {@link StreamPart} values.
|
|
183
|
+
*
|
|
184
|
+
* OpenAI chunks don't have explicit start/end events. We infer them:
|
|
185
|
+
* - First text delta → emit `text_start` then `text_delta`
|
|
186
|
+
* - First delta for a tool call index → emit `tool_call_start`
|
|
187
|
+
* - `finish_reason` present → emit `text_end`/`tool_call_end` + `finish`
|
|
188
|
+
*/
|
|
189
|
+
function mapChunk(chunk, state) {
|
|
190
|
+
const parts = [];
|
|
191
|
+
const choice = chunk.choices[0];
|
|
192
|
+
if (choice === void 0) {
|
|
193
|
+
if (chunk.usage) parts.push({
|
|
194
|
+
type: "finish",
|
|
195
|
+
finishReason: "stop",
|
|
196
|
+
usage: mapUsage(chunk.usage)
|
|
197
|
+
});
|
|
198
|
+
return parts;
|
|
199
|
+
}
|
|
200
|
+
const delta = choice.delta;
|
|
201
|
+
if (delta.content) {
|
|
202
|
+
if (!state.textStarted) {
|
|
203
|
+
state.textStarted = true;
|
|
204
|
+
parts.push({ type: "text_start" });
|
|
205
|
+
}
|
|
206
|
+
parts.push({
|
|
207
|
+
type: "text_delta",
|
|
208
|
+
text: delta.content
|
|
209
|
+
});
|
|
210
|
+
}
|
|
211
|
+
if (delta.tool_calls != null) for (const tc of delta.tool_calls) {
|
|
212
|
+
if (state.toolCalls.get(tc.index) === void 0) {
|
|
213
|
+
const id = tc.id ?? "";
|
|
214
|
+
const name = tc.function?.name ?? "";
|
|
215
|
+
state.toolCalls.set(tc.index, {
|
|
216
|
+
id,
|
|
217
|
+
name
|
|
218
|
+
});
|
|
219
|
+
parts.push({
|
|
220
|
+
type: "tool_call_start",
|
|
221
|
+
id,
|
|
222
|
+
name
|
|
223
|
+
});
|
|
224
|
+
}
|
|
225
|
+
if (tc.function?.arguments != null) {
|
|
226
|
+
const info = state.toolCalls.get(tc.index);
|
|
227
|
+
parts.push({
|
|
228
|
+
type: "tool_call_delta",
|
|
229
|
+
id: info.id,
|
|
230
|
+
args: tc.function.arguments
|
|
231
|
+
});
|
|
232
|
+
}
|
|
233
|
+
}
|
|
234
|
+
if (choice.finish_reason != null) {
|
|
235
|
+
if (state.textStarted) {
|
|
236
|
+
parts.push({ type: "text_end" });
|
|
237
|
+
state.textStarted = false;
|
|
238
|
+
}
|
|
239
|
+
for (const [, info] of state.toolCalls) parts.push({
|
|
240
|
+
type: "tool_call_end",
|
|
241
|
+
id: info.id
|
|
242
|
+
});
|
|
243
|
+
state.toolCalls.clear();
|
|
244
|
+
parts.push({
|
|
245
|
+
type: "finish",
|
|
246
|
+
finishReason: mapFinishReason(choice.finish_reason),
|
|
247
|
+
usage: mapUsage(chunk.usage)
|
|
248
|
+
});
|
|
249
|
+
}
|
|
250
|
+
return parts;
|
|
251
|
+
}
|
|
252
|
+
/** Convert a Uint8Array to a base64 string. */
|
|
253
|
+
function uint8ToBase64(data) {
|
|
254
|
+
return Buffer.from(data).toString("base64");
|
|
255
|
+
}
|
|
256
|
+
//#endregion
|
|
257
|
+
//#region src/provider.ts
|
|
258
|
+
/** Create an OpenAI {@link Provider}. */
|
|
259
|
+
function createOpenAI(config) {
|
|
260
|
+
const client = new OpenAI({
|
|
261
|
+
apiKey: config?.apiKey,
|
|
262
|
+
baseURL: config?.baseUrl,
|
|
263
|
+
timeout: config?.timeout,
|
|
264
|
+
maxRetries: config?.maxRetries,
|
|
265
|
+
defaultHeaders: config?.headers
|
|
266
|
+
});
|
|
267
|
+
return { model(name, defaults) {
|
|
268
|
+
const modelDefaults = defaults;
|
|
269
|
+
return {
|
|
270
|
+
name,
|
|
271
|
+
stream(options) {
|
|
272
|
+
const callConfig = options.config;
|
|
273
|
+
const params = {
|
|
274
|
+
model: name,
|
|
275
|
+
messages: convertMessages(options.messages),
|
|
276
|
+
stream: true,
|
|
277
|
+
stream_options: { include_usage: true }
|
|
278
|
+
};
|
|
279
|
+
const maxTokens = callConfig?.maxTokens ?? modelDefaults?.maxTokens;
|
|
280
|
+
if (maxTokens !== void 0) params.max_completion_tokens = maxTokens;
|
|
281
|
+
const reasoningEffort = callConfig?.reasoningEffort ?? modelDefaults?.reasoningEffort;
|
|
282
|
+
if (reasoningEffort === void 0) {
|
|
283
|
+
const temperature = callConfig?.temperature ?? modelDefaults?.temperature;
|
|
284
|
+
if (temperature !== void 0) params.temperature = temperature;
|
|
285
|
+
const topP = callConfig?.topP ?? modelDefaults?.topP;
|
|
286
|
+
if (topP !== void 0) params.top_p = topP;
|
|
287
|
+
}
|
|
288
|
+
if (reasoningEffort !== void 0) params.reasoning_effort = reasoningEffort;
|
|
289
|
+
const stopSequences = callConfig?.stopSequences ?? modelDefaults?.stopSequences;
|
|
290
|
+
if (stopSequences !== void 0) params.stop = stopSequences;
|
|
291
|
+
if (options.tools !== void 0 && options.tools.length > 0) params.tools = convertToolDefs(options.tools);
|
|
292
|
+
if (options.output !== void 0) params.response_format = {
|
|
293
|
+
type: "json_schema",
|
|
294
|
+
json_schema: {
|
|
295
|
+
name: "output",
|
|
296
|
+
schema: ensureStrictObjects(schemaToJsonSchema(options.output)),
|
|
297
|
+
strict: true
|
|
298
|
+
}
|
|
299
|
+
};
|
|
300
|
+
return streamOpenAI(client, params, options.signal);
|
|
301
|
+
}
|
|
302
|
+
};
|
|
303
|
+
} };
|
|
304
|
+
}
|
|
305
|
+
/**
|
|
306
|
+
* Stream an OpenAI chat completion, converting chunks to core {@link StreamPart} values.
|
|
307
|
+
*/
|
|
308
|
+
async function* streamOpenAI(client, params, signal) {
|
|
309
|
+
const state = createStreamState();
|
|
310
|
+
const response = await client.chat.completions.create(params, { signal });
|
|
311
|
+
for await (const chunk of response) {
|
|
312
|
+
const parts = mapChunk(chunk, state);
|
|
313
|
+
for (const part of parts) yield part;
|
|
314
|
+
}
|
|
315
|
+
}
|
|
316
|
+
/**
|
|
317
|
+
* Recursively add `additionalProperties: false` to all object schemas.
|
|
318
|
+
* OpenAI's structured output requires this when `strict: true`.
|
|
319
|
+
*/
|
|
320
|
+
function ensureStrictObjects(schema) {
|
|
321
|
+
if (schema.type === "object") {
|
|
322
|
+
schema.additionalProperties = false;
|
|
323
|
+
if (schema.properties !== null && typeof schema.properties === "object") {
|
|
324
|
+
for (const value of Object.values(schema.properties)) if (value !== null && typeof value === "object") ensureStrictObjects(value);
|
|
325
|
+
}
|
|
326
|
+
}
|
|
327
|
+
for (const key of [
|
|
328
|
+
"anyOf",
|
|
329
|
+
"oneOf",
|
|
330
|
+
"allOf"
|
|
331
|
+
]) if (Array.isArray(schema[key])) for (const item of schema[key]) ensureStrictObjects(item);
|
|
332
|
+
if (schema.$defs !== null && typeof schema.$defs === "object") {
|
|
333
|
+
for (const value of Object.values(schema.$defs)) if (value !== null && typeof value === "object") ensureStrictObjects(value);
|
|
334
|
+
}
|
|
335
|
+
return schema;
|
|
336
|
+
}
|
|
337
|
+
//#endregion
|
|
338
|
+
export { createOpenAI };
|
|
339
|
+
|
|
340
|
+
//# sourceMappingURL=index.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.mjs","names":[],"sources":["../src/convert.ts","../src/provider.ts"],"sourcesContent":["import type OpenAI from \"openai\";\nimport type {\n BlobPart,\n URLPart,\n AssistantContent,\n UserContent,\n ToolResultPart,\n Message,\n FinishReason,\n Usage,\n ToolDefinition,\n StreamPart,\n} from \"@agentloop/core\";\nimport { schemaToJsonSchema } from \"@agentloop/core\";\n\ntype OAIMessage = OpenAI.ChatCompletionMessageParam;\ntype OAITool = OpenAI.ChatCompletionTool;\ntype OAIChunk = OpenAI.ChatCompletionChunk;\ntype OAIContentPart = OpenAI.ChatCompletionContentPart;\n\n/** Map an OpenAI finish reason to a core {@link FinishReason}. */\nexport function mapFinishReason(\n reason: \"stop\" | \"length\" | \"tool_calls\" | \"content_filter\" | \"function_call\" | null,\n): FinishReason {\n switch (reason) {\n case \"stop\":\n return \"stop\";\n case \"length\":\n return \"length\";\n case \"tool_calls\":\n return \"tool_call\";\n case \"content_filter\":\n return \"refused\";\n case \"function_call\":\n return \"tool_call\";\n case null:\n default:\n return \"unknown\";\n }\n}\n\n/** Map OpenAI usage to a core {@link Usage}. */\nexport function mapUsage(usage: OpenAI.CompletionUsage | undefined | null): Usage {\n if (usage === undefined || usage === null) {\n return { inputTokens: 0, outputTokens: 0, totalTokens: 0 };\n }\n return {\n inputTokens: usage.prompt_tokens,\n outputTokens: usage.completion_tokens,\n totalTokens: usage.total_tokens,\n cacheReadTokens: usage.prompt_tokens_details?.cached_tokens ?? undefined,\n };\n}\n\n/** Convert core {@link ToolDefinition} array to OpenAI tool format. 
*/\nexport function convertToolDefs(tools: ToolDefinition[]): OAITool[] {\n return tools.map((t) => ({\n type: \"function\" as const,\n function: {\n name: t.name,\n description: t.description,\n parameters: schemaToJsonSchema(t.schema) as OpenAI.FunctionParameters,\n },\n }));\n}\n\nconst IMAGE_TYPES = new Set([\"image/jpeg\", \"image/png\", \"image/gif\", \"image/webp\"]);\n\n/** Convert a core {@link UserContent} part to an OpenAI content part. */\nfunction convertUserPart(part: UserContent): OAIContentPart {\n switch (part.type) {\n case \"text\":\n return { type: \"text\", text: part.text };\n\n case \"json\":\n return { type: \"text\", text: JSON.stringify(part.json) };\n\n case \"blob\":\n return convertBlob(part);\n\n case \"url\":\n return convertUrl(part);\n }\n}\n\n/** Convert a core {@link BlobPart} to an OpenAI content part. */\nfunction convertBlob(part: BlobPart): OAIContentPart {\n const data = typeof part.data === \"string\" ? part.data : uint8ToBase64(part.data);\n\n if (IMAGE_TYPES.has(part.mediaType)) {\n return {\n type: \"image_url\",\n image_url: { url: `data:${part.mediaType};base64,${data}` },\n };\n }\n\n if (part.mediaType === \"audio/wav\" || part.mediaType === \"audio/mp3\") {\n const format = part.mediaType === \"audio/wav\" ? \"wav\" : \"mp3\";\n return { type: \"input_audio\", input_audio: { data, format } };\n }\n\n return {\n type: \"file\",\n file: { file_data: `data:${part.mediaType};base64,${data}` },\n } as OAIContentPart;\n}\n\n/** Convert a core {@link URLPart} to an OpenAI content part. */\nfunction convertUrl(part: URLPart): OAIContentPart {\n const isImage =\n part.mediaType !== undefined\n ? 
IMAGE_TYPES.has(part.mediaType)\n : /\\.(jpe?g|png|gif|webp)(\\?|$)/i.test(part.url);\n\n if (isImage) {\n return { type: \"image_url\", image_url: { url: part.url } };\n }\n\n return {\n type: \"file\",\n file: { file_data: part.url },\n } as OAIContentPart;\n}\n\n/**\n * Convert a core {@link Message} array into OpenAI's Chat Completions format.\n *\n * System messages become `{ role: \"system\" }`. Tool messages are split into\n * one `{ role: \"tool\" }` message per tool result (OpenAI requires one per\n * `tool_call_id`).\n */\nexport function convertMessages(messages: Message[]): OAIMessage[] {\n const out: OAIMessage[] = [];\n\n for (const msg of messages) {\n switch (msg.role) {\n case \"system\":\n out.push({\n role: \"system\",\n content: msg.content.map((p) => ({ type: \"text\" as const, text: p.text })),\n });\n break;\n\n case \"user\":\n out.push({\n role: \"user\",\n content: msg.content.map(convertUserPart),\n });\n break;\n\n case \"assistant\":\n out.push(convertAssistantMessage(msg.content));\n break;\n\n case \"tool\":\n // OpenAI requires one tool message per tool_call_id.\n for (const result of msg.content) {\n out.push(convertToolResult(result));\n }\n break;\n }\n }\n\n return out;\n}\n\n/** Convert core assistant content to an OpenAI assistant message. 
*/\nfunction convertAssistantMessage(\n content: AssistantContent[],\n): OpenAI.ChatCompletionAssistantMessageParam {\n let textContent = \"\";\n const toolCalls: OpenAI.ChatCompletionMessageToolCall[] = [];\n\n for (const part of content) {\n switch (part.type) {\n case \"text\":\n if (textContent) textContent += \"\\n\";\n textContent += part.text;\n break;\n\n case \"thinking\":\n // OpenAI Chat Completions doesn't support thinking blocks — skip.\n break;\n\n case \"tool_call\":\n toolCalls.push({\n id: part.id,\n type: \"function\",\n function: {\n name: part.name,\n arguments: JSON.stringify(part.arguments),\n },\n });\n break;\n }\n }\n\n const msg: OpenAI.ChatCompletionAssistantMessageParam = {\n role: \"assistant\",\n content: textContent || null,\n };\n\n if (toolCalls.length > 0) msg.tool_calls = toolCalls;\n\n return msg;\n}\n\n/** Convert a core tool result to an OpenAI tool message. */\nfunction convertToolResult(part: ToolResultPart): OpenAI.ChatCompletionToolMessageParam {\n let content = \"\";\n for (const item of part.output) {\n switch (item.type) {\n case \"text\":\n content += item.text;\n break;\n case \"json\":\n content += JSON.stringify(item.json);\n break;\n default:\n content += `[${item.type}]`;\n break;\n }\n }\n return { role: \"tool\", tool_call_id: part.id, content };\n}\n\n/**\n * Tracks state during streaming to synthesize start/end events that\n * OpenAI's chunk format doesn't provide explicitly.\n */\nexport interface StreamState {\n textStarted: boolean;\n toolCalls: Map<number, { id: string; name: string }>;\n}\n\n/** Create a fresh {@link StreamState}. */\nexport function createStreamState(): StreamState {\n return { textStarted: false, toolCalls: new Map() };\n}\n\n/**\n * Map a single OpenAI {@link ChatCompletionChunk} to zero or more core\n * {@link StreamPart} values.\n *\n * OpenAI chunks don't have explicit start/end events. 
We infer them:\n * - First text delta → emit `text_start` then `text_delta`\n * - First delta for a tool call index → emit `tool_call_start`\n * - `finish_reason` present → emit `text_end`/`tool_call_end` + `finish`\n */\nexport function mapChunk(chunk: OAIChunk, state: StreamState): StreamPart[] {\n const parts: StreamPart[] = [];\n const choice = chunk.choices[0];\n\n if (choice === undefined) {\n if (chunk.usage) {\n parts.push({\n type: \"finish\",\n finishReason: \"stop\",\n usage: mapUsage(chunk.usage),\n });\n }\n return parts;\n }\n\n const delta = choice.delta;\n\n if (delta.content) {\n if (!state.textStarted) {\n state.textStarted = true;\n parts.push({ type: \"text_start\" });\n }\n parts.push({ type: \"text_delta\", text: delta.content });\n }\n\n if (delta.tool_calls != null) {\n for (const tc of delta.tool_calls) {\n const existing = state.toolCalls.get(tc.index);\n\n if (existing === undefined) {\n const id = tc.id ?? \"\";\n const name = tc.function?.name ?? \"\";\n state.toolCalls.set(tc.index, { id, name });\n parts.push({ type: \"tool_call_start\", id, name });\n }\n\n if (tc.function?.arguments != null) {\n const info = state.toolCalls.get(tc.index)!;\n parts.push({\n type: \"tool_call_delta\",\n id: info.id,\n args: tc.function.arguments,\n });\n }\n }\n }\n\n if (choice.finish_reason != null) {\n if (state.textStarted) {\n parts.push({ type: \"text_end\" });\n state.textStarted = false;\n }\n\n for (const [, info] of state.toolCalls) {\n parts.push({ type: \"tool_call_end\", id: info.id });\n }\n state.toolCalls.clear();\n\n parts.push({\n type: \"finish\",\n finishReason: mapFinishReason(choice.finish_reason),\n usage: mapUsage(chunk.usage),\n });\n }\n\n return parts;\n}\n\n/** Convert a Uint8Array to a base64 string. 
*/\nfunction uint8ToBase64(data: Uint8Array): string {\n return Buffer.from(data).toString(\"base64\");\n}\n","import OpenAI from \"openai\";\nimport type {\n Model,\n ModelConfig,\n Provider,\n ProviderConfig,\n StreamPart,\n Schema,\n Message,\n ToolDefinition,\n} from \"@agentloop/core\";\nimport { schemaToJsonSchema } from \"@agentloop/core\";\nimport { convertMessages, convertToolDefs, createStreamState, mapChunk } from \"./convert.ts\";\n\n/** OpenAI-specific model configuration, extending core {@link ModelConfig}. */\nexport interface OpenAIModelConfig extends ModelConfig {\n /** Reasoning effort for o1/o3/gpt-5 models. */\n reasoningEffort?: \"none\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n}\n\n/** Create an OpenAI {@link Provider}. */\nexport function createOpenAI(config?: ProviderConfig): Provider {\n const client = new OpenAI({\n apiKey: config?.apiKey,\n baseURL: config?.baseUrl,\n timeout: config?.timeout,\n maxRetries: config?.maxRetries,\n defaultHeaders: config?.headers,\n });\n\n return {\n model(name: string, defaults?: ModelConfig): Model {\n const modelDefaults = defaults as OpenAIModelConfig | undefined;\n\n return {\n name,\n stream(options: {\n messages: Message[];\n tools?: ToolDefinition[];\n config?: ModelConfig;\n output?: Schema;\n signal?: AbortSignal;\n }): AsyncIterable<StreamPart> {\n const callConfig = options.config as OpenAIModelConfig | undefined;\n\n const messages = convertMessages(options.messages);\n\n const params: OpenAI.ChatCompletionCreateParamsStreaming = {\n model: name,\n messages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n const maxTokens = callConfig?.maxTokens ?? modelDefaults?.maxTokens;\n if (maxTokens !== undefined) params.max_completion_tokens = maxTokens;\n\n const reasoningEffort = callConfig?.reasoningEffort ?? modelDefaults?.reasoningEffort;\n if (reasoningEffort === undefined) {\n const temperature = callConfig?.temperature ?? 
modelDefaults?.temperature;\n if (temperature !== undefined) params.temperature = temperature;\n\n const topP = callConfig?.topP ?? modelDefaults?.topP;\n if (topP !== undefined) params.top_p = topP;\n }\n\n if (reasoningEffort !== undefined) {\n params.reasoning_effort = reasoningEffort;\n }\n\n const stopSequences = callConfig?.stopSequences ?? modelDefaults?.stopSequences;\n if (stopSequences !== undefined) params.stop = stopSequences;\n\n if (options.tools !== undefined && options.tools.length > 0) {\n params.tools = convertToolDefs(options.tools);\n }\n\n if (options.output !== undefined) {\n params.response_format = {\n type: \"json_schema\",\n json_schema: {\n name: \"output\",\n schema: ensureStrictObjects(schemaToJsonSchema(options.output)),\n strict: true,\n },\n };\n }\n\n return streamOpenAI(client, params, options.signal);\n },\n };\n },\n };\n}\n\n/**\n * Stream an OpenAI chat completion, converting chunks to core {@link StreamPart} values.\n */\nasync function* streamOpenAI(\n client: OpenAI,\n params: OpenAI.ChatCompletionCreateParamsStreaming,\n signal?: AbortSignal,\n): AsyncGenerator<StreamPart> {\n const state = createStreamState();\n\n const response = await client.chat.completions.create(params, {\n signal,\n });\n\n for await (const chunk of response) {\n const parts = mapChunk(chunk, state);\n for (const part of parts) {\n yield part;\n }\n }\n}\n\n/**\n * Recursively add `additionalProperties: false` to all object schemas.\n * OpenAI's structured output requires this when `strict: true`.\n */\nfunction ensureStrictObjects(schema: Record<string, unknown>): Record<string, unknown> {\n if (schema.type === \"object\") {\n schema.additionalProperties = false;\n if (schema.properties !== null && typeof schema.properties === \"object\") {\n for (const value of Object.values(schema.properties as Record<string, unknown>)) {\n if (value !== null && typeof value === \"object\") {\n ensureStrictObjects(value as Record<string, unknown>);\n }\n }\n }\n 
}\n for (const key of [\"anyOf\", \"oneOf\", \"allOf\"] as const) {\n if (Array.isArray(schema[key])) {\n for (const item of schema[key] as Record<string, unknown>[]) {\n ensureStrictObjects(item);\n }\n }\n }\n if (schema.$defs !== null && typeof schema.$defs === \"object\") {\n for (const value of Object.values(schema.$defs as Record<string, unknown>)) {\n if (value !== null && typeof value === \"object\") {\n ensureStrictObjects(value as Record<string, unknown>);\n }\n }\n }\n return schema;\n}\n"],"mappings":";;;;AAqBA,SAAgB,gBACd,QACc;AACd,SAAQ,QAAR;EACE,KAAK,OACH,QAAO;EACT,KAAK,SACH,QAAO;EACT,KAAK,aACH,QAAO;EACT,KAAK,iBACH,QAAO;EACT,KAAK,gBACH,QAAO;EAET,QACE,QAAO;;;;AAKb,SAAgB,SAAS,OAAyD;AAChF,KAAI,UAAU,KAAA,KAAa,UAAU,KACnC,QAAO;EAAE,aAAa;EAAG,cAAc;EAAG,aAAa;EAAG;AAE5D,QAAO;EACL,aAAa,MAAM;EACnB,cAAc,MAAM;EACpB,aAAa,MAAM;EACnB,iBAAiB,MAAM,uBAAuB,iBAAiB,KAAA;EAChE;;;AAIH,SAAgB,gBAAgB,OAAoC;AAClE,QAAO,MAAM,KAAK,OAAO;EACvB,MAAM;EACN,UAAU;GACR,MAAM,EAAE;GACR,aAAa,EAAE;GACf,YAAY,mBAAmB,EAAE,OAAO;GACzC;EACF,EAAE;;AAGL,MAAM,cAAc,IAAI,IAAI;CAAC;CAAc;CAAa;CAAa;CAAa,CAAC;;AAGnF,SAAS,gBAAgB,MAAmC;AAC1D,SAAQ,KAAK,MAAb;EACE,KAAK,OACH,QAAO;GAAE,MAAM;GAAQ,MAAM,KAAK;GAAM;EAE1C,KAAK,OACH,QAAO;GAAE,MAAM;GAAQ,MAAM,KAAK,UAAU,KAAK,KAAK;GAAE;EAE1D,KAAK,OACH,QAAO,YAAY,KAAK;EAE1B,KAAK,MACH,QAAO,WAAW,KAAK;;;;AAK7B,SAAS,YAAY,MAAgC;CACnD,MAAM,OAAO,OAAO,KAAK,SAAS,WAAW,KAAK,OAAO,cAAc,KAAK,KAAK;AAEjF,KAAI,YAAY,IAAI,KAAK,UAAU,CACjC,QAAO;EACL,MAAM;EACN,WAAW,EAAE,KAAK,QAAQ,KAAK,UAAU,UAAU,QAAQ;EAC5D;AAGH,KAAI,KAAK,cAAc,eAAe,KAAK,cAAc,YAEvD,QAAO;EAAE,MAAM;EAAe,aAAa;GAAE;GAAM,QADpC,KAAK,cAAc,cAAc,QAAQ;GACG;EAAE;AAG/D,QAAO;EACL,MAAM;EACN,MAAM,EAAE,WAAW,QAAQ,KAAK,UAAU,UAAU,QAAQ;EAC7D;;;AAIH,SAAS,WAAW,MAA+B;AAMjD,KAJE,KAAK,cAAc,KAAA,IACf,YAAY,IAAI,KAAK,UAAU,GAC/B,gCAAgC,KAAK,KAAK,IAAI,CAGlD,QAAO;EAAE,MAAM;EAAa,WAAW,EAAE,KAAK,KAAK,KAAK;EAAE;AAG5D,QAAO;EACL,MAAM;EACN,MAAM,EAAE,WAAW,KAAK,KAAK;EAC9B;;;;;;;;;AAUH,SAAgB,gBAAgB,UAAmC;CACjE,MAAM,MAAoB,EAAE;AAE5B,MAAK,MAAM,OAAO,SAChB,SAAQ,IAAI,MAAZ;EACE,KAAK
;AACH,OAAI,KAAK;IACP,MAAM;IACN,SAAS,IAAI,QAAQ,KAAK,OAAO;KAAE,MAAM;KAAiB,MAAM,EAAE;KAAM,EAAE;IAC3E,CAAC;AACF;EAEF,KAAK;AACH,OAAI,KAAK;IACP,MAAM;IACN,SAAS,IAAI,QAAQ,IAAI,gBAAgB;IAC1C,CAAC;AACF;EAEF,KAAK;AACH,OAAI,KAAK,wBAAwB,IAAI,QAAQ,CAAC;AAC9C;EAEF,KAAK;AAEH,QAAK,MAAM,UAAU,IAAI,QACvB,KAAI,KAAK,kBAAkB,OAAO,CAAC;AAErC;;AAIN,QAAO;;;AAIT,SAAS,wBACP,SAC4C;CAC5C,IAAI,cAAc;CAClB,MAAM,YAAoD,EAAE;AAE5D,MAAK,MAAM,QAAQ,QACjB,SAAQ,KAAK,MAAb;EACE,KAAK;AACH,OAAI,YAAa,gBAAe;AAChC,kBAAe,KAAK;AACpB;EAEF,KAAK,WAEH;EAEF,KAAK;AACH,aAAU,KAAK;IACb,IAAI,KAAK;IACT,MAAM;IACN,UAAU;KACR,MAAM,KAAK;KACX,WAAW,KAAK,UAAU,KAAK,UAAU;KAC1C;IACF,CAAC;AACF;;CAIN,MAAM,MAAkD;EACtD,MAAM;EACN,SAAS,eAAe;EACzB;AAED,KAAI,UAAU,SAAS,EAAG,KAAI,aAAa;AAE3C,QAAO;;;AAIT,SAAS,kBAAkB,MAA6D;CACtF,IAAI,UAAU;AACd,MAAK,MAAM,QAAQ,KAAK,OACtB,SAAQ,KAAK,MAAb;EACE,KAAK;AACH,cAAW,KAAK;AAChB;EACF,KAAK;AACH,cAAW,KAAK,UAAU,KAAK,KAAK;AACpC;EACF;AACE,cAAW,IAAI,KAAK,KAAK;AACzB;;AAGN,QAAO;EAAE,MAAM;EAAQ,cAAc,KAAK;EAAI;EAAS;;;AAazD,SAAgB,oBAAiC;AAC/C,QAAO;EAAE,aAAa;EAAO,2BAAW,IAAI,KAAK;EAAE;;;;;;;;;;;AAYrD,SAAgB,SAAS,OAAiB,OAAkC;CAC1E,MAAM,QAAsB,EAAE;CAC9B,MAAM,SAAS,MAAM,QAAQ;AAE7B,KAAI,WAAW,KAAA,GAAW;AACxB,MAAI,MAAM,MACR,OAAM,KAAK;GACT,MAAM;GACN,cAAc;GACd,OAAO,SAAS,MAAM,MAAM;GAC7B,CAAC;AAEJ,SAAO;;CAGT,MAAM,QAAQ,OAAO;AAErB,KAAI,MAAM,SAAS;AACjB,MAAI,CAAC,MAAM,aAAa;AACtB,SAAM,cAAc;AACpB,SAAM,KAAK,EAAE,MAAM,cAAc,CAAC;;AAEpC,QAAM,KAAK;GAAE,MAAM;GAAc,MAAM,MAAM;GAAS,CAAC;;AAGzD,KAAI,MAAM,cAAc,KACtB,MAAK,MAAM,MAAM,MAAM,YAAY;AAGjC,MAFiB,MAAM,UAAU,IAAI,GAAG,MAAM,KAE7B,KAAA,GAAW;GAC1B,MAAM,KAAK,GAAG,MAAM;GACpB,MAAM,OAAO,GAAG,UAAU,QAAQ;AAClC,SAAM,UAAU,IAAI,GAAG,OAAO;IAAE;IAAI;IAAM,CAAC;AAC3C,SAAM,KAAK;IAAE,MAAM;IAAmB;IAAI;IAAM,CAAC;;AAGnD,MAAI,GAAG,UAAU,aAAa,MAAM;GAClC,MAAM,OAAO,MAAM,UAAU,IAAI,GAAG,MAAM;AAC1C,SAAM,KAAK;IACT,MAAM;IACN,IAAI,KAAK;IACT,MAAM,GAAG,SAAS;IACnB,CAAC;;;AAKR,KAAI,OAAO,iBAAiB,MAAM;AAChC,MAAI,MAAM,aAAa;AACrB,SAAM,KAAK,EAAE,MAAM,YAAY,CAAC;AAChC,SAAM,cAAc;;AAGtB,OAAK,MAAM,GAAG,SAAS,MAAM,UAC3B,OAAM,KAAK;GAAE,MAAM;GAAiB,IAAI,KAAK
;GAAI,CAAC;AAEpD,QAAM,UAAU,OAAO;AAEvB,QAAM,KAAK;GACT,MAAM;GACN,cAAc,gBAAgB,OAAO,cAAc;GACnD,OAAO,SAAS,MAAM,MAAM;GAC7B,CAAC;;AAGJ,QAAO;;;AAIT,SAAS,cAAc,MAA0B;AAC/C,QAAO,OAAO,KAAK,KAAK,CAAC,SAAS,SAAS;;;;;AC1S7C,SAAgB,aAAa,QAAmC;CAC9D,MAAM,SAAS,IAAI,OAAO;EACxB,QAAQ,QAAQ;EAChB,SAAS,QAAQ;EACjB,SAAS,QAAQ;EACjB,YAAY,QAAQ;EACpB,gBAAgB,QAAQ;EACzB,CAAC;AAEF,QAAO,EACL,MAAM,MAAc,UAA+B;EACjD,MAAM,gBAAgB;AAEtB,SAAO;GACL;GACA,OAAO,SAMuB;IAC5B,MAAM,aAAa,QAAQ;IAI3B,MAAM,SAAqD;KACzD,OAAO;KACP,UAJe,gBAAgB,QAAQ,SAAS;KAKhD,QAAQ;KACR,gBAAgB,EAAE,eAAe,MAAM;KACxC;IAED,MAAM,YAAY,YAAY,aAAa,eAAe;AAC1D,QAAI,cAAc,KAAA,EAAW,QAAO,wBAAwB;IAE5D,MAAM,kBAAkB,YAAY,mBAAmB,eAAe;AACtE,QAAI,oBAAoB,KAAA,GAAW;KACjC,MAAM,cAAc,YAAY,eAAe,eAAe;AAC9D,SAAI,gBAAgB,KAAA,EAAW,QAAO,cAAc;KAEpD,MAAM,OAAO,YAAY,QAAQ,eAAe;AAChD,SAAI,SAAS,KAAA,EAAW,QAAO,QAAQ;;AAGzC,QAAI,oBAAoB,KAAA,EACtB,QAAO,mBAAmB;IAG5B,MAAM,gBAAgB,YAAY,iBAAiB,eAAe;AAClE,QAAI,kBAAkB,KAAA,EAAW,QAAO,OAAO;AAE/C,QAAI,QAAQ,UAAU,KAAA,KAAa,QAAQ,MAAM,SAAS,EACxD,QAAO,QAAQ,gBAAgB,QAAQ,MAAM;AAG/C,QAAI,QAAQ,WAAW,KAAA,EACrB,QAAO,kBAAkB;KACvB,MAAM;KACN,aAAa;MACX,MAAM;MACN,QAAQ,oBAAoB,mBAAmB,QAAQ,OAAO,CAAC;MAC/D,QAAQ;MACT;KACF;AAGH,WAAO,aAAa,QAAQ,QAAQ,QAAQ,OAAO;;GAEtD;IAEJ;;;;;AAMH,gBAAgB,aACd,QACA,QACA,QAC4B;CAC5B,MAAM,QAAQ,mBAAmB;CAEjC,MAAM,WAAW,MAAM,OAAO,KAAK,YAAY,OAAO,QAAQ,EAC5D,QACD,CAAC;AAEF,YAAW,MAAM,SAAS,UAAU;EAClC,MAAM,QAAQ,SAAS,OAAO,MAAM;AACpC,OAAK,MAAM,QAAQ,MACjB,OAAM;;;;;;;AASZ,SAAS,oBAAoB,QAA0D;AACrF,KAAI,OAAO,SAAS,UAAU;AAC5B,SAAO,uBAAuB;AAC9B,MAAI,OAAO,eAAe,QAAQ,OAAO,OAAO,eAAe;QACxD,MAAM,SAAS,OAAO,OAAO,OAAO,WAAsC,CAC7E,KAAI,UAAU,QAAQ,OAAO,UAAU,SACrC,qBAAoB,MAAiC;;;AAK7D,MAAK,MAAM,OAAO;EAAC;EAAS;EAAS;EAAQ,CAC3C,KAAI,MAAM,QAAQ,OAAO,KAAK,CAC5B,MAAK,MAAM,QAAQ,OAAO,KACxB,qBAAoB,KAAK;AAI/B,KAAI,OAAO,UAAU,QAAQ,OAAO,OAAO,UAAU;OAC9C,MAAM,SAAS,OAAO,OAAO,OAAO,MAAiC,CACxE,KAAI,UAAU,QAAQ,OAAO,UAAU,SACrC,qBAAoB,MAAiC;;AAI3D,QAAO"}
|
package/package.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@agentloop/openai",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "OpenAI provider for @agentloop/core",
|
|
5
|
+
"keywords": [
|
|
6
|
+
"agent",
|
|
7
|
+
"ai",
|
|
8
|
+
"gpt",
|
|
9
|
+
"llm",
|
|
10
|
+
"openai",
|
|
11
|
+
"provider"
|
|
12
|
+
],
|
|
13
|
+
"license": "MIT",
|
|
14
|
+
"repository": {
|
|
15
|
+
"type": "git",
|
|
16
|
+
"url": "https://github.com/joeychilson/agentloop.git",
|
|
17
|
+
"directory": "packages/openai"
|
|
18
|
+
},
|
|
19
|
+
"files": [
|
|
20
|
+
"dist"
|
|
21
|
+
],
|
|
22
|
+
"type": "module",
|
|
23
|
+
"sideEffects": false,
|
|
24
|
+
"main": "./dist/index.cjs",
|
|
25
|
+
"module": "./dist/index.mjs",
|
|
26
|
+
"types": "./dist/index.d.mts",
|
|
27
|
+
"exports": {
|
|
28
|
+
".": {
|
|
29
|
+
"import": {
|
|
30
|
+
"types": "./dist/index.d.mts",
|
|
31
|
+
"default": "./dist/index.mjs"
|
|
32
|
+
},
|
|
33
|
+
"require": {
|
|
34
|
+
"types": "./dist/index.d.cts",
|
|
35
|
+
"default": "./dist/index.cjs"
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
},
|
|
39
|
+
"scripts": {
|
|
40
|
+
"build": "tsdown",
|
|
41
|
+
"dev": "tsdown --watch",
|
|
42
|
+
"typecheck": "tsc --noEmit",
|
|
43
|
+
"clean": "rm -rf dist"
|
|
44
|
+
},
|
|
45
|
+
"dependencies": {
|
|
46
|
+
"@agentloop/core": "workspace:*"
|
|
47
|
+
},
|
|
48
|
+
"peerDependencies": {
|
|
49
|
+
"openai": "^6.32.0"
|
|
50
|
+
},
|
|
51
|
+
"engines": {
|
|
52
|
+
"node": ">=20"
|
|
53
|
+
}
|
|
54
|
+
}
|