botinabox 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,75 @@
1
/** LLM provider types — Story 1.5 / 2.1 */
/** Declares one callable tool the model may request via a tool_use block. */
interface ToolDefinition {
  name: string;
  description: string;
  /** JSON-Schema-style description of the tool's arguments; validated by the provider, not here. */
  parameters: Record<string, unknown>;
}
/** One conversation turn; content is plain text or a list of structured blocks. */
interface ChatMessage {
  role: "user" | "assistant" | "system";
  content: string | ContentBlock[];
}
/** Discriminated union of message content pieces (tag field: `type`). */
type ContentBlock = {
  type: "text";
  text: string;
} | {
  type: "tool_use";
  id: string;
  name: string;
  input: unknown;
} | {
  type: "tool_result";
  tool_use_id: string;
  content: string;
};
/** Inputs for a single chat() / chatStream() call. */
interface ChatParams {
  messages: ChatMessage[];
  /** Optional system prompt, carried separately from `messages`. */
  system?: string;
  tools?: ToolDefinition[];
  maxTokens?: number;
  temperature?: number;
  model: string;
  abortSignal?: AbortSignal;
}
/** Token accounting reported by the provider for one call. */
interface TokenUsage {
  inputTokens: number;
  outputTokens: number;
  cacheReadTokens?: number;
  cacheWriteTokens?: number;
}
/** Normalized result of a completed chat call. */
interface ChatResult {
  content: string;
  /** Present only when the model requested at least one tool invocation. */
  toolUses?: ToolUse[];
  usage: TokenUsage;
  model: string;
  stopReason: "end_turn" | "tool_use" | "max_tokens" | "stop_sequence";
}
/** One tool invocation requested by the model. */
interface ToolUse {
  id: string;
  name: string;
  input: unknown;
}
/** Static metadata describing one selectable model. */
interface ModelInfo {
  id: string;
  displayName: string;
  contextWindow: number;
  maxOutputTokens: number;
  capabilities: Array<"chat" | "tools" | "vision" | "streaming">;
  /**
   * Cost in USD per 1M tokens (e.g. 15 ⇒ $15/MTok).
   * NOTE(review): previously documented as "micro-cents per 1M tokens",
   * which does not match the catalog values (15/75, 2.5/10, …).
   */
  inputCostPerMToken?: number;
  outputCostPerMToken?: number;
}
/** A provider/model pair after alias resolution. */
interface ResolvedModel {
  provider: string;
  model: string;
}
/** Contract every LLM backend (Anthropic, OpenAI, Ollama, …) implements. */
interface LLMProvider {
  id: string;
  displayName: string;
  models: ModelInfo[];
  chat(params: ChatParams): Promise<ChatResult>;
  /** Yields text deltas; the generator's return value is the final ChatResult. */
  chatStream(params: ChatParams): AsyncGenerator<string, ChatResult, unknown>;
  /** Convert ToolDefinition[] to provider-native format */
  serializeTools(tools: ToolDefinition[]): unknown;
}

export type { ChatMessage as C, LLMProvider as L, ModelInfo as M, ResolvedModel as R, TokenUsage as T, ChatParams as a, ChatResult as b, ContentBlock as c, ToolDefinition as d, ToolUse as e };
@@ -0,0 +1,22 @@
1
import { L as LLMProvider, M as ModelInfo, d as ToolDefinition, a as ChatParams, b as ChatResult } from '../../provider-qqJYv9nv.js';

/** Anthropic Messages API implementation of LLMProvider. */
declare class AnthropicProvider implements LLMProvider {
  readonly id = "anthropic";
  readonly displayName = "Anthropic";
  /** Static catalog (see MODELS below). */
  readonly models: ModelInfo[];
  private client;
  constructor({ apiKey }: {
    apiKey: string;
  });
  /** Convert ToolDefinition[] to Anthropic's native tool format. */
  serializeTools(tools: ToolDefinition[]): unknown;
  chat(params: ChatParams): Promise<ChatResult>;
  /** Yields text deltas; returns the final ChatResult. */
  chatStream(params: ChatParams): AsyncGenerator<string, ChatResult, unknown>;
}

/** Built-in Anthropic model catalog. */
declare const MODELS: ModelInfo[];

/** Factory returning an AnthropicProvider typed as the generic LLMProvider. */
declare function createAnthropicProvider(config: {
  apiKey: string;
}): LLMProvider;

export { AnthropicProvider, MODELS, createAnthropicProvider as default };
@@ -0,0 +1,169 @@
1
+ // src/providers/anthropic/provider.ts
2
+ import Anthropic from "@anthropic-ai/sdk";
3
+
4
// src/providers/anthropic/models.ts
/** Static Anthropic model catalog; costs are USD per 1M tokens. */
var MODELS = [
  {
    id: "claude-opus-4-6",
    displayName: "Claude Opus 4.6",
    capabilities: ["chat", "tools", "vision", "streaming"],
    contextWindow: 200000,
    maxOutputTokens: 32000,
    inputCostPerMToken: 15,
    outputCostPerMToken: 75
  },
  {
    id: "claude-sonnet-4-6",
    displayName: "Claude Sonnet 4.6",
    capabilities: ["chat", "tools", "vision", "streaming"],
    contextWindow: 200000,
    maxOutputTokens: 16000,
    inputCostPerMToken: 3,
    outputCostPerMToken: 15
  },
  {
    id: "claude-haiku-4-5",
    displayName: "Claude Haiku 4.5",
    capabilities: ["chat", "tools", "vision", "streaming"],
    contextWindow: 200000,
    maxOutputTokens: 8192,
    inputCostPerMToken: 0.8,
    outputCostPerMToken: 4
  }
];
34
+
35
// src/providers/anthropic/tool-converter.ts
/**
 * Map provider-agnostic ToolDefinition[] to Anthropic's tool format.
 * Each tool's `parameters` is spread over a `{ type: "object" }` base, so a
 * `type` key inside `parameters` takes precedence over the default.
 */
function convertTools(tools) {
  const converted = [];
  for (const tool of tools) {
    converted.push({
      name: tool.name,
      description: tool.description,
      input_schema: { type: "object", ...tool.parameters }
    });
  }
  return converted;
}
43
+
44
// src/providers/anthropic/provider.ts
/**
 * LLMProvider backed by the Anthropic Messages API.
 *
 * Fixes relative to the previous revision:
 * - removed the dead no-op ternary on message content
 *   (`typeof m.content === "string" ? m.content : m.content`);
 * - request building and content-block extraction were duplicated verbatim
 *   between chat() and chatStream(); both now share private helpers.
 */
var AnthropicProvider = class {
  id = "anthropic";
  displayName = "Anthropic";
  models = MODELS;
  client;
  constructor({ apiKey }) {
    this.client = new Anthropic({ apiKey });
  }
  /** Convert ToolDefinition[] to Anthropic's native tool format. */
  serializeTools(tools) {
    return convertTools(tools);
  }
  /** Build the Messages API payload shared by chat() and chatStream(). */
  #buildRequest(params) {
    const { messages, system, tools, maxTokens, temperature, model } = params;
    // Anthropic takes the system prompt as a top-level field, so any
    // system-role messages are dropped from the conversation array.
    const anthropicMessages = messages
      .filter((m) => m.role !== "system")
      .map((m) => ({ role: m.role, content: m.content }));
    return {
      model,
      max_tokens: maxTokens ?? 4096,
      messages: anthropicMessages,
      ...(system ? { system } : {}),
      ...(tools && tools.length > 0 ? { tools: convertTools(tools) } : {}),
      ...(temperature !== void 0 ? { temperature } : {})
    };
  }
  /** Fold an Anthropic message into the provider-agnostic ChatResult. */
  #toResult(message) {
    let content = "";
    const toolUses = [];
    for (const block of message.content) {
      if (block.type === "text") {
        content += block.text;
      } else if (block.type === "tool_use") {
        toolUses.push({ id: block.id, name: block.name, input: block.input });
      }
    }
    return {
      content,
      toolUses: toolUses.length > 0 ? toolUses : void 0,
      usage: {
        inputTokens: message.usage.input_tokens,
        outputTokens: message.usage.output_tokens
      },
      model: message.model,
      stopReason: mapStopReason(message.stop_reason)
    };
  }
  /** One-shot completion. */
  async chat(params) {
    const response = await this.client.messages.create(
      this.#buildRequest(params),
      { signal: params.abortSignal }
    );
    return this.#toResult(response);
  }
  /** Streaming completion: yields text deltas, returns the final ChatResult. */
  async *chatStream(params) {
    const stream = this.client.messages.stream(
      this.#buildRequest(params),
      { signal: params.abortSignal }
    );
    for await (const event of stream) {
      if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
        yield event.delta.text;
      }
    }
    return this.#toResult(await stream.finalMessage());
  }
};
146
/**
 * Map Anthropic's stop_reason onto the provider-agnostic stopReason union.
 * Anything unrecognized (including null) falls back to "end_turn".
 */
function mapStopReason(reason) {
  const passthrough = ["end_turn", "tool_use", "max_tokens", "stop_sequence"];
  return passthrough.includes(reason) ? reason : "end_turn";
}
160
+
161
// src/providers/anthropic/index.ts
/** Factory returning an AnthropicProvider typed as the generic LLMProvider. */
function createAnthropicProvider(config) {
  return new AnthropicProvider(config);
}
165
+ export {
166
+ AnthropicProvider,
167
+ MODELS,
168
+ createAnthropicProvider as default
169
+ };
@@ -0,0 +1,24 @@
1
import { L as LLMProvider, M as ModelInfo, d as ToolDefinition, a as ChatParams, b as ChatResult } from '../../provider-qqJYv9nv.js';

/** Local Ollama HTTP server implementation of LLMProvider. */
declare class OllamaProvider implements LLMProvider {
  readonly id = "ollama";
  readonly displayName = "Ollama";
  private baseUrl;
  /** Cache populated by getModels(); refreshed after cacheTtlMs. */
  private cachedModels;
  private cacheTimestamp;
  private readonly cacheTtlMs;
  constructor({ baseUrl }?: {
    baseUrl?: string;
  });
  /** Synchronous view of the cached model list (empty until getModels() succeeds). */
  get models(): ModelInfo[];
  serializeTools(_tools: ToolDefinition[]): unknown;
  /** Fetch installed models from the server, with TTL caching. */
  getModels(): Promise<ModelInfo[]>;
  chat(params: ChatParams): Promise<ChatResult>;
  /** Yields text deltas; returns the final ChatResult. */
  chatStream(params: ChatParams): AsyncGenerator<string, ChatResult, unknown>;
}

/** Factory returning an OllamaProvider typed as the generic LLMProvider. */
declare function createOllamaProvider(config?: {
  baseUrl?: string;
}): LLMProvider;

export { OllamaProvider, createOllamaProvider as default };
@@ -0,0 +1,179 @@
1
// src/providers/ollama/provider.ts
/**
 * LLMProvider backed by a local Ollama server's HTTP API.
 * The model list is fetched lazily from /api/tags and cached for 5 minutes.
 *
 * Fixes relative to the previous revision:
 * - `params.tools` was silently dropped even though serializeTools() exists;
 *   tools are now forwarded on /api/chat when provided;
 * - stopReason was hard-coded "end_turn"; Ollama's `done_reason` is now
 *   mapped ("length" → "max_tokens");
 * - message/body construction was duplicated between chat() and chatStream().
 */
var OllamaProvider = class {
  id = "ollama";
  displayName = "Ollama";
  baseUrl;
  cachedModels = [];
  cacheTimestamp = 0;
  cacheTtlMs = 5 * 60 * 1e3;
  // 5 minutes
  constructor({ baseUrl = "http://localhost:11434" } = {}) {
    // Strip one trailing slash so template strings never yield "//api/...".
    this.baseUrl = baseUrl.replace(/\/$/, "");
  }
  /** Synchronous view of the cached model list (empty until getModels() succeeds). */
  get models() {
    return this.cachedModels;
  }
  /** Convert ToolDefinition[] to the OpenAI-style function format Ollama expects. */
  serializeTools(_tools) {
    return _tools.map((tool) => ({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters
      }
    }));
  }
  /**
   * Fetch installed models from /api/tags, caching for cacheTtlMs.
   * Returns [] on any network/HTTP failure (the server may simply be down).
   */
  async getModels() {
    const now = Date.now();
    if (this.cachedModels.length > 0 && now - this.cacheTimestamp < this.cacheTtlMs) {
      return this.cachedModels;
    }
    try {
      const response = await fetch(`${this.baseUrl}/api/tags`);
      if (!response.ok) {
        return [];
      }
      const data = await response.json();
      this.cachedModels = data.models.map((m) => ({
        id: m.name,
        displayName: m.name,
        // /api/tags does not report limits; these are conservative defaults.
        contextWindow: 128e3,
        maxOutputTokens: 4096,
        capabilities: ["chat", "streaming"]
      }));
      this.cacheTimestamp = now;
      return this.cachedModels;
    } catch {
      return [];
    }
  }
  /** Flatten ChatMessage[] (+ optional system prompt) into Ollama's message format. */
  #buildMessages(messages, system) {
    const out = [];
    if (system) {
      out.push({ role: "system", content: system });
    }
    for (const msg of messages) {
      // Ollama accepts plain-text content only: keep text blocks, drop the rest.
      const content = typeof msg.content === "string"
        ? msg.content
        : msg.content.filter((b) => b.type === "text").map((b) => b.text).join("");
      out.push({ role: msg.role, content });
    }
    return out;
  }
  /** Build the /api/chat request body shared by chat() and chatStream(). */
  #buildBody(params, stream) {
    const { messages, system, model, maxTokens, temperature, tools } = params;
    const body = { model, messages: this.#buildMessages(messages, system), stream };
    const options = {};
    if (maxTokens !== void 0) options.num_predict = maxTokens;
    if (temperature !== void 0) options.temperature = temperature;
    if (Object.keys(options).length > 0) body.options = options;
    if (tools && tools.length > 0) body.tools = this.serializeTools(tools);
    return body;
  }
  /** Map Ollama's done_reason to the provider-agnostic stopReason. */
  #mapDoneReason(reason) {
    return reason === "length" ? "max_tokens" : "end_turn";
  }
  /** One-shot completion via POST /api/chat (stream: false). */
  async chat(params) {
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(this.#buildBody(params, false)),
      signal: params.abortSignal
    });
    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
    }
    const data = await response.json();
    return {
      content: data.message.content,
      usage: {
        inputTokens: data.prompt_eval_count ?? 0,
        outputTokens: data.eval_count ?? 0
      },
      model: data.model,
      stopReason: this.#mapDoneReason(data.done_reason)
    };
  }
  /** Streaming completion: parses newline-delimited JSON chunks from /api/chat. */
  async *chatStream(params) {
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(this.#buildBody(params, true)),
      signal: params.abortSignal
    });
    if (!response.ok) {
      throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
    }
    if (!response.body) {
      throw new Error("No response body for streaming");
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    let fullContent = "";
    let lastChunk;
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        // Chunks are newline-delimited JSON; keep any partial line buffered.
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? "";
        for (const line of lines) {
          if (!line.trim()) continue;
          try {
            const chunk = JSON.parse(line);
            if (chunk.message?.content) {
              fullContent += chunk.message.content;
              yield chunk.message.content;
            }
            if (chunk.done) {
              lastChunk = chunk;
            }
          } catch {
            // Malformed line: skip it rather than abort the whole stream.
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
    return {
      content: fullContent,
      usage: {
        inputTokens: lastChunk?.prompt_eval_count ?? 0,
        outputTokens: lastChunk?.eval_count ?? 0
      },
      model,
      stopReason: this.#mapDoneReason(lastChunk?.done_reason)
    };
  }
};
171
+
172
// src/providers/ollama/index.ts
/** Factory returning an OllamaProvider typed as the generic LLMProvider. */
function createOllamaProvider(config) {
  return new OllamaProvider(config);
}
176
+ export {
177
+ OllamaProvider,
178
+ createOllamaProvider as default
179
+ };
@@ -0,0 +1,22 @@
1
import { L as LLMProvider, M as ModelInfo, d as ToolDefinition, a as ChatParams, b as ChatResult } from '../../provider-qqJYv9nv.js';

/** OpenAI Chat Completions API implementation of LLMProvider. */
declare class OpenAIProvider implements LLMProvider {
  readonly id = "openai";
  readonly displayName = "OpenAI";
  /** Static catalog (see MODELS below). */
  readonly models: ModelInfo[];
  private client;
  constructor({ apiKey }: {
    apiKey: string;
  });
  /** Convert ToolDefinition[] to OpenAI's function-tool format. */
  serializeTools(tools: ToolDefinition[]): unknown;
  chat(params: ChatParams): Promise<ChatResult>;
  /** Yields text deltas; returns the final ChatResult. */
  chatStream(params: ChatParams): AsyncGenerator<string, ChatResult, unknown>;
}

/** Built-in OpenAI model catalog. */
declare const MODELS: ModelInfo[];

/** Factory returning an OpenAIProvider typed as the generic LLMProvider. */
declare function createOpenAIProvider(config: {
  apiKey: string;
}): LLMProvider;

export { MODELS, OpenAIProvider, createOpenAIProvider as default };
@@ -0,0 +1,212 @@
1
+ // src/providers/openai/provider.ts
2
+ import OpenAI from "openai";
3
+
4
// src/providers/openai/models.ts
/** Static OpenAI model catalog; costs are USD per 1M tokens. */
var MODELS = [
  {
    id: "gpt-4o",
    displayName: "GPT-4o",
    capabilities: ["chat", "tools", "vision", "streaming"],
    contextWindow: 128000,
    maxOutputTokens: 16384,
    inputCostPerMToken: 2.5,
    outputCostPerMToken: 10
  },
  {
    id: "gpt-4o-mini",
    displayName: "GPT-4o Mini",
    capabilities: ["chat", "tools", "vision", "streaming"],
    contextWindow: 128000,
    maxOutputTokens: 16384,
    inputCostPerMToken: 0.15,
    outputCostPerMToken: 0.6
  },
  {
    id: "o3-mini",
    displayName: "o3 Mini",
    capabilities: ["chat", "tools", "streaming"],
    contextWindow: 200000,
    maxOutputTokens: 100000,
    inputCostPerMToken: 1.1,
    outputCostPerMToken: 4.4
  }
];
34
+
35
// src/providers/openai/tool-converter.ts
/** Map provider-agnostic ToolDefinition[] to OpenAI's function-tool format. */
function convertTools(tools) {
  const converted = [];
  for (const tool of tools) {
    converted.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters
      }
    });
  }
  return converted;
}
46
+
47
// src/providers/openai/provider.ts
/**
 * LLMProvider backed by the OpenAI Chat Completions API.
 *
 * Fixes relative to the previous revision:
 * - message conversion and tool_call parsing were duplicated verbatim
 *   between chat() and chatStream(); both now share private helpers;
 * - removed the redundant `b.type === "text" ? b.text : ""` re-check after
 *   an identical filter.
 */
var OpenAIProvider = class {
  id = "openai";
  displayName = "OpenAI";
  models = MODELS;
  client;
  constructor({ apiKey }) {
    this.client = new OpenAI({ apiKey });
  }
  /** Convert ToolDefinition[] to OpenAI's function-tool format. */
  serializeTools(tools) {
    return convertTools(tools);
  }
  /** Flatten ChatMessage[] (+ optional system prompt) into OpenAI's format. */
  #toOpenAIMessages(messages, system) {
    const out = [];
    if (system) {
      out.push({ role: "system", content: system });
    }
    for (const msg of messages) {
      // Only text blocks survive the flattening; other block kinds are dropped.
      const content = typeof msg.content === "string"
        ? msg.content
        : msg.content.filter((b) => b.type === "text").map((b) => b.text).join("");
      out.push({ role: msg.role, content });
    }
    return out;
  }
  /** Build the Chat Completions payload shared by chat() and chatStream(). */
  #buildRequest(params) {
    const { messages, system, tools, maxTokens, temperature, model } = params;
    return {
      model,
      messages: this.#toOpenAIMessages(messages, system),
      ...(maxTokens !== void 0 ? { max_tokens: maxTokens } : {}),
      ...(temperature !== void 0 ? { temperature } : {}),
      ...(tools && tools.length > 0 ? { tools: convertTools(tools) } : {})
    };
  }
  /** Parse tool_calls into ToolUse[], tolerating non-JSON argument strings. */
  #toToolUses(toolCalls) {
    const toolUses = [];
    for (const tc of toolCalls ?? []) {
      let input;
      try {
        input = JSON.parse(tc.function.arguments);
      } catch {
        // Fall back to the raw string if the model emitted invalid JSON.
        input = tc.function.arguments;
      }
      toolUses.push({ id: tc.id, name: tc.function.name, input });
    }
    return toolUses;
  }
  /** One-shot completion. */
  async chat(params) {
    const response = await this.client.chat.completions.create(
      this.#buildRequest(params),
      { signal: params.abortSignal }
    );
    const choice = response.choices[0];
    if (!choice) {
      throw new Error("No choices returned from OpenAI");
    }
    const toolUses = this.#toToolUses(choice.message.tool_calls);
    return {
      content: choice.message.content ?? "",
      toolUses: toolUses.length > 0 ? toolUses : void 0,
      usage: {
        inputTokens: response.usage?.prompt_tokens ?? 0,
        outputTokens: response.usage?.completion_tokens ?? 0
      },
      model: response.model,
      stopReason: mapFinishReason(choice.finish_reason)
    };
  }
  /** Streaming completion: yields text deltas, returns the final ChatResult. */
  async *chatStream(params) {
    const stream = this.client.chat.completions.stream(
      this.#buildRequest(params),
      { signal: params.abortSignal }
    );
    let accumulatedContent = "";
    for await (const chunk of stream) {
      const delta = chunk.choices[0]?.delta?.content;
      if (delta) {
        accumulatedContent += delta;
        yield delta;
      }
    }
    const finalCompletion = await stream.finalChatCompletion();
    const choice = finalCompletion.choices[0];
    if (!choice) {
      throw new Error("No choices in final completion");
    }
    const toolUses = this.#toToolUses(choice.message.tool_calls);
    return {
      content: choice.message.content ?? accumulatedContent,
      toolUses: toolUses.length > 0 ? toolUses : void 0,
      usage: {
        inputTokens: finalCompletion.usage?.prompt_tokens ?? 0,
        outputTokens: finalCompletion.usage?.completion_tokens ?? 0
      },
      model: finalCompletion.model,
      stopReason: mapFinishReason(choice.finish_reason)
    };
  }
};
189
/**
 * Map OpenAI's finish_reason onto the provider-agnostic stopReason union.
 * Anything unrecognized (including null) falls back to "end_turn".
 */
function mapFinishReason(reason) {
  const table = new Map([
    ["stop", "end_turn"],
    ["tool_calls", "tool_use"],
    ["length", "max_tokens"],
    ["content_filter", "stop_sequence"]
  ]);
  return table.get(reason) ?? "end_turn";
}
203
+
204
// src/providers/openai/index.ts
/** Factory returning an OpenAIProvider typed as the generic LLMProvider. */
function createOpenAIProvider(config) {
  return new OpenAIProvider(config);
}
208
+ export {
209
+ MODELS,
210
+ OpenAIProvider,
211
+ createOpenAIProvider as default
212
+ };