@ank1015/providers 0.0.1 → 0.0.2

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (169)
  1. package/LICENSE +21 -0
  2. package/README.md +93 -383
  3. package/dist/agent/conversation.d.ts +97 -0
  4. package/dist/agent/conversation.d.ts.map +1 -0
  5. package/dist/agent/conversation.js +328 -0
  6. package/dist/agent/conversation.js.map +1 -0
  7. package/dist/agent/runner.d.ts +37 -0
  8. package/dist/agent/runner.d.ts.map +1 -0
  9. package/dist/agent/runner.js +169 -0
  10. package/dist/agent/runner.js.map +1 -0
  11. package/dist/agent/tools/calculate.d.ts +15 -0
  12. package/dist/agent/tools/calculate.d.ts.map +1 -0
  13. package/dist/agent/tools/calculate.js +23 -0
  14. package/dist/agent/tools/calculate.js.map +1 -0
  15. package/dist/agent/tools/get-current-time.d.ts +15 -0
  16. package/dist/agent/tools/get-current-time.d.ts.map +1 -0
  17. package/dist/agent/tools/get-current-time.js +38 -0
  18. package/dist/agent/tools/get-current-time.js.map +1 -0
  19. package/dist/agent/tools/index.d.ts +3 -0
  20. package/dist/agent/tools/index.d.ts.map +1 -0
  21. package/dist/agent/tools/index.js +3 -0
  22. package/dist/agent/tools/index.js.map +1 -0
  23. package/dist/agent/types.d.ts +53 -31
  24. package/dist/agent/types.d.ts.map +1 -1
  25. package/dist/agent/types.js +1 -2
  26. package/dist/agent/utils.d.ts +14 -0
  27. package/dist/agent/utils.d.ts.map +1 -0
  28. package/dist/agent/utils.js +59 -0
  29. package/dist/agent/utils.js.map +1 -0
  30. package/dist/index.d.ts +16 -9
  31. package/dist/index.d.ts.map +1 -1
  32. package/dist/index.js +16 -28
  33. package/dist/index.js.map +1 -1
  34. package/dist/llm.d.ts +15 -0
  35. package/dist/llm.d.ts.map +1 -0
  36. package/dist/llm.js +92 -0
  37. package/dist/llm.js.map +1 -0
  38. package/dist/models.d.ts +8 -1
  39. package/dist/models.d.ts.map +1 -1
  40. package/dist/models.generated.d.ts +25 -112
  41. package/dist/models.generated.d.ts.map +1 -1
  42. package/dist/models.generated.js +72 -227
  43. package/dist/models.generated.js.map +1 -1
  44. package/dist/models.js +30 -32
  45. package/dist/models.js.map +1 -1
  46. package/dist/providers/google/complete.d.ts +3 -0
  47. package/dist/providers/google/complete.d.ts.map +1 -0
  48. package/dist/providers/google/complete.js +53 -0
  49. package/dist/providers/google/complete.js.map +1 -0
  50. package/dist/providers/google/index.d.ts +6 -0
  51. package/dist/providers/google/index.d.ts.map +1 -0
  52. package/dist/providers/google/index.js +6 -0
  53. package/dist/providers/google/index.js.map +1 -0
  54. package/dist/providers/google/stream.d.ts +3 -0
  55. package/dist/providers/google/stream.d.ts.map +1 -0
  56. package/dist/providers/{google.js → google/stream.js} +67 -231
  57. package/dist/providers/google/stream.js.map +1 -0
  58. package/dist/providers/google/types.d.ts +8 -0
  59. package/dist/providers/google/types.d.ts.map +1 -0
  60. package/dist/providers/google/types.js +2 -0
  61. package/dist/providers/google/types.js.map +1 -0
  62. package/dist/providers/google/utils.d.ts +30 -0
  63. package/dist/providers/google/utils.d.ts.map +1 -0
  64. package/dist/providers/google/utils.js +354 -0
  65. package/dist/providers/google/utils.js.map +1 -0
  66. package/dist/providers/openai/complete.d.ts +3 -0
  67. package/dist/providers/openai/complete.d.ts.map +1 -0
  68. package/dist/providers/openai/complete.js +57 -0
  69. package/dist/providers/openai/complete.js.map +1 -0
  70. package/dist/providers/openai/index.d.ts +4 -0
  71. package/dist/providers/openai/index.d.ts.map +1 -0
  72. package/dist/providers/openai/index.js +4 -0
  73. package/dist/providers/openai/index.js.map +1 -0
  74. package/dist/providers/openai/stream.d.ts +3 -0
  75. package/dist/providers/openai/stream.d.ts.map +1 -0
  76. package/dist/providers/{openai.js → openai/stream.js} +74 -152
  77. package/dist/providers/openai/stream.js.map +1 -0
  78. package/dist/providers/openai/types.d.ts +8 -0
  79. package/dist/providers/openai/types.d.ts.map +1 -0
  80. package/dist/providers/openai/types.js +2 -0
  81. package/dist/providers/openai/types.js.map +1 -0
  82. package/dist/providers/openai/utils.d.ts +13 -0
  83. package/dist/providers/openai/utils.d.ts.map +1 -0
  84. package/dist/providers/openai/utils.js +285 -0
  85. package/dist/providers/openai/utils.js.map +1 -0
  86. package/dist/types.d.ts +95 -87
  87. package/dist/types.d.ts.map +1 -1
  88. package/dist/types.js +1 -9
  89. package/dist/types.js.map +1 -1
  90. package/dist/utils/event-stream.d.ts +2 -2
  91. package/dist/utils/event-stream.d.ts.map +1 -1
  92. package/dist/utils/event-stream.js +2 -7
  93. package/dist/utils/event-stream.js.map +1 -1
  94. package/dist/utils/json-parse.js +3 -6
  95. package/dist/utils/json-parse.js.map +1 -1
  96. package/dist/utils/overflow.d.ts +51 -0
  97. package/dist/utils/overflow.d.ts.map +1 -0
  98. package/dist/utils/overflow.js +106 -0
  99. package/dist/utils/overflow.js.map +1 -0
  100. package/dist/utils/sanitize-unicode.js +1 -4
  101. package/dist/utils/sanitize-unicode.js.map +1 -1
  102. package/dist/utils/uuid.d.ts +6 -0
  103. package/dist/utils/uuid.d.ts.map +1 -0
  104. package/dist/utils/uuid.js +9 -0
  105. package/dist/utils/uuid.js.map +1 -0
  106. package/dist/utils/validation.d.ts +10 -3
  107. package/dist/utils/validation.d.ts.map +1 -1
  108. package/dist/utils/validation.js +20 -12
  109. package/dist/utils/validation.js.map +1 -1
  110. package/package.json +45 -8
  111. package/biome.json +0 -43
  112. package/dist/agent/agent-loop.d.ts +0 -5
  113. package/dist/agent/agent-loop.d.ts.map +0 -1
  114. package/dist/agent/agent-loop.js +0 -219
  115. package/dist/agent/agent-loop.js.map +0 -1
  116. package/dist/providers/convert.d.ts +0 -6
  117. package/dist/providers/convert.d.ts.map +0 -1
  118. package/dist/providers/convert.js +0 -207
  119. package/dist/providers/convert.js.map +0 -1
  120. package/dist/providers/google.d.ts +0 -26
  121. package/dist/providers/google.d.ts.map +0 -1
  122. package/dist/providers/google.js.map +0 -1
  123. package/dist/providers/openai.d.ts +0 -17
  124. package/dist/providers/openai.d.ts.map +0 -1
  125. package/dist/providers/openai.js.map +0 -1
  126. package/dist/stream.d.ts +0 -4
  127. package/dist/stream.d.ts.map +0 -1
  128. package/dist/stream.js +0 -40
  129. package/dist/stream.js.map +0 -1
  130. package/dist/test-google-agent-loop.d.ts +0 -2
  131. package/dist/test-google-agent-loop.d.ts.map +0 -1
  132. package/dist/test-google-agent-loop.js +0 -186
  133. package/dist/test-google-agent-loop.js.map +0 -1
  134. package/dist/test-google.d.ts +0 -2
  135. package/dist/test-google.d.ts.map +0 -1
  136. package/dist/test-google.js +0 -41
  137. package/dist/test-google.js.map +0 -1
  138. package/src/agent/agent-loop.ts +0 -275
  139. package/src/agent/types.ts +0 -80
  140. package/src/index.ts +0 -72
  141. package/src/models.generated.ts +0 -314
  142. package/src/models.ts +0 -45
  143. package/src/providers/convert.ts +0 -222
  144. package/src/providers/google.ts +0 -496
  145. package/src/providers/openai.ts +0 -437
  146. package/src/stream.ts +0 -60
  147. package/src/types.ts +0 -198
  148. package/src/utils/event-stream.ts +0 -60
  149. package/src/utils/json-parse.ts +0 -28
  150. package/src/utils/sanitize-unicode.ts +0 -25
  151. package/src/utils/validation.ts +0 -69
  152. package/test/core/agent-loop.test.ts +0 -958
  153. package/test/core/stream.test.ts +0 -409
  154. package/test/data/red-circle.png +0 -0
  155. package/test/data/superintelligentwill.pdf +0 -0
  156. package/test/edge-cases/general.test.ts +0 -565
  157. package/test/integration/e2e.test.ts +0 -530
  158. package/test/models/cost.test.ts +0 -499
  159. package/test/models/registry.test.ts +0 -298
  160. package/test/providers/convert.test.ts +0 -846
  161. package/test/providers/google-schema.test.ts +0 -666
  162. package/test/providers/google-stream.test.ts +0 -369
  163. package/test/providers/openai-stream.test.ts +0 -251
  164. package/test/utils/event-stream.test.ts +0 -289
  165. package/test/utils/json-parse.test.ts +0 -344
  166. package/test/utils/sanitize-unicode.test.ts +0 -329
  167. package/test/utils/validation.test.ts +0 -614
  168. package/tsconfig.json +0 -21
  169. package/vitest.config.ts +0 -9
package/src/providers/openai.ts DELETED
@@ -1,437 +0,0 @@
- // Return a abstracted event stream
- // Return the final response as it is.
- import OpenAI from "openai";
- import { AssistantMessageEventStream } from "../utils/event-stream";
- import { StreamFunction, Model, Context, Tool, Api, AssistantMessage, AssistantThinkingContent, AssistantTextContent, AssistantToolCall, StopReason } from "../types";
- import { buildOpenAIMessages } from "./convert";
- import { ResponseCreateParamsStreaming } from "openai/resources/responses/responses.js";
- import type {Tool as OpenAITool, ResponseFunctionToolCall, ResponseOutputMessage, ResponseReasoningItem,} from "openai/resources/responses/responses.js";
- import { parseStreamingJson } from "../utils/json-parse";
- import { validateToolArguments } from "../utils/validation";
- import { calculateCost } from "../models";
- import { Response } from "openai/resources/responses/responses.js";
- export interface OpenAIProviderOptions {
- apiKey?: string;
- signal?: AbortSignal;
- maxOutputTokens?: number;
- parallelToolCalls?: boolean;
- prompt_cache_key?: string;
- promptCacheRetention?: 'in-memory' | '24h' | null;
- reasoning?: {
- effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh' | null;
- summary?: 'auto' | 'concise' | 'detailed' | null;
- };
- temperature?: number;
- truncation?: 'auto' | 'disabled' | null;
- }
-
- // takes in model, built in message
- export const streamOpenAI: StreamFunction<'openai'> = (
- model: Model<'openai'>,
- context: Context,
- options: OpenAIProviderOptions
- ) => {
-
- const stream = new AssistantMessageEventStream();
-
- // Start async processing
- (async () => {
- const output: AssistantMessage = {
- role: "assistant",
- content: [],
- api: "openai" as Api,
- model: model.id,
- usage: {
- input: 0,
- output: 0,
- cacheRead: 0,
- cacheWrite: 0,
- totalTokens: 0,
- cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
- },
- stopReason: "stop",
- timestamp: Date.now(),
- };
- let finalResponse: Response = {
- id: "resp_123",
- object: "response",
- created_at: 1740855869,
- output_text: '',
- status: "completed",
- incomplete_details: null,
- parallel_tool_calls: false,
- error: null,
- instructions: null,
- max_output_tokens: null,
- model: "gpt-4o-mini-2024-07-18",
- output: [],
- previous_response_id: null,
- temperature: 1,
- text: {},
- tool_choice: "auto",
- tools: [],
- top_p: 1,
- truncation: "disabled",
- usage: {
- input_tokens: 0,
- output_tokens: 0,
- output_tokens_details: {
- reasoning_tokens: 0
- },
- input_tokens_details: {
- cached_tokens: 0
- },
- total_tokens: 0
- },
- user: undefined,
- metadata: {}
- }
- try {
- const client = createClient(model, options?.apiKey);
- const params = buildParams(model, context, options);
- const openaiStream = await client.responses.create(params, { signal: options?.signal });
- stream.push({ type: "start", partial: output });
-
- let currentItem: ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall | null = null;
- let currentBlock: AssistantThinkingContent | AssistantTextContent | (AssistantToolCall & { partialJson: string }) | null = null;
- const blocks = output.content;
- const blockIndex = () => blocks.length - 1;
-
- for await (const event of openaiStream) {
- // Handle output item start
- if (event.type === "response.output_item.added") {
- const item = event.item;
- if (item.type === "reasoning") {
- currentItem = item;
- currentBlock = { type: "thinking", thinking: "" };
- output.content.push(currentBlock);
- stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
- } else if (item.type === "message") {
- currentItem = item;
- currentBlock = { type: "text", text: "" };
- output.content.push(currentBlock);
- stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
- } else if (item.type === "function_call") {
- currentItem = item;
- currentBlock = {
- type: "toolCall",
- id: item.call_id,
- name: item.name,
- arguments: {},
- partialJson: item.arguments || "",
- };
- output.content.push(currentBlock);
- stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
- }
- }
- // Handle reasoning summary deltas
- else if (event.type === "response.reasoning_summary_part.added") {
- if (currentItem && currentItem.type === "reasoning") {
- currentItem.summary = currentItem.summary || [];
- currentItem.summary.push(event.part);
- }
- } else if (event.type === "response.reasoning_summary_text.delta") {
- if (
- currentItem &&
- currentItem.type === "reasoning" &&
- currentBlock &&
- currentBlock.type === "thinking"
- ) {
- currentItem.summary = currentItem.summary || [];
- const lastPart = currentItem.summary[currentItem.summary.length - 1];
- if (lastPart) {
- currentBlock.thinking += event.delta;
- lastPart.text += event.delta;
- stream.push({
- type: "thinking_delta",
- contentIndex: blockIndex(),
- delta: event.delta,
- partial: output,
- });
- }
- }
- }
- // Add a new line between summary parts (hack...)
- else if (event.type === "response.reasoning_summary_part.done") {
- if (
- currentItem &&
- currentItem.type === "reasoning" &&
- currentBlock &&
- currentBlock.type === "thinking"
- ) {
- currentItem.summary = currentItem.summary || [];
- const lastPart = currentItem.summary[currentItem.summary.length - 1];
- if (lastPart) {
- currentBlock.thinking += "\n\n";
- lastPart.text += "\n\n";
- stream.push({
- type: "thinking_delta",
- contentIndex: blockIndex(),
- delta: "\n\n",
- partial: output,
- });
- }
- }
- }
- // Handle text output deltas
- else if (event.type === "response.content_part.added") {
- if (currentItem && currentItem.type === "message") {
- currentItem.content = currentItem.content || [];
- currentItem.content.push(event.part as any);
- }
- } else if (event.type === "response.output_text.delta") {
- if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
- const lastPart = currentItem.content[currentItem.content.length - 1];
- if (lastPart && lastPart.type === "output_text") {
- currentBlock.text += event.delta;
- lastPart.text += event.delta;
- stream.push({
- type: "text_delta",
- contentIndex: blockIndex(),
- delta: event.delta,
- partial: output,
- });
- }
- }
- } else if (event.type === "response.refusal.delta") {
- if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
- const lastPart = currentItem.content[currentItem.content.length - 1];
- if (lastPart && lastPart.type === "refusal") {
- currentBlock.text += event.delta;
- lastPart.refusal += event.delta;
- stream.push({
- type: "text_delta",
- contentIndex: blockIndex(),
- delta: event.delta,
- partial: output,
- });
- }
- }
- }
- // Handle function call argument deltas
- else if (event.type === "response.function_call_arguments.delta") {
- if (
- currentItem &&
- currentItem.type === "function_call" &&
- currentBlock &&
- currentBlock.type === "toolCall"
- ) {
- currentBlock.partialJson += event.delta;
- currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
- stream.push({
- type: "toolcall_delta",
- contentIndex: blockIndex(),
- delta: event.delta,
- partial: output,
- });
- }
- }
- // Handle output item completion
- else if (event.type === "response.output_item.done") {
- const item = event.item;
-
- if (item.type === "reasoning" && currentBlock && currentBlock.type === "thinking") {
- currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
- stream.push({
- type: "thinking_end",
- contentIndex: blockIndex(),
- content: currentBlock.thinking,
- partial: output,
- });
- currentBlock = null;
- } else if (item.type === "message" && currentBlock && currentBlock.type === "text") {
- currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
- stream.push({
- type: "text_end",
- contentIndex: blockIndex(),
- content: currentBlock.text,
- partial: output,
- });
- currentBlock = null;
- } else if (item.type === "function_call") {
- const toolCall: AssistantToolCall = {
- type: "toolCall",
- id: item.call_id,
- name: item.name,
- arguments: JSON.parse(item.arguments),
- };
-
- // Validate tool arguments if tool definition is available
- if (context.tools) {
- const tool = context.tools.find((t) => t.name === toolCall.name);
- if (tool) {
- toolCall.arguments = validateToolArguments(tool, toolCall) as Record<string, any>;
- }
- }
-
- stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
- }
- }
- // Handle completion
- else if (event.type === "response.completed") {
- const response = event.response;
- // Update the final Response
- finalResponse = response
- if (response?.usage) {
- const cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;
- output.usage = {
- // OpenAI includes cached tokens in input_tokens, so subtract to get non-cached input
- input: (response.usage.input_tokens || 0) - cachedTokens,
- output: response.usage.output_tokens || 0,
- cacheRead: cachedTokens,
- cacheWrite: 0,
- totalTokens: response.usage.total_tokens || 0,
- cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
- };
- }
- calculateCost(model, output.usage);
- // Map status to stop reason
- output.stopReason = mapStopReason(response?.status);
- if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
- output.stopReason = "toolUse";
- }
- }
- // Handle errors
- else if (event.type === "error") {
- throw new Error(`OpenAI API Error (${event.code}): ${event.message}` || "Unknown OpenAI error");
- } else if (event.type === "response.failed") {
- throw new Error("OpenAI response failed without error details");
- }
- }
- if (options?.signal?.aborted) {
- throw new Error("Request was aborted");
- }
-
- if (output.stopReason === "aborted" || output.stopReason === "error") {
- throw new Error(
- `Stream ended with status: ${output.stopReason}${output.errorMessage ? ` - ${output.errorMessage}` : ""}`
- );
- }
-
- stream.push({ type: "done", reason: output.stopReason, message: output });
- stream.end({
- _provider: 'openai',
- role: 'assistant',
- message: finalResponse
- });
- }catch(error){
- for (const block of output.content) delete (block as any).index;
- output.stopReason = options?.signal?.aborted ? "aborted" : "error";
- output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
- stream.push({ type: "error", reason: output.stopReason, error: output });
-
- // Update finalResponse to reflect the error state
- finalResponse.status = options?.signal?.aborted ? "cancelled" : "failed";
- finalResponse.error = error instanceof Error ? {
- message: error.message,
- code: (error as any).code || "unknown_error",
- type: error.name || "Error"
- } as any : { message: String(error) } as any;
-
- stream.end({
- _provider: 'openai',
- role: 'assistant',
- message: finalResponse
- });
- }
- })()
-
- return stream;
- }
-
-
- function createClient(model: Model<"openai">, apiKey?: string) {
- if (!apiKey) {
- if (!process.env.OPENAI_API_KEY) {
- throw new Error(
- "OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.",
- );
- }
- apiKey = process.env.OPENAI_API_KEY;
- }
- return new OpenAI({
- apiKey,
- baseURL: model.baseUrl,
- dangerouslyAllowBrowser: true,
- defaultHeaders: model.headers,
- });
- }
-
- function buildParams(model: Model<"openai">, context: Context, options?: OpenAIProviderOptions){
- const messages = buildOpenAIMessages(model, context);
-
- const params: ResponseCreateParamsStreaming = {
- include: ['reasoning.encrypted_content'],
- input: messages,
- model: model.id,
- stream: true
- }
-
- if(options?.maxOutputTokens){
- params.max_output_tokens = options.maxOutputTokens;
- }
-
- if(options?.parallelToolCalls){
- params.parallel_tool_calls = options.parallelToolCalls;
- }
-
- if(options?.prompt_cache_key){
- params.prompt_cache_key = options.prompt_cache_key;
- }
-
- if(options?.promptCacheRetention){
- params.prompt_cache_retention = options.promptCacheRetention;
- }
-
- if(options?.reasoning){
- params.reasoning = {
- effort: options.reasoning.effort || "medium",
- summary: options.reasoning.summary || "auto"
- }
- }
-
- if(options?.temperature){
- params.temperature = options.temperature
- }
-
- if(context.tools){
- params.tools = convertTools(context.tools)
- }
-
- if(options?.truncation){
- params.truncation = options.truncation
- }
-
- return params;
- }
-
- function convertTools(tools: readonly Tool[]): OpenAITool[] {
- return tools.map((tool) => ({
- type: "function",
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters as any, // TypeBox already generates JSON Schema
- strict: null,
- }));
- }
-
- function mapStopReason(status: OpenAI.Responses.ResponseStatus | undefined): StopReason {
- if (!status) return "stop";
- switch (status) {
- case "completed":
- return "stop";
- case "incomplete":
- return "length";
- case "failed":
- case "cancelled":
- return "error";
- // These two are wonky ...
- case "in_progress":
- case "queued":
- return "stop";
- default: {
- const _exhaustive: never = status;
- throw new Error(`Unhandled stop reason: ${_exhaustive}`);
- }
- }
- }
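
Note: the removed streamOpenAI above emits the provider-agnostic AssistantMessageEvent union defined in src/types.ts (deleted further below). A minimal consumer-side sketch of handling those events, assuming only the event shapes shown in this diff; the handleEvent name and the accumulator object are illustrative and not part of the package:

import type { AssistantMessageEvent } from "./types";

// Illustrative handler for the event union pushed by streamOpenAI / streamGoogle.
function handleEvent(event: AssistantMessageEvent, out: { text: string }): void {
  switch (event.type) {
    case "text_delta":
    case "thinking_delta":
      out.text += event.delta; // incremental text / reasoning-summary output
      break;
    case "toolcall_end":
      console.log("tool call:", event.toolCall.name, event.toolCall.arguments);
      break;
    case "done":
      console.log("finished:", event.reason, event.message.usage.totalTokens, "tokens");
      break;
    case "error":
      console.error("stream error:", event.error.errorMessage);
      break;
    default:
      break; // start / *_start / *_end bookkeeping events
  }
}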
package/src/stream.ts DELETED
@@ -1,60 +0,0 @@
- import type { Model, Context, Api, OptionsForApi } from "./types";
- import { AssistantMessageEventStream } from "./utils/event-stream";
- import { streamOpenAI, OpenAIProviderOptions } from "./providers/openai";
- import { streamGoogle, GoogleProviderOptions } from "./providers/google";
-
- const envMap: Record<Api, string> = {
- openai: "OPENAI_API_KEY",
- // anthropic: "ANTHROPIC_API_KEY",
- google: "GEMINI_API_KEY",
- // groq: "GROQ_API_KEY",
- // cerebras: "CEREBRAS_API_KEY",
- // xai: "XAI_API_KEY",
- // openrouter: "OPENROUTER_API_KEY",
- // zai: "ZAI_API_KEY",
- };
-
-
- function getApiKeyFromEnv(api: Api){
- const envVar = envMap[api]
- return process.env[envVar]
- }
-
- export function stream<TApi extends Api>(
- model: Model<TApi>,
- context: Context,
- options?: OptionsForApi<TApi>,
- ): AssistantMessageEventStream {
-
- const apiKey = options?.apiKey !== undefined ? options.apiKey : getApiKeyFromEnv(model.api)
- if(!apiKey){
- throw new Error(`No API key for provider: ${model.api}`);
- }
-
- const providerOptions = { ...options, apiKey };
-
- // Switch directly on model.api and use type assertions for each provider
- switch (model.api) {
- case 'openai':
- // TypeScript knows this branch only runs when model.api === 'openai'
- return streamOpenAI(
- model as Model<'openai'>,
- context,
- providerOptions as OpenAIProviderOptions
- );
-
- case 'google':
- // TypeScript knows this branch only runs when model.api === 'google'
- return streamGoogle(
- model as Model<'google'>,
- context,
- providerOptions as GoogleProviderOptions
- );
-
- default: {
- const _exhaustive: never = model.api;
- throw new Error(`Unhandled API: ${_exhaustive}`);
- }
- }
-
- }
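
Note: the removed stream() entry point above dispatched on model.api and fell back to the provider's environment variable when no apiKey was passed. A hypothetical call against the 0.0.1 types; the model literal and its pricing numbers are illustrative, and only the field names come from the Model and Context interfaces in src/types.ts:

import { stream } from "./stream";
import type { Context, Model } from "./types";

// Hypothetical model definition; field names follow the removed Model<'openai'> interface.
const model: Model<"openai"> = {
  id: "gpt-4o-mini",
  name: "GPT-4o mini",
  api: "openai",
  baseUrl: "https://api.openai.com/v1",
  reasoning: false,
  input: ["text", "image"],
  cost: { input: 0.15, output: 0.6, cacheRead: 0.075, cacheWrite: 0 }, // $/million tokens, illustrative
  contextWindow: 128000,
  maxTokens: 16384,
};

const context: Context = {
  systemPrompt: "You are a helpful assistant.",
  messages: [{ role: "user", timestamp: Date.now(), content: [{ type: "text", content: "Hello!" }] }],
};

// Falls back to OPENAI_API_KEY when options.apiKey is omitted.
const events = stream(model, context, { temperature: 0.2 });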
package/src/types.ts DELETED
@@ -1,198 +0,0 @@
- import { Response } from "openai/resources/responses/responses.js";
- import type { TSchema } from "@sinclair/typebox";
- import { OpenAIProviderOptions } from "./providers/openai";
- import { AssistantMessageEventStream } from "./utils/event-stream";
- import { GoogleProviderOptions } from "./providers/google";
- import type { GenerateContentResponse } from "@google/genai";
-
- export type Api = 'openai' | 'google'
-
- export interface Model<TApi extends Api> {
- id: string;
- name: string;
- api: TApi;
- baseUrl: string;
- reasoning: boolean;
- input: ("text" | "image" | "file")[];
- cost: {
- input: number; // $/million tokens
- output: number; // $/million tokens
- cacheRead: number; // $/million tokens
- cacheWrite: number; // $/million tokens
- };
- contextWindow: number;
- maxTokens: number;
- headers?: Record<string, string>;
- }
-
-
- // ################################ Types for Standardized storing of User Message and Tool Result
-
- export interface UserMessage {
- role: "user"
- timestamp: number;
- content: (UserTextContent | UserImageContent | UserFileContent)[] // Supports text, images and files
- }
-
- export interface ToolResultMessage<TDetails = any> {
- role: "toolResult";
- toolName: string;
- toolCallId?: string;
- content: (UserTextContent | UserImageContent | UserFileContent)[]; // Supports text, images and files
- details?: TDetails; // Any extra information not sent to model
- isError: boolean;
- error?: {
- message: string;
- name?: string;
- stack?: string;
- }; // Full error details if isError is true
- timestamp: number; // Unix timestamp in milliseconds
- }
-
- export interface UserImageContent {
- type: "image"
- data: string; // base64 encoded image data
- mimeType: string; // e.g., "image/jpeg", "image/png"
- }
-
- export interface UserFileContent {
- type: "file"
- data: string; // base64 buffer encoded data
- mimeType: string; // e.g., "application/pdf",
- }
-
- export interface UserTextContent {
- type: 'text'
- content: string
- }
-
- // ################################ Types for Native Assistant Message
-
- export interface NativeOpenAIMessage {
- role: "assistant"
- _provider: 'openai'
- message: Response
- }
-
- export interface NativeGoogleMessage {
- role: "assistant"
- _provider: 'google'
- message: GenerateContentResponse
- }
-
- export type NativeAssistantMessage = NativeOpenAIMessage | NativeGoogleMessage;
-
- // ################################ Types for Stored Message
-
- export type Message = UserMessage | NativeAssistantMessage | ToolResultMessage
-
- export interface Tool<TParameters extends TSchema = TSchema, TName extends string = string> {
- name: TName;
- description: string;
- parameters: TParameters;
- }
-
- // Helper type to extract tool names from a tool array for better autocomplete
- export type ToolName<TTool extends Tool> = TTool["name"];
- export type ToolNames<TTools extends readonly Tool[]> = TTools[number]["name"];
-
- // Helper function to create a tool with better type inference
- // Use 'as const' on the tool array for best autocomplete:
- // const tools = [defineTool({ name: "calculator", ... }), ...] as const
- export function defineTool<TParameters extends TSchema, TName extends string>(
- tool: Tool<TParameters, TName>
- ): Tool<TParameters, TName> {
- return tool;
- }
-
- export interface Context<TTools extends readonly Tool[] = readonly Tool[]> {
- messages: Message[]
- systemPrompt?: string;
- tools?: TTools
- }
-
- // ################################ Types for Standardized streaming of Assistant Message
-
- export interface Usage {
- input: number;
- output: number;
- cacheRead: number;
- cacheWrite: number;
- totalTokens: number;
- cost: {
- input: number;
- output: number;
- cacheRead: number;
- cacheWrite: number;
- total: number;
- };
- }
-
- export type StopReason = "stop" | "length" | "toolUse" | "error" | "aborted";
-
- // return content from assistant -> Thinking, Text, ToolCall, Image (for image models)
- export interface AssistantMessage {
- role: "assistant";
- content: (AssistantTextContent | AssistantThinkingContent | AssistantToolCall | AbstractedImageContent)[];
- api: Api;
- model: string;
- usage: Usage;
- stopReason: StopReason;
- errorMessage?: string;
- timestamp: number; // Unix timestamp in milliseconds
- }
-
- export interface AssistantTextContent {
- type: 'text'
- text: string
- }
-
- export interface AssistantThinkingContent {
- type: 'thinking'
- thinking: string
- }
-
- export interface AssistantToolCall {
- type: "toolCall";
- name: string
- arguments: Record<string, any>;
- id?: string
- }
-
- export interface AbstractedImageContent {
- type: "image";
- data: string; // base64 encoded image data
- mimeType: string; // e.g., "image/jpeg", "image/png"
- }
-
- export type AssistantMessageEvent =
- | { type: "start"; partial: AssistantMessage }
- | { type: "text_start"; contentIndex: number; partial: AssistantMessage }
- | { type: "text_delta"; contentIndex: number; delta: string; partial: AssistantMessage }
- | { type: "text_end"; contentIndex: number; content: string; partial: AssistantMessage }
- | { type: "thinking_start"; contentIndex: number; partial: AssistantMessage }
- | { type: "thinking_delta"; contentIndex: number; delta: string; partial: AssistantMessage }
- | { type: "thinking_end"; contentIndex: number; content: string; partial: AssistantMessage }
- | { type: "toolcall_start"; contentIndex: number; partial: AssistantMessage }
- | { type: "toolcall_delta"; contentIndex: number; delta: string; partial: AssistantMessage }
- | { type: "toolcall_end"; contentIndex: number; toolCall: AssistantToolCall; partial: AssistantMessage }
- | { type: "done"; reason: Extract<StopReason, "stop" | "length" | "toolUse">; message: AssistantMessage }
- | { type: "error"; reason: Extract<StopReason, "aborted" | "error">; error: AssistantMessage };
-
-
-
- // ################################ Types for Stream Function
-
- export interface ApiOptionsMap {
- "openai": OpenAIProviderOptions;
- "google": GoogleProviderOptions;
- }
-
- export type OptionsForApi<TApi extends Api> = ApiOptionsMap[TApi];
-
-
- export type StreamFunction<TApi extends Api> = (
- model: Model<TApi>,
- context: Context,
- options: OptionsForApi<TApi>,
- ) => AssistantMessageEventStream;
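
Note: the removed defineTool helper was documented only by its inline comment. A short sketch of the pattern that comment describes, using @sinclair/typebox; the calculator tool is the comment's own example, fleshed out with hypothetical fields:

import { Type } from "@sinclair/typebox";
import { defineTool, type Context, type ToolNames } from "./types";

// Hypothetical tool definition following the "calculator" example from the comment above.
const calculator = defineTool({
  name: "calculator",
  description: "Evaluate a basic arithmetic expression.",
  parameters: Type.Object({
    expression: Type.String({ description: "e.g. '2 + 2 * 3'" }),
  }),
});

// 'as const' keeps the literal tool names for autocomplete, as the comment suggests.
const tools = [calculator] as const;
type Available = ToolNames<typeof tools>; // "calculator"

const context: Context<typeof tools> = { messages: [], tools };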