@iinm/plain-agent 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79) hide show
  1. package/.config/agents.library/code-simplifier.md +5 -0
  2. package/.config/agents.library/qa-engineer.md +74 -0
  3. package/.config/agents.library/software-architect.md +278 -0
  4. package/.config/agents.predefined/worker.md +3 -0
  5. package/.config/config.predefined.json +825 -0
  6. package/.config/prompts.library/code-review.md +8 -0
  7. package/.config/prompts.library/feature-dev.md +6 -0
  8. package/.config/prompts.predefined/shortcuts/commit-by-user.md +9 -0
  9. package/.config/prompts.predefined/shortcuts/commit.md +10 -0
  10. package/.config/prompts.predefined/shortcuts/general-question.md +6 -0
  11. package/LICENSE +21 -0
  12. package/README.md +624 -0
  13. package/bin/plain +3 -0
  14. package/bin/plain-interrupt +6 -0
  15. package/bin/plain-notify-desktop +19 -0
  16. package/bin/plain-notify-terminal-bell +3 -0
  17. package/package.json +57 -0
  18. package/sandbox/bin/plain-sandbox +972 -0
  19. package/src/agent.d.ts +48 -0
  20. package/src/agent.mjs +159 -0
  21. package/src/agentLoop.mjs +369 -0
  22. package/src/agentState.mjs +41 -0
  23. package/src/cliArgs.mjs +45 -0
  24. package/src/cliFormatter.mjs +217 -0
  25. package/src/cliInteractive.mjs +739 -0
  26. package/src/config.d.ts +48 -0
  27. package/src/config.mjs +168 -0
  28. package/src/context/consumeInterruptMessage.mjs +30 -0
  29. package/src/context/loadAgentRoles.mjs +272 -0
  30. package/src/context/loadPrompts.mjs +312 -0
  31. package/src/context/loadUserMessageContext.mjs +147 -0
  32. package/src/env.mjs +46 -0
  33. package/src/main.mjs +202 -0
  34. package/src/mcp.mjs +202 -0
  35. package/src/model.d.ts +109 -0
  36. package/src/modelCaller.mjs +29 -0
  37. package/src/modelDefinition.d.ts +73 -0
  38. package/src/prompt.mjs +128 -0
  39. package/src/providers/anthropic.d.ts +248 -0
  40. package/src/providers/anthropic.mjs +596 -0
  41. package/src/providers/gemini.d.ts +208 -0
  42. package/src/providers/gemini.mjs +752 -0
  43. package/src/providers/openai.d.ts +281 -0
  44. package/src/providers/openai.mjs +551 -0
  45. package/src/providers/openaiCompatible.d.ts +147 -0
  46. package/src/providers/openaiCompatible.mjs +658 -0
  47. package/src/providers/platform/azure.mjs +42 -0
  48. package/src/providers/platform/bedrock.mjs +74 -0
  49. package/src/providers/platform/googleCloud.mjs +34 -0
  50. package/src/subagent.mjs +247 -0
  51. package/src/tmpfile.mjs +27 -0
  52. package/src/tool.d.ts +74 -0
  53. package/src/toolExecutor.mjs +236 -0
  54. package/src/toolInputValidator.mjs +183 -0
  55. package/src/toolUseApprover.mjs +98 -0
  56. package/src/tools/askGoogle.mjs +135 -0
  57. package/src/tools/delegateToSubagent.d.ts +4 -0
  58. package/src/tools/delegateToSubagent.mjs +48 -0
  59. package/src/tools/execCommand.d.ts +22 -0
  60. package/src/tools/execCommand.mjs +200 -0
  61. package/src/tools/fetchWebPage.mjs +96 -0
  62. package/src/tools/patchFile.d.ts +4 -0
  63. package/src/tools/patchFile.mjs +96 -0
  64. package/src/tools/reportAsSubagent.d.ts +3 -0
  65. package/src/tools/reportAsSubagent.mjs +44 -0
  66. package/src/tools/tavilySearch.d.ts +6 -0
  67. package/src/tools/tavilySearch.mjs +57 -0
  68. package/src/tools/tmuxCommand.d.ts +14 -0
  69. package/src/tools/tmuxCommand.mjs +194 -0
  70. package/src/tools/writeFile.d.ts +4 -0
  71. package/src/tools/writeFile.mjs +56 -0
  72. package/src/utils/evalJSONConfig.mjs +48 -0
  73. package/src/utils/matchValue.d.ts +6 -0
  74. package/src/utils/matchValue.mjs +40 -0
  75. package/src/utils/noThrow.mjs +31 -0
  76. package/src/utils/notify.mjs +28 -0
  77. package/src/utils/parseFileRange.mjs +18 -0
  78. package/src/utils/readFileRange.mjs +33 -0
  79. package/src/utils/retryOnError.mjs +41 -0
@@ -0,0 +1,551 @@
1
+ /**
2
+ * @import { ModelInput, Message, AssistantMessage, ModelOutput, PartialMessageContent } from "../model"
3
+ * @import { OpenAIFunctionToolCall, OpenAIInputImage, OpenAIInputItem, OpenAIModelConfig, OpenAIOutputItem, OpenAIOutputMessage, OpenAIReasoning, OpenAIRequest, OpenAIStreamEvent, OpenAIToolFunction } from "./openai"
4
+ * @import { ToolDefinition } from "../tool"
5
+ */
6
+
7
+ import { styleText } from "node:util";
8
+ import { noThrow } from "../utils/noThrow.mjs";
9
+ import { getAzureAccessToken } from "./platform/azure.mjs";
10
+
11
/**
 * Maximum number of retries for transient failures (HTTP 429/5xx responses
 * or a stream that ends without a `response.completed` event).
 */
const MAX_RETRY_COUNT = 8;

/**
 * Calls the OpenAI Responses API (`POST {baseURL}/v1/responses`) with
 * streaming enabled, forwarding displayable deltas to
 * `input.onPartialMessageContent` as they arrive.
 *
 * Transient failures are retried with exponential backoff
 * (2 * 2^retryCount seconds, capped at 16s), up to MAX_RETRY_COUNT attempts.
 * Previously retries were unbounded, which could recurse forever against a
 * persistently failing endpoint; the log message also claimed "rate limit
 * exceeded" even for 5xx server errors.
 *
 * @param {import("../modelDefinition").PlatformConfig} platformConfig
 * @param {OpenAIModelConfig} modelConfig
 * @param {ModelInput} input
 * @param {number} retryCount - internal retry counter; callers omit it.
 * @returns {Promise<ModelOutput | Error>} the model output, or an Error
 *   returned (not thrown) via the noThrow wrapper when the call fails.
 */
export async function callOpenAIModel(
  platformConfig,
  modelConfig,
  input,
  retryCount = 0,
) {
  return await noThrow(async () => {
    const messages = convertGenericMessageToOpenAIFormat(input.messages);
    const tools = convertGenericeToolDefinitionToOpenAIFormat(
      input.tools || [],
    );

    const { model, ...baseModelConfig } = modelConfig;

    /** @type {OpenAIRequest} */
    const request = {
      ...baseModelConfig,
      model: model,
      input: messages,
      tools: tools.length ? tools : undefined,
      stream: true,
    };

    // Resolve credentials per platform: a static API key for "openai",
    // an access token obtained from the Azure CLI for "azure".
    const apiKey = await (async () => {
      switch (platformConfig.name) {
        case "openai":
          return platformConfig.apiKey;
        case "azure":
          return getAzureAccessToken(
            platformConfig.azureConfigDir
              ? {
                  azureConfigDir: platformConfig.azureConfigDir,
                }
              : undefined,
          );
        default:
          throw new Error(`Unsupported platform: ${platformConfig.name}`);
      }
    })();

    const response = await fetch(`${platformConfig.baseURL}/v1/responses`, {
      method: "POST",
      headers: {
        ...platformConfig.customHeaders,
        "Content-Type": "application/json",
        Authorization: `Bearer ${apiKey}`,
      },
      body: JSON.stringify(request),
      // Abort the whole request if it does not finish within 5 minutes.
      signal: AbortSignal.timeout(5 * 60 * 1000),
    });

    // Exponential backoff: 2, 4, 8, 16, 16, ... seconds.
    const retryInterval = Math.min(2 * 2 ** retryCount, 16);
    if (response.status === 429 || response.status >= 500) {
      if (retryCount >= MAX_RETRY_COUNT) {
        throw new Error(
          `Failed to call OpenAI model after ${retryCount} retries: status=${response.status}, body=${await response.text()}`,
        );
      }
      // Fix: report the actual status — the old message said "rate limit
      // exceeded" even for 5xx server errors.
      console.error(
        styleText(
          "yellow",
          `OpenAI request failed with status=${response.status}. Retry in ${retryInterval} seconds...`,
        ),
      );
      await new Promise((resolve) => setTimeout(resolve, retryInterval * 1000));
      return callOpenAIModel(
        platformConfig,
        modelConfig,
        input,
        retryCount + 1,
      );
    }

    if (response.status !== 200) {
      throw new Error(
        `Failed to call OpenAI model: status=${response.status}, body=${await response.text()}`,
      );
    }

    if (!response.body) {
      throw new Error("Response body is empty");
    }

    const reader = response.body.getReader();

    // Collect every stream event, forwarding displayable partial content to
    // the caller as it arrives.
    /** @type {OpenAIStreamEvent[]} */
    const streamEvents = [];
    for await (const streamEvent of readOpenAIStreamData(reader)) {
      streamEvents.push(streamEvent);
      const partialContent =
        convertOpenAIStreamDataToAgentPartialContent(streamEvent);
      if (input.onPartialMessageContent && partialContent) {
        input.onPartialMessageContent(partialContent);
      }
    }

    // A healthy stream always terminates with a "response.completed" event.
    const lastEvent = streamEvents.at(-1);
    if (lastEvent?.type !== "response.completed") {
      // Trim failed responses down to the error payload to keep logs readable.
      const lastEventTrimmed =
        lastEvent?.type === "response.failed"
          ? {
              type: lastEvent.type,
              response: { error: lastEvent.response.error },
            }
          : lastEvent;
      if (retryCount >= MAX_RETRY_COUNT) {
        throw new Error(
          `OpenAI stream did not complete after ${retryCount} retries: ${JSON.stringify(lastEventTrimmed)}`,
        );
      }
      console.error(
        styleText(
          "yellow",
          `OpenAI stream did not complete: ${JSON.stringify(lastEventTrimmed)}. Retry in ${retryInterval} seconds...`,
        ),
      );
      await new Promise((resolve) => setTimeout(resolve, retryInterval * 1000));
      return callOpenAIModel(
        platformConfig,
        modelConfig,
        input,
        retryCount + 1,
      );
    }

    return {
      message: convertOpenAIAssistantMessageToGenericFormat(
        lastEvent.response.output,
      ),
      providerTokenUsage: lastEvent.response.usage,
    };
  });
}
141
+
142
/**
 * Converts the agent's generic message history into OpenAI Responses API
 * input items.
 *
 * Role mapping:
 * - system  -> one system message with input_text parts.
 * - user    -> tool results become function_call_output items first; any
 *              images contained in those results cannot be embedded there,
 *              so they are replaced by an "(Image [n] omitted...)" placeholder
 *              and sent afterwards as a separate user message of input_image
 *              parts; plain text/image user parts become a final user message.
 * - assistant -> thinking/tool_use/text parts are reconstructed into
 *              reasoning / function_call / message output items, restoring
 *              provider-specific fields saved in `part.provider.fields`.
 *
 * @param {Message[]} genericMessages
 * @returns {OpenAIInputItem[]}
 */
function convertGenericMessageToOpenAIFormat(genericMessages) {
  /** @type {OpenAIInputItem[]} */
  const openAIInputItems = [];
  for (const genericMessage of genericMessages) {
    switch (genericMessage.role) {
      case "system": {
        openAIInputItems.push({
          role: "system",
          content: genericMessage.content.map((part) => ({
            type: "input_text",
            text: part.text,
          })),
        });
        break;
      }
      case "user": {
        // Split the user message into tool results vs. ordinary content;
        // they map to different OpenAI item kinds.
        const toolResults = genericMessage.content.filter(
          (part) => part.type === "tool_result",
        );
        const userContentParts = genericMessage.content.filter(
          (part) => part.type === "text" || part.type === "image",
        );

        // Tool Results
        // Images cannot be embedded in function_call_output, so each image
        // is replaced with a numbered placeholder; the actual image data is
        // delivered in the follow-up user message built below.
        let imageIndex = 0;
        for (const result of toolResults) {
          const toolResultContentString = result.content
            .map((part) => {
              switch (part.type) {
                case "text":
                  return part.text;
                case "image":
                  imageIndex += 1;
                  return `(Image [${imageIndex}] omitted. See next message from user.)`;
                default:
                  throw new Error(
                    `Unsupported content part: ${JSON.stringify(part)}`,
                  );
              }
            })
            .join("\n\n");
          openAIInputItems.push({
            type: "function_call_output",
            call_id: result.toolUseId,
            output: toolResultContentString,
          });
        }

        // Collect the images referenced by the placeholders above, in the
        // same order the placeholders were numbered.
        /** @type {OpenAIInputImage[]} */
        const imageInputs = [];
        for (const result of toolResults) {
          for (const part of result.content) {
            if (part.type === "image") {
              imageInputs.push({
                type: "input_image",
                image_url: `data:${part.mimeType};base64,${part.data}`,
                detail: "auto",
              });
            }
          }
        }

        if (imageInputs.length) {
          openAIInputItems.push({
            role: "user",
            content: imageInputs,
          });
        }

        // User Input Parts
        if (userContentParts.length) {
          openAIInputItems.push({
            role: "user",
            content: userContentParts.map((part) => {
              if (part.type === "text") {
                return { type: "input_text", text: part.text };
              }
              if (part.type === "image") {
                return {
                  type: "input_image",
                  image_url: `data:${part.mimeType};base64,${part.data}`,
                };
              }
              throw new Error(
                `Unsupported content part: ${JSON.stringify(part)}`,
              );
            }),
          });
        }

        break;
      }
      case "assistant": {
        // Earlier approach (kept for reference): replay the raw provider
        // output items verbatim instead of rebuilding them per part.
        // if (!genericMessage.provider?.source) {
        //   throw new Error(
        //     "Original message is required for assistant role but not provided.",
        //   );
        // }
        // const source = /** @type {OpenAIOutputItem[]} */ (
        //   genericMessage.provider.source
        // );
        // openAIInputItems.push(...source);

        for (const part of genericMessage.content) {
          if (part.type === "thinking") {
            // provider.fields carries id / encrypted_content / summary saved
            // when the reasoning item was first received.
            openAIInputItems.push(
              /** @type {OpenAIReasoning} */ ({
                type: "reasoning",
                ...part.provider?.fields,
              }),
            );
          }
          if (part.type === "tool_use") {
            openAIInputItems.push(
              /** @type {OpenAIFunctionToolCall} */ ({
                type: "function_call",
                name: part.toolName,
                arguments: JSON.stringify(part.input),
                call_id: part.toolUseId,
                ...part.provider?.fields,
              }),
            );
          }
          if (part.type === "text") {
            // Text parts that originated from the same provider message item
            // (same id) are merged back into one "message" item; otherwise a
            // new assistant message item is created.
            const itemId = /** @type {string | undefined} */ (
              part.provider?.fields?.id
            );
            const item = /** @type {OpenAIOutputMessage | undefined} */ (
              openAIInputItems.find(
                (item) =>
                  "id" in item && item.id === itemId && item.type === "message",
              )
            );

            if (item) {
              item.content.push({
                type: "output_text",
                text: part.text,
              });
            } else {
              openAIInputItems.push(
                /** @type {OpenAIOutputMessage} */ ({
                  type: "message",
                  role: "assistant",
                  content: [
                    {
                      type: "output_text",
                      text: part.text,
                    },
                  ],
                  ...part.provider?.fields,
                }),
              );
            }
          }
        }
      }
    }
  }

  return openAIInputItems;
}
308
+
309
/**
 * Converts generic tool definitions into OpenAI function-tool declarations.
 *
 * @param {ToolDefinition[]} genericToolDefs
 * @returns {OpenAIToolFunction[]}
 */
function convertGenericeToolDefinitionToOpenAIFormat(genericToolDefs) {
  // One-to-one mapping; the JSON schema is passed through unchanged.
  return genericToolDefs.map((toolDef) => ({
    type: "function",
    name: toolDef.name,
    description: toolDef.description,
    parameters: toolDef.inputSchema,
  }));
}
327
+
328
/**
 * Maps OpenAI Responses API output items onto the agent's generic assistant
 * message. Provider-specific identifiers (item id, encrypted reasoning
 * content, summaries) are preserved under `provider.fields` so they can be
 * replayed on the next request.
 *
 * @param {OpenAIOutputItem[]} openAIOutputItems
 * @returns {AssistantMessage}
 */
function convertOpenAIAssistantMessageToGenericFormat(openAIOutputItems) {
  /** @type {AssistantMessage["content"]} */
  const content = [];

  for (const item of openAIOutputItems) {
    switch (item.type) {
      case "reasoning":
        // Use the first summary entry as the visible thinking text.
        content.push({
          type: "thinking",
          thinking: item.summary.at(0)?.text ?? "",
          provider: {
            fields: {
              id: item.id,
              encrypted_content: item.encrypted_content,
              summary: item.summary,
            },
          },
        });
        break;
      case "message":
        // Only output_text parts are surfaced; each keeps the parent item id.
        for (const part of item.content) {
          if (part.type === "output_text") {
            content.push({
              type: "text",
              text: part.text,
              provider: {
                fields: {
                  id: item.id,
                },
              },
            });
          }
        }
        break;
      case "function_call":
        content.push({
          type: "tool_use",
          toolUseId: item.call_id,
          toolName: item.name,
          input: JSON.parse(item.arguments),
          provider: {
            fields: {
              id: item.id,
            },
          },
        });
        break;
    }
  }

  return {
    role: "assistant",
    content,
  };
}
390
+
391
/**
 * Translates one OpenAI stream event into the agent's partial message
 * content (thinking / text / refusal / tool_use, each with a
 * start / delta / stop position). Events that carry nothing displayable
 * yield undefined.
 *
 * @param {OpenAIStreamEvent} streamEvent
 * @returns {PartialMessageContent | undefined}
 */
function convertOpenAIStreamDataToAgentPartialContent(streamEvent) {
  switch (streamEvent.type) {
    // Item lifecycle: reasoning and function_call items mark block boundaries.
    case "response.output_item.added":
      if (streamEvent.item.type === "reasoning") {
        return { type: "thinking", position: "start" };
      }
      if (streamEvent.item.type === "function_call") {
        return {
          type: "tool_use",
          position: "start",
          content: streamEvent.item.arguments,
        };
      }
      return undefined;
    case "response.output_item.done":
      if (streamEvent.item.type === "reasoning") {
        return { type: "thinking", position: "stop" };
      }
      if (streamEvent.item.type === "function_call") {
        return { type: "tool_use", position: "stop" };
      }
      return undefined;

    // Reasoning summary text.
    case "response.reasoning_summary_part.added":
      return {
        type: "thinking",
        position: "delta",
        content: streamEvent.part.text,
      };
    case "response.reasoning_summary_text.delta":
      return {
        type: "thinking",
        position: "delta",
        content: streamEvent.delta,
      };
    case "response.reasoning_summary_text.done":
      return {
        type: "thinking",
        position: "delta",
        content: streamEvent.text,
      };

    // Assistant text / refusal parts.
    case "response.content_part.added":
      if (streamEvent.part.type === "output_text") {
        return {
          type: "text",
          position: "start",
          content: streamEvent.part.text,
        };
      }
      if (streamEvent.part.type === "refusal") {
        return {
          type: "refusal",
          position: "start",
          content: streamEvent.part.refusal,
        };
      }
      return undefined;
    case "response.output_text.delta":
      return {
        type: "text",
        position: "delta",
        content: streamEvent.delta,
      };
    case "response.content_part.done":
      if (streamEvent.part.type === "output_text") {
        return { type: "text", position: "stop" };
      }
      if (streamEvent.part.type === "refusal") {
        return { type: "refusal", position: "stop" };
      }
      return undefined;

    // Tool-call argument streaming.
    case "response.function_call_arguments.delta":
      return {
        type: "tool_use",
        position: "delta",
        content: streamEvent.delta,
      };

    default:
      return undefined;
  }
}
508
+
509
+ /**
510
+ * @param {ReadableStreamDefaultReader<Uint8Array>} reader
511
+ */
512
+ async function* readOpenAIStreamData(reader) {
513
+ let buffer = new Uint8Array();
514
+
515
+ while (true) {
516
+ const { done, value } = await reader.read();
517
+ if (done) {
518
+ break;
519
+ }
520
+
521
+ const nextBuffer = new Uint8Array(buffer.length + value.length);
522
+ nextBuffer.set(buffer);
523
+ nextBuffer.set(value, buffer.length);
524
+ buffer = nextBuffer;
525
+
526
+ const lineFeed = "\n".charCodeAt(0);
527
+ const dataEndIndices = [];
528
+ for (let i = 0; i < buffer.length - 1; i++) {
529
+ if (buffer[i] === lineFeed && buffer[i + 1] === lineFeed) {
530
+ dataEndIndices.push(i);
531
+ }
532
+ }
533
+
534
+ for (let i = 0; i < dataEndIndices.length; i++) {
535
+ const dataStartIndex = i === 0 ? 0 : dataEndIndices[i - 1] + 2;
536
+ const dataEndIndex = dataEndIndices[i];
537
+ const data = buffer.slice(dataStartIndex, dataEndIndex);
538
+ const decodedData = new TextDecoder().decode(data);
539
+ if (decodedData.startsWith("event: ")) {
540
+ const eventDate = decodedData.split("\n").slice(1).join("\n");
541
+ /** @type {OpenAIStreamEvent} */
542
+ const parsedData = JSON.parse(eventDate.slice("data: ".length));
543
+ yield parsedData;
544
+ }
545
+ }
546
+
547
+ if (dataEndIndices.length) {
548
+ buffer = buffer.slice(dataEndIndices[dataEndIndices.length - 1] + 2);
549
+ }
550
+ }
551
+ }
@@ -0,0 +1,147 @@
1
/* Model */

/**
 * Model configuration accepted for OpenAI-compatible endpoints.
 * Either a sampling-temperature variant or a reasoning-effort variant;
 * the union has no discriminant beyond which optional field is present.
 */
export type OpenAICompatibleModelConfig =
  | {
      model: string;
      temperature?: number;
    }
  | {
      model: string;
      reasoning_effort?: "low" | "medium" | "high";
    };
11
+
12
/* Request */

/**
 * Request body for the OpenAI-compatible `POST /chat/completions` endpoint.
 * Only the fields this client sends are modeled here.
 */
export type OpenAIChatCompletionRequest = {
  model?: string;
  messages: OpenAIMessage[];
  tools?: OpenAIToolDefinition[];
  stream?: boolean;
  temperature?: number;
  reasoning_effort?: "low" | "medium" | "high";
  /** When streaming, asks the server to append a usage chunk to the stream. */
  stream_options?: {
    include_usage: boolean;
  };
};
24
+
25
+ /* Output */
26
+ export type OpenAIChatCompletion = {
27
+ id: string;
28
+ object: string;
29
+ created: number;
30
+ model: string;
31
+ choices: OpenIAChatCompletionChoice[];
32
+ usage: OpenAIChatCompletionUsage;
33
+ };
34
+
35
+ export type OpenIAChatCompletionChoice = {
36
+ index: number;
37
+ message: OpenAIAssistantMessage;
38
+ finish_reason: string;
39
+ };
40
+
41
/* Message */

/** Any message the chat-completions API accepts or returns. */
export type OpenAIMessage =
  | OpenAISystemMessage
  | OpenAIUserMessage
  | OpenAIAssistantMessage
  | OpenAIToolMessage;

/** System instructions, expressed as structured text parts. */
export type OpenAISystemMessage = {
  role: "system";
  content: OpenAIMessageContentText[];
};

/** A user turn; may mix text and image parts. */
export type OpenAIUserMessage = {
  role: "user";
  content: (OpenAIMessageContentText | OpenAIMessageContentImage)[];
};

/**
 * An assistant turn. All fields are optional because a reply may be text
 * only, tool calls only, or (for reasoning-capable backends) include
 * `reasoning_content`.
 */
export type OpenAIAssistantMessage = {
  role: "assistant";
  reasoning_content?: string;
  content?: string;
  tool_calls?: OpenAIMessageToolCall[];
};

/** The result of executing a tool, linked to its call via `tool_call_id`. */
export type OpenAIToolMessage = {
  role: "tool";
  content: string;
  tool_call_id: string;
};

/** Plain-text content part. */
export type OpenAIMessageContentText = {
  type: "text";
  text: string;
};

/** Image content part; `url` may be an http(s) URL or a data: URI. */
export type OpenAIMessageContentImage = {
  type: "image_url";
  image_url: {
    url: string;
  };
};

/** A single tool invocation requested by the assistant. */
export type OpenAIMessageToolCall = {
  id: string;
  type: "function";
  function: OpenAIToolCallFunction;
};

/** Function name plus its arguments as a raw JSON string. */
export type OpenAIToolCallFunction = {
  name: string;
  arguments: string;
};
93
+
94
/* Usage */

/**
 * Token accounting returned with a completion (or the final stream chunk
 * when `stream_options.include_usage` is set).
 */
export type OpenAIChatCompletionUsage = {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
  /** Provider-specific breakdowns (e.g. cached tokens); keys vary by backend. */
  prompt_tokens_details: Record<string, number>;
  completion_tokens_details: Record<string, number>;
};
102
+
103
/* Tool */

/**
 * A callable tool advertised to the model; `parameters` is a JSON Schema
 * object describing the expected arguments.
 */
export type OpenAIToolDefinition = {
  type: "function";
  function: {
    name: string;
    description: string;
    parameters: Record<string, unknown>;
  };
};
112
+
113
/* Streaming Data */

/** One parsed SSE `data:` payload (a chat-completion chunk). */
export type OpenAIStreamData = {
  id: string;
  object: string;
  created: number;
  model: string;
  service_tier?: string;
  system_fingerprint?: string;
  choices: OpenAIStreamDataChoice[];
  /** Present only on the final chunk when usage reporting was requested. */
  usage?: OpenAIChatCompletionUsage;
};

/** One choice slot within a stream chunk. */
export type OpenAIStreamDataChoice = {
  index: number;
  delta: OpenAIStreamDataDelta;
  finish_reason: string;
};

/**
 * Incremental assistant content. Fields are optional because each chunk
 * carries only what changed since the previous one.
 */
export type OpenAIStreamDataDelta = {
  role?: "assistant";
  reasoning_content?: string;
  content?: string;
  refusal?: unknown;
  tool_calls?: OpenAIStreamDataToolCall[];
};

/**
 * Incremental tool-call data; `index` identifies which call the fragment
 * belongs to, and `function.arguments` accumulates across chunks.
 */
export type OpenAIStreamDataToolCall = {
  index: number;
  id?: string;
  type?: string;
  function?: {
    name?: string;
    arguments: string;
  };
};