@shareai-lab/kode-sdk 2.7.1 → 2.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/core/agent/breakpoint-manager.js +36 -0
- package/dist/core/agent/message-queue.js +57 -0
- package/dist/core/agent/permission-manager.js +32 -0
- package/dist/core/agent/todo-manager.js +91 -0
- package/dist/core/agent/tool-runner.js +45 -0
- package/dist/core/agent.js +2035 -0
- package/dist/core/config.js +2 -0
- package/dist/core/context-manager.js +241 -0
- package/dist/core/errors.js +49 -0
- package/dist/core/events.js +329 -0
- package/dist/core/file-pool.d.ts +2 -0
- package/dist/core/file-pool.js +125 -0
- package/dist/core/hooks.js +71 -0
- package/dist/core/permission-modes.js +61 -0
- package/dist/core/pool.js +301 -0
- package/dist/core/room.js +57 -0
- package/dist/core/scheduler.js +58 -0
- package/dist/core/skills/index.js +20 -0
- package/dist/core/skills/management-manager.js +557 -0
- package/dist/core/skills/manager.js +243 -0
- package/dist/core/skills/operation-queue.js +113 -0
- package/dist/core/skills/sandbox-file-manager.js +183 -0
- package/dist/core/skills/types.js +9 -0
- package/dist/core/skills/xml-generator.js +70 -0
- package/dist/core/template.js +35 -0
- package/dist/core/time-bridge.js +100 -0
- package/dist/core/todo.js +89 -0
- package/dist/core/types.js +3 -0
- package/dist/index.js +148 -60461
- package/dist/infra/db/postgres/postgres-store.js +1073 -0
- package/dist/infra/db/sqlite/sqlite-store.js +800 -0
- package/dist/infra/e2b/e2b-fs.js +128 -0
- package/dist/infra/e2b/e2b-sandbox.js +156 -0
- package/dist/infra/e2b/e2b-template.js +105 -0
- package/dist/infra/e2b/index.js +9 -0
- package/dist/infra/e2b/types.js +2 -0
- package/dist/infra/provider.js +67 -0
- package/dist/infra/providers/anthropic.js +308 -0
- package/dist/infra/providers/core/errors.js +353 -0
- package/dist/infra/providers/core/fork.js +418 -0
- package/dist/infra/providers/core/index.js +76 -0
- package/dist/infra/providers/core/logger.js +191 -0
- package/dist/infra/providers/core/retry.js +189 -0
- package/dist/infra/providers/core/usage.js +376 -0
- package/dist/infra/providers/gemini.js +493 -0
- package/dist/infra/providers/index.js +83 -0
- package/dist/infra/providers/openai.js +662 -0
- package/dist/infra/providers/types.js +20 -0
- package/dist/infra/providers/utils.js +400 -0
- package/dist/infra/sandbox-factory.js +30 -0
- package/dist/infra/sandbox.js +243 -0
- package/dist/infra/store/factory.js +80 -0
- package/dist/infra/store/index.js +26 -0
- package/dist/infra/store/json-store.js +606 -0
- package/dist/infra/store/types.js +2 -0
- package/dist/infra/store.js +29 -0
- package/dist/tools/bash_kill/index.js +35 -0
- package/dist/tools/bash_kill/prompt.js +14 -0
- package/dist/tools/bash_logs/index.js +40 -0
- package/dist/tools/bash_logs/prompt.js +14 -0
- package/dist/tools/bash_run/index.js +61 -0
- package/dist/tools/bash_run/prompt.js +18 -0
- package/dist/tools/builtin.js +26 -0
- package/dist/tools/define.js +214 -0
- package/dist/tools/fs_edit/index.js +62 -0
- package/dist/tools/fs_edit/prompt.js +15 -0
- package/dist/tools/fs_glob/index.js +40 -0
- package/dist/tools/fs_glob/prompt.js +15 -0
- package/dist/tools/fs_grep/index.js +66 -0
- package/dist/tools/fs_grep/prompt.js +16 -0
- package/dist/tools/fs_multi_edit/index.js +106 -0
- package/dist/tools/fs_multi_edit/prompt.js +16 -0
- package/dist/tools/fs_read/index.js +40 -0
- package/dist/tools/fs_read/prompt.js +16 -0
- package/dist/tools/fs_write/index.js +40 -0
- package/dist/tools/fs_write/prompt.js +15 -0
- package/dist/tools/index.js +61 -0
- package/dist/tools/mcp.js +185 -0
- package/dist/tools/registry.js +26 -0
- package/dist/tools/scripts.js +205 -0
- package/dist/tools/skills.js +115 -0
- package/dist/tools/task_run/index.js +58 -0
- package/dist/tools/task_run/prompt.js +25 -0
- package/dist/tools/todo_read/index.js +29 -0
- package/dist/tools/todo_read/prompt.js +18 -0
- package/dist/tools/todo_write/index.js +42 -0
- package/dist/tools/todo_write/prompt.js +23 -0
- package/dist/tools/tool.js +211 -0
- package/dist/tools/toolkit.js +98 -0
- package/dist/tools/type-inference.js +207 -0
- package/dist/utils/agent-id.js +28 -0
- package/dist/utils/logger.js +44 -0
- package/dist/utils/session-id.js +64 -0
- package/package.json +7 -38
- package/dist/index.js.map +0 -7
- package/dist/index.mjs +0 -60385
- package/dist/index.mjs.map +0 -7
|
@@ -0,0 +1,662 @@
|
|
|
1
|
+
"use strict";
/**
 * OpenAI Provider Adapter
 *
 * Converts internal Anthropic-style messages to OpenAI API format.
 * Supports:
 * - Chat Completions API (GPT-4.x)
 * - Responses API (GPT-5.x with reasoning)
 * - Streaming with SSE
 * - Tool calls
 * - Reasoning tokens (reasoning_content, reasoning_details)
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIProvider = void 0;
const utils_1 = require("./utils");
|
|
16
|
+
class OpenAIProvider {
|
|
17
|
+
constructor(apiKey, model = 'gpt-4o', baseUrl = 'https://api.openai.com/v1', proxyUrl, options) {
|
|
18
|
+
this.apiKey = apiKey;
|
|
19
|
+
this.maxWindowSize = 128000;
|
|
20
|
+
this.maxOutputTokens = 4096;
|
|
21
|
+
this.temperature = 0.7;
|
|
22
|
+
this.model = model;
|
|
23
|
+
this.baseUrl = (0, utils_1.normalizeOpenAIBaseUrl)(baseUrl);
|
|
24
|
+
this.dispatcher = (0, utils_1.getProxyDispatcher)(proxyUrl);
|
|
25
|
+
this.reasoningTransport = options?.reasoningTransport ?? 'text';
|
|
26
|
+
this.extraHeaders = options?.extraHeaders;
|
|
27
|
+
this.extraBody = options?.extraBody;
|
|
28
|
+
this.providerOptions = options?.providerOptions;
|
|
29
|
+
this.multimodal = options?.multimodal;
|
|
30
|
+
this.openaiApi = options?.api ?? this.providerOptions?.openaiApi ?? 'chat';
|
|
31
|
+
this.thinking = options?.thinking;
|
|
32
|
+
this.reasoning = options?.reasoning;
|
|
33
|
+
this.responsesConfig = options?.responses;
|
|
34
|
+
}
|
|
35
|
+
applyReasoningDefaults(body) {
|
|
36
|
+
// Apply reasoning request parameters from configuration
|
|
37
|
+
if (this.reasoning?.requestParams) {
|
|
38
|
+
for (const [key, value] of Object.entries(this.reasoning.requestParams)) {
|
|
39
|
+
if (body[key] === undefined) {
|
|
40
|
+
body[key] = value;
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
// Apply Responses API reasoning config
|
|
45
|
+
if (this.openaiApi === 'responses' && this.responsesConfig?.reasoning) {
|
|
46
|
+
if (!body.reasoning) {
|
|
47
|
+
body.reasoning = this.responsesConfig.reasoning;
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
// Apply Responses API store option
|
|
51
|
+
if (this.openaiApi === 'responses' && this.responsesConfig?.store !== undefined) {
|
|
52
|
+
if (body.store === undefined) {
|
|
53
|
+
body.store = this.responsesConfig.store;
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
// Apply previous_response_id for continuation
|
|
57
|
+
if (this.openaiApi === 'responses' && this.responsesConfig?.previousResponseId) {
|
|
58
|
+
if (!body.previous_response_id) {
|
|
59
|
+
body.previous_response_id = this.responsesConfig.previousResponseId;
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
}
|
|
63
|
+
async uploadFile(input) {
|
|
64
|
+
if (input.kind !== 'file') {
|
|
65
|
+
return null;
|
|
66
|
+
}
|
|
67
|
+
const FormDataCtor = globalThis.FormData;
|
|
68
|
+
const BlobCtor = globalThis.Blob;
|
|
69
|
+
if (!FormDataCtor || !BlobCtor) {
|
|
70
|
+
return null;
|
|
71
|
+
}
|
|
72
|
+
const form = new FormDataCtor();
|
|
73
|
+
form.append('file', new BlobCtor([input.data], { type: input.mimeType }), input.filename || 'file.pdf');
|
|
74
|
+
const purpose = this.providerOptions?.fileUploadPurpose || 'assistants';
|
|
75
|
+
form.append('purpose', purpose);
|
|
76
|
+
const response = await fetch(`${this.baseUrl}/files`, (0, utils_1.withProxy)({
|
|
77
|
+
method: 'POST',
|
|
78
|
+
headers: {
|
|
79
|
+
Authorization: `Bearer ${this.apiKey}`,
|
|
80
|
+
...(this.extraHeaders || {}),
|
|
81
|
+
},
|
|
82
|
+
body: form,
|
|
83
|
+
}, this.dispatcher));
|
|
84
|
+
if (!response.ok) {
|
|
85
|
+
const error = await response.text();
|
|
86
|
+
throw new Error(`OpenAI file upload error: ${response.status} ${error}`);
|
|
87
|
+
}
|
|
88
|
+
const data = await response.json();
|
|
89
|
+
const fileId = data?.id ?? data?.file_id;
|
|
90
|
+
if (!fileId) {
|
|
91
|
+
return null;
|
|
92
|
+
}
|
|
93
|
+
return { fileId };
|
|
94
|
+
}
|
|
95
|
+
async complete(messages, opts) {
|
|
96
|
+
const responseApi = this.resolveOpenAIApi(messages);
|
|
97
|
+
if (responseApi === 'responses') {
|
|
98
|
+
return this.completeWithResponses(messages, opts);
|
|
99
|
+
}
|
|
100
|
+
const body = {
|
|
101
|
+
...(this.extraBody || {}),
|
|
102
|
+
model: this.model,
|
|
103
|
+
messages: this.buildOpenAIMessages(messages, opts?.system, this.reasoningTransport),
|
|
104
|
+
};
|
|
105
|
+
if (opts?.tools && opts.tools.length > 0) {
|
|
106
|
+
body.tools = this.buildOpenAITools(opts.tools);
|
|
107
|
+
}
|
|
108
|
+
if (opts?.maxTokens !== undefined)
|
|
109
|
+
body.max_tokens = opts.maxTokens;
|
|
110
|
+
if (opts?.temperature !== undefined)
|
|
111
|
+
body.temperature = opts.temperature;
|
|
112
|
+
this.applyReasoningDefaults(body);
|
|
113
|
+
const response = await fetch(`${this.baseUrl}/chat/completions`, (0, utils_1.withProxy)({
|
|
114
|
+
method: 'POST',
|
|
115
|
+
headers: {
|
|
116
|
+
'Content-Type': 'application/json',
|
|
117
|
+
Authorization: `Bearer ${this.apiKey}`,
|
|
118
|
+
...(this.extraHeaders || {}),
|
|
119
|
+
},
|
|
120
|
+
body: JSON.stringify(body),
|
|
121
|
+
}, this.dispatcher));
|
|
122
|
+
if (!response.ok) {
|
|
123
|
+
const error = await response.text();
|
|
124
|
+
throw new Error(`OpenAI API error: ${response.status} ${error}`);
|
|
125
|
+
}
|
|
126
|
+
const data = await response.json();
|
|
127
|
+
const message = data.choices?.[0]?.message ?? {};
|
|
128
|
+
const contentBlocks = [];
|
|
129
|
+
const text = typeof message.content === 'string' ? message.content : '';
|
|
130
|
+
if (text) {
|
|
131
|
+
contentBlocks.push({ type: 'text', text });
|
|
132
|
+
}
|
|
133
|
+
const toolCalls = Array.isArray(message.tool_calls) ? message.tool_calls : [];
|
|
134
|
+
for (const call of toolCalls) {
|
|
135
|
+
const args = call?.function?.arguments;
|
|
136
|
+
let input = {};
|
|
137
|
+
if (typeof args === 'string') {
|
|
138
|
+
try {
|
|
139
|
+
input = JSON.parse(args);
|
|
140
|
+
}
|
|
141
|
+
catch {
|
|
142
|
+
input = { raw: args };
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
contentBlocks.push({
|
|
146
|
+
type: 'tool_use',
|
|
147
|
+
id: call.id,
|
|
148
|
+
name: call?.function?.name ?? 'tool',
|
|
149
|
+
input,
|
|
150
|
+
});
|
|
151
|
+
}
|
|
152
|
+
const reasoningBlocks = (0, utils_1.extractReasoningDetails)(message);
|
|
153
|
+
const combinedBlocks = reasoningBlocks.length > 0 ? [...reasoningBlocks, ...contentBlocks] : contentBlocks;
|
|
154
|
+
const normalizedBlocks = (0, utils_1.normalizeThinkBlocks)(combinedBlocks, this.reasoningTransport);
|
|
155
|
+
return {
|
|
156
|
+
role: 'assistant',
|
|
157
|
+
content: normalizedBlocks,
|
|
158
|
+
usage: data.usage
|
|
159
|
+
? {
|
|
160
|
+
input_tokens: data.usage.prompt_tokens ?? 0,
|
|
161
|
+
output_tokens: data.usage.completion_tokens ?? 0,
|
|
162
|
+
}
|
|
163
|
+
: undefined,
|
|
164
|
+
stop_reason: data.choices?.[0]?.finish_reason,
|
|
165
|
+
};
|
|
166
|
+
}
|
|
167
|
+
async *stream(messages, opts) {
|
|
168
|
+
const responseApi = this.resolveOpenAIApi(messages);
|
|
169
|
+
if (responseApi === 'responses') {
|
|
170
|
+
const response = await this.completeWithResponses(messages, opts);
|
|
171
|
+
let index = 0;
|
|
172
|
+
for (const block of response.content) {
|
|
173
|
+
if (block.type === 'text') {
|
|
174
|
+
yield { type: 'content_block_start', index, content_block: { type: 'text', text: '' } };
|
|
175
|
+
if (block.text) {
|
|
176
|
+
yield { type: 'content_block_delta', index, delta: { type: 'text_delta', text: block.text } };
|
|
177
|
+
}
|
|
178
|
+
yield { type: 'content_block_stop', index };
|
|
179
|
+
index += 1;
|
|
180
|
+
continue;
|
|
181
|
+
}
|
|
182
|
+
if (block.type === 'reasoning') {
|
|
183
|
+
yield { type: 'content_block_start', index, content_block: { type: 'reasoning', reasoning: '' } };
|
|
184
|
+
if (block.reasoning) {
|
|
185
|
+
yield { type: 'content_block_delta', index, delta: { type: 'reasoning_delta', text: block.reasoning } };
|
|
186
|
+
}
|
|
187
|
+
yield { type: 'content_block_stop', index };
|
|
188
|
+
index += 1;
|
|
189
|
+
}
|
|
190
|
+
}
|
|
191
|
+
if (response.usage) {
|
|
192
|
+
yield {
|
|
193
|
+
type: 'message_delta',
|
|
194
|
+
usage: {
|
|
195
|
+
input_tokens: response.usage.input_tokens ?? 0,
|
|
196
|
+
output_tokens: response.usage.output_tokens ?? 0,
|
|
197
|
+
},
|
|
198
|
+
};
|
|
199
|
+
}
|
|
200
|
+
yield { type: 'message_stop' };
|
|
201
|
+
return;
|
|
202
|
+
}
|
|
203
|
+
const body = {
|
|
204
|
+
...(this.extraBody || {}),
|
|
205
|
+
model: this.model,
|
|
206
|
+
messages: this.buildOpenAIMessages(messages, opts?.system, this.reasoningTransport),
|
|
207
|
+
stream: true,
|
|
208
|
+
stream_options: { include_usage: true },
|
|
209
|
+
};
|
|
210
|
+
if (opts?.tools && opts.tools.length > 0) {
|
|
211
|
+
body.tools = this.buildOpenAITools(opts.tools);
|
|
212
|
+
}
|
|
213
|
+
if (opts?.maxTokens !== undefined)
|
|
214
|
+
body.max_tokens = opts.maxTokens;
|
|
215
|
+
if (opts?.temperature !== undefined)
|
|
216
|
+
body.temperature = opts.temperature;
|
|
217
|
+
this.applyReasoningDefaults(body);
|
|
218
|
+
const response = await fetch(`${this.baseUrl}/chat/completions`, (0, utils_1.withProxy)({
|
|
219
|
+
method: 'POST',
|
|
220
|
+
headers: {
|
|
221
|
+
'Content-Type': 'application/json',
|
|
222
|
+
Authorization: `Bearer ${this.apiKey}`,
|
|
223
|
+
...(this.extraHeaders || {}),
|
|
224
|
+
},
|
|
225
|
+
body: JSON.stringify(body),
|
|
226
|
+
}, this.dispatcher));
|
|
227
|
+
if (!response.ok) {
|
|
228
|
+
const error = await response.text();
|
|
229
|
+
throw new Error(`OpenAI API error: ${response.status} ${error}`);
|
|
230
|
+
}
|
|
231
|
+
const reader = response.body?.getReader();
|
|
232
|
+
if (!reader)
|
|
233
|
+
throw new Error('No response body');
|
|
234
|
+
const decoder = new TextDecoder();
|
|
235
|
+
let buffer = '';
|
|
236
|
+
let textStarted = false;
|
|
237
|
+
const textIndex = 0;
|
|
238
|
+
let reasoningStarted = false;
|
|
239
|
+
const reasoningIndex = 1000;
|
|
240
|
+
let sawFinishReason = false;
|
|
241
|
+
let usageEmitted = false;
|
|
242
|
+
const toolCallBuffers = new Map();
|
|
243
|
+
function* flushToolCalls() {
|
|
244
|
+
if (toolCallBuffers.size === 0)
|
|
245
|
+
return;
|
|
246
|
+
const entries = Array.from(toolCallBuffers.entries()).sort((a, b) => a[0] - b[0]);
|
|
247
|
+
for (const [index, call] of entries) {
|
|
248
|
+
yield {
|
|
249
|
+
type: 'content_block_start',
|
|
250
|
+
index,
|
|
251
|
+
content_block: {
|
|
252
|
+
type: 'tool_use',
|
|
253
|
+
id: call.id ?? `toolcall-${index}`,
|
|
254
|
+
name: call.name ?? 'tool',
|
|
255
|
+
input: {},
|
|
256
|
+
},
|
|
257
|
+
};
|
|
258
|
+
if (call.args) {
|
|
259
|
+
yield {
|
|
260
|
+
type: 'content_block_delta',
|
|
261
|
+
index,
|
|
262
|
+
delta: { type: 'input_json_delta', partial_json: call.args },
|
|
263
|
+
};
|
|
264
|
+
}
|
|
265
|
+
yield { type: 'content_block_stop', index };
|
|
266
|
+
}
|
|
267
|
+
toolCallBuffers.clear();
|
|
268
|
+
}
|
|
269
|
+
while (true) {
|
|
270
|
+
const { done, value } = await reader.read();
|
|
271
|
+
if (done)
|
|
272
|
+
break;
|
|
273
|
+
buffer += decoder.decode(value, { stream: true });
|
|
274
|
+
const lines = buffer.split('\n');
|
|
275
|
+
buffer = lines.pop() || '';
|
|
276
|
+
for (const line of lines) {
|
|
277
|
+
const trimmed = line.trim();
|
|
278
|
+
if (!trimmed || !trimmed.startsWith('data:'))
|
|
279
|
+
continue;
|
|
280
|
+
const data = trimmed.slice(5).trim();
|
|
281
|
+
if (!data || data === '[DONE]')
|
|
282
|
+
continue;
|
|
283
|
+
let event;
|
|
284
|
+
try {
|
|
285
|
+
event = JSON.parse(data);
|
|
286
|
+
}
|
|
287
|
+
catch {
|
|
288
|
+
continue;
|
|
289
|
+
}
|
|
290
|
+
const choice = event.choices?.[0];
|
|
291
|
+
if (!choice)
|
|
292
|
+
continue;
|
|
293
|
+
const delta = choice.delta ?? {};
|
|
294
|
+
if (typeof delta.content === 'string' && delta.content.length > 0) {
|
|
295
|
+
if (!textStarted) {
|
|
296
|
+
textStarted = true;
|
|
297
|
+
yield {
|
|
298
|
+
type: 'content_block_start',
|
|
299
|
+
index: textIndex,
|
|
300
|
+
content_block: { type: 'text', text: '' },
|
|
301
|
+
};
|
|
302
|
+
}
|
|
303
|
+
yield {
|
|
304
|
+
type: 'content_block_delta',
|
|
305
|
+
index: textIndex,
|
|
306
|
+
delta: { type: 'text_delta', text: delta.content },
|
|
307
|
+
};
|
|
308
|
+
}
|
|
309
|
+
if (typeof delta.reasoning_content === 'string') {
|
|
310
|
+
const reasoningText = delta.reasoning_content;
|
|
311
|
+
if (!reasoningStarted) {
|
|
312
|
+
reasoningStarted = true;
|
|
313
|
+
yield {
|
|
314
|
+
type: 'content_block_start',
|
|
315
|
+
index: reasoningIndex,
|
|
316
|
+
content_block: { type: 'reasoning', reasoning: '' },
|
|
317
|
+
};
|
|
318
|
+
}
|
|
319
|
+
yield {
|
|
320
|
+
type: 'content_block_delta',
|
|
321
|
+
index: reasoningIndex,
|
|
322
|
+
delta: { type: 'reasoning_delta', text: reasoningText },
|
|
323
|
+
};
|
|
324
|
+
}
|
|
325
|
+
const toolCalls = Array.isArray(delta.tool_calls) ? delta.tool_calls : [];
|
|
326
|
+
for (const call of toolCalls) {
|
|
327
|
+
const index = typeof call.index === 'number' ? call.index : 0;
|
|
328
|
+
const entry = toolCallBuffers.get(index) ?? { args: '' };
|
|
329
|
+
if (call.id)
|
|
330
|
+
entry.id = call.id;
|
|
331
|
+
if (call.function?.name)
|
|
332
|
+
entry.name = call.function.name;
|
|
333
|
+
if (typeof call.function?.arguments === 'string') {
|
|
334
|
+
entry.args += call.function.arguments;
|
|
335
|
+
}
|
|
336
|
+
toolCallBuffers.set(index, entry);
|
|
337
|
+
}
|
|
338
|
+
if (event.usage && !usageEmitted) {
|
|
339
|
+
usageEmitted = true;
|
|
340
|
+
yield {
|
|
341
|
+
type: 'message_delta',
|
|
342
|
+
usage: {
|
|
343
|
+
input_tokens: event.usage.prompt_tokens ?? 0,
|
|
344
|
+
output_tokens: event.usage.completion_tokens ?? 0,
|
|
345
|
+
},
|
|
346
|
+
};
|
|
347
|
+
}
|
|
348
|
+
if (choice.finish_reason) {
|
|
349
|
+
sawFinishReason = true;
|
|
350
|
+
}
|
|
351
|
+
}
|
|
352
|
+
}
|
|
353
|
+
if (textStarted) {
|
|
354
|
+
yield { type: 'content_block_stop', index: textIndex };
|
|
355
|
+
}
|
|
356
|
+
if (reasoningStarted) {
|
|
357
|
+
yield { type: 'content_block_stop', index: reasoningIndex };
|
|
358
|
+
}
|
|
359
|
+
if (toolCallBuffers.size > 0) {
|
|
360
|
+
yield* flushToolCalls();
|
|
361
|
+
}
|
|
362
|
+
if (sawFinishReason && !usageEmitted) {
|
|
363
|
+
yield {
|
|
364
|
+
type: 'message_delta',
|
|
365
|
+
usage: { input_tokens: 0, output_tokens: 0 },
|
|
366
|
+
};
|
|
367
|
+
}
|
|
368
|
+
}
|
|
369
|
+
toConfig() {
|
|
370
|
+
return {
|
|
371
|
+
provider: 'openai',
|
|
372
|
+
model: this.model,
|
|
373
|
+
baseUrl: this.baseUrl,
|
|
374
|
+
apiKey: this.apiKey,
|
|
375
|
+
maxTokens: this.maxOutputTokens,
|
|
376
|
+
temperature: this.temperature,
|
|
377
|
+
reasoningTransport: this.reasoningTransport,
|
|
378
|
+
extraHeaders: this.extraHeaders,
|
|
379
|
+
extraBody: this.extraBody,
|
|
380
|
+
providerOptions: {
|
|
381
|
+
...this.providerOptions,
|
|
382
|
+
api: this.openaiApi,
|
|
383
|
+
reasoning: this.reasoning,
|
|
384
|
+
responses: this.responsesConfig,
|
|
385
|
+
},
|
|
386
|
+
multimodal: this.multimodal,
|
|
387
|
+
thinking: this.thinking,
|
|
388
|
+
};
|
|
389
|
+
}
|
|
390
|
+
resolveOpenAIApi(messages) {
|
|
391
|
+
if (this.openaiApi !== 'responses') {
|
|
392
|
+
return 'chat';
|
|
393
|
+
}
|
|
394
|
+
const hasFile = messages.some((message) => (0, utils_1.getMessageBlocks)(message).some((block) => block.type === 'file'));
|
|
395
|
+
return hasFile ? 'responses' : 'chat';
|
|
396
|
+
}
|
|
397
|
+
async completeWithResponses(messages, opts) {
|
|
398
|
+
const input = this.buildOpenAIResponsesInput(messages, this.reasoningTransport);
|
|
399
|
+
const body = {
|
|
400
|
+
...(this.extraBody || {}),
|
|
401
|
+
model: this.model,
|
|
402
|
+
input,
|
|
403
|
+
};
|
|
404
|
+
if (opts?.temperature !== undefined)
|
|
405
|
+
body.temperature = opts.temperature;
|
|
406
|
+
if (opts?.maxTokens !== undefined)
|
|
407
|
+
body.max_output_tokens = opts.maxTokens;
|
|
408
|
+
if (opts?.system)
|
|
409
|
+
body.instructions = opts.system;
|
|
410
|
+
this.applyReasoningDefaults(body);
|
|
411
|
+
const response = await fetch(`${this.baseUrl}/responses`, (0, utils_1.withProxy)({
|
|
412
|
+
method: 'POST',
|
|
413
|
+
headers: {
|
|
414
|
+
'Content-Type': 'application/json',
|
|
415
|
+
Authorization: `Bearer ${this.apiKey}`,
|
|
416
|
+
...(this.extraHeaders || {}),
|
|
417
|
+
},
|
|
418
|
+
body: JSON.stringify(body),
|
|
419
|
+
}, this.dispatcher));
|
|
420
|
+
if (!response.ok) {
|
|
421
|
+
const error = await response.text();
|
|
422
|
+
throw new Error(`OpenAI API error: ${response.status} ${error}`);
|
|
423
|
+
}
|
|
424
|
+
const data = await response.json();
|
|
425
|
+
const contentBlocks = [];
|
|
426
|
+
const outputs = Array.isArray(data.output) ? data.output : [];
|
|
427
|
+
for (const output of outputs) {
|
|
428
|
+
const parts = output?.content || [];
|
|
429
|
+
for (const part of parts) {
|
|
430
|
+
if (part.type === 'output_text' && typeof part.text === 'string') {
|
|
431
|
+
contentBlocks.push({ type: 'text', text: part.text });
|
|
432
|
+
}
|
|
433
|
+
}
|
|
434
|
+
}
|
|
435
|
+
const normalizedBlocks = (0, utils_1.normalizeThinkBlocks)(contentBlocks, this.reasoningTransport);
|
|
436
|
+
return {
|
|
437
|
+
role: 'assistant',
|
|
438
|
+
content: normalizedBlocks,
|
|
439
|
+
usage: data.usage
|
|
440
|
+
? {
|
|
441
|
+
input_tokens: data.usage.input_tokens ?? 0,
|
|
442
|
+
output_tokens: data.usage.output_tokens ?? 0,
|
|
443
|
+
}
|
|
444
|
+
: undefined,
|
|
445
|
+
stop_reason: data.status,
|
|
446
|
+
};
|
|
447
|
+
}
|
|
448
|
+
buildOpenAITools(tools) {
|
|
449
|
+
return tools.map((tool) => ({
|
|
450
|
+
type: 'function',
|
|
451
|
+
function: {
|
|
452
|
+
name: tool.name,
|
|
453
|
+
description: tool.description,
|
|
454
|
+
parameters: tool.input_schema,
|
|
455
|
+
},
|
|
456
|
+
}));
|
|
457
|
+
}
|
|
458
|
+
buildOpenAIMessages(messages, system, reasoningTransport = 'text') {
|
|
459
|
+
const output = [];
|
|
460
|
+
const toolCallNames = new Map();
|
|
461
|
+
const useStructuredContent = messages.some((msg) => (0, utils_1.getMessageBlocks)(msg).some((block) => block.type === 'image' || block.type === 'audio' || block.type === 'file'));
|
|
462
|
+
for (const msg of messages) {
|
|
463
|
+
for (const block of (0, utils_1.getMessageBlocks)(msg)) {
|
|
464
|
+
if (block.type === 'tool_use') {
|
|
465
|
+
toolCallNames.set(block.id, block.name);
|
|
466
|
+
}
|
|
467
|
+
}
|
|
468
|
+
}
|
|
469
|
+
if (system) {
|
|
470
|
+
output.push({
|
|
471
|
+
role: 'system',
|
|
472
|
+
content: useStructuredContent ? [{ type: 'text', text: system }] : system,
|
|
473
|
+
});
|
|
474
|
+
}
|
|
475
|
+
for (const msg of messages) {
|
|
476
|
+
const blocks = (0, utils_1.getMessageBlocks)(msg);
|
|
477
|
+
if (msg.role === 'system') {
|
|
478
|
+
const text = (0, utils_1.concatTextWithReasoning)(blocks, reasoningTransport);
|
|
479
|
+
if (text) {
|
|
480
|
+
output.push({
|
|
481
|
+
role: 'system',
|
|
482
|
+
content: useStructuredContent ? [{ type: 'text', text }] : text,
|
|
483
|
+
});
|
|
484
|
+
}
|
|
485
|
+
continue;
|
|
486
|
+
}
|
|
487
|
+
if (msg.role === 'assistant') {
|
|
488
|
+
const text = (0, utils_1.concatTextWithReasoning)(blocks, reasoningTransport);
|
|
489
|
+
const toolCalls = blocks.filter((block) => block.type === 'tool_use');
|
|
490
|
+
const reasoningBlocks = blocks.filter((block) => block.type === 'reasoning');
|
|
491
|
+
const entry = { role: 'assistant' };
|
|
492
|
+
if (text) {
|
|
493
|
+
entry.content = useStructuredContent ? [{ type: 'text', text }] : text;
|
|
494
|
+
}
|
|
495
|
+
if (toolCalls.length > 0) {
|
|
496
|
+
entry.tool_calls = toolCalls.map((call) => ({
|
|
497
|
+
id: call.id,
|
|
498
|
+
type: 'function',
|
|
499
|
+
function: {
|
|
500
|
+
name: call.name,
|
|
501
|
+
arguments: (0, utils_1.safeJsonStringify)(call.input ?? {}),
|
|
502
|
+
},
|
|
503
|
+
}));
|
|
504
|
+
if (!entry.content)
|
|
505
|
+
entry.content = null;
|
|
506
|
+
}
|
|
507
|
+
// Add reasoning to history based on configuration
|
|
508
|
+
if (reasoningTransport === 'provider' && reasoningBlocks.length > 0) {
|
|
509
|
+
// Skip if stripFromHistory is enabled (e.g., DeepSeek)
|
|
510
|
+
if (!this.reasoning?.stripFromHistory) {
|
|
511
|
+
const fieldName = this.reasoning?.fieldName ?? 'reasoning_content';
|
|
512
|
+
if (fieldName === 'reasoning_details') {
|
|
513
|
+
// Minimax format: array of { text: string }
|
|
514
|
+
entry.reasoning_details = reasoningBlocks.map((block) => ({ text: block.reasoning }));
|
|
515
|
+
}
|
|
516
|
+
else {
|
|
517
|
+
// Default format: concatenated string
|
|
518
|
+
entry.reasoning_content = (0, utils_1.joinReasoningBlocks)(reasoningBlocks);
|
|
519
|
+
}
|
|
520
|
+
}
|
|
521
|
+
}
|
|
522
|
+
if (entry.content !== undefined || entry.tool_calls || entry.reasoning_content || entry.reasoning_details) {
|
|
523
|
+
output.push(entry);
|
|
524
|
+
}
|
|
525
|
+
continue;
|
|
526
|
+
}
|
|
527
|
+
if (msg.role === 'user') {
|
|
528
|
+
const result = this.buildOpenAIUserMessages(blocks, toolCallNames, reasoningTransport);
|
|
529
|
+
if (result.degraded) {
|
|
530
|
+
(0, utils_1.markTransportIfDegraded)(msg, blocks);
|
|
531
|
+
}
|
|
532
|
+
for (const entry of result.entries) {
|
|
533
|
+
output.push(entry);
|
|
534
|
+
}
|
|
535
|
+
}
|
|
536
|
+
}
|
|
537
|
+
return output;
|
|
538
|
+
}
|
|
539
|
+
buildOpenAIUserMessages(blocks, toolCallNames, reasoningTransport = 'text') {
|
|
540
|
+
const entries = [];
|
|
541
|
+
let contentParts = [];
|
|
542
|
+
let degraded = false;
|
|
543
|
+
const appendText = (text) => {
|
|
544
|
+
if (!text)
|
|
545
|
+
return;
|
|
546
|
+
const last = contentParts[contentParts.length - 1];
|
|
547
|
+
if (last && last.type === 'text') {
|
|
548
|
+
last.text += text;
|
|
549
|
+
}
|
|
550
|
+
else {
|
|
551
|
+
contentParts.push({ type: 'text', text });
|
|
552
|
+
}
|
|
553
|
+
};
|
|
554
|
+
const flushUser = () => {
|
|
555
|
+
if (contentParts.length === 0)
|
|
556
|
+
return;
|
|
557
|
+
entries.push({ role: 'user', content: contentParts });
|
|
558
|
+
contentParts = [];
|
|
559
|
+
};
|
|
560
|
+
for (const block of blocks) {
|
|
561
|
+
if (block.type === 'text') {
|
|
562
|
+
appendText(block.text);
|
|
563
|
+
continue;
|
|
564
|
+
}
|
|
565
|
+
if (block.type === 'reasoning') {
|
|
566
|
+
if (reasoningTransport === 'text') {
|
|
567
|
+
appendText(`<think>${block.reasoning}</think>`);
|
|
568
|
+
}
|
|
569
|
+
continue;
|
|
570
|
+
}
|
|
571
|
+
if (block.type === 'image') {
|
|
572
|
+
if (block.url) {
|
|
573
|
+
contentParts.push({ type: 'image_url', image_url: { url: block.url } });
|
|
574
|
+
}
|
|
575
|
+
else if (block.base64 && block.mime_type) {
|
|
576
|
+
contentParts.push({
|
|
577
|
+
type: 'image_url',
|
|
578
|
+
image_url: { url: `data:${block.mime_type};base64,${block.base64}` },
|
|
579
|
+
});
|
|
580
|
+
}
|
|
581
|
+
else {
|
|
582
|
+
degraded = true;
|
|
583
|
+
appendText(utils_1.IMAGE_UNSUPPORTED_TEXT);
|
|
584
|
+
}
|
|
585
|
+
continue;
|
|
586
|
+
}
|
|
587
|
+
if (block.type === 'audio') {
|
|
588
|
+
degraded = true;
|
|
589
|
+
appendText(utils_1.AUDIO_UNSUPPORTED_TEXT);
|
|
590
|
+
continue;
|
|
591
|
+
}
|
|
592
|
+
if (block.type === 'file') {
|
|
593
|
+
degraded = true;
|
|
594
|
+
appendText(utils_1.FILE_UNSUPPORTED_TEXT);
|
|
595
|
+
continue;
|
|
596
|
+
}
|
|
597
|
+
if (block.type === 'tool_result') {
|
|
598
|
+
flushUser();
|
|
599
|
+
const toolMessage = {
|
|
600
|
+
role: 'tool',
|
|
601
|
+
tool_call_id: block.tool_use_id,
|
|
602
|
+
content: (0, utils_1.formatToolResult)(block.content),
|
|
603
|
+
};
|
|
604
|
+
const name = toolCallNames.get(block.tool_use_id);
|
|
605
|
+
if (name)
|
|
606
|
+
toolMessage.name = name;
|
|
607
|
+
entries.push(toolMessage);
|
|
608
|
+
continue;
|
|
609
|
+
}
|
|
610
|
+
}
|
|
611
|
+
flushUser();
|
|
612
|
+
return { entries, degraded };
|
|
613
|
+
}
|
|
614
|
+
buildOpenAIResponsesInput(messages, reasoningTransport = 'text') {
|
|
615
|
+
const input = [];
|
|
616
|
+
for (const msg of messages) {
|
|
617
|
+
const blocks = (0, utils_1.getMessageBlocks)(msg);
|
|
618
|
+
const parts = [];
|
|
619
|
+
let degraded = false;
|
|
620
|
+
const textType = msg.role === 'assistant' ? 'output_text' : 'input_text';
|
|
621
|
+
for (const block of blocks) {
|
|
622
|
+
if (block.type === 'text') {
|
|
623
|
+
parts.push({ type: textType, text: block.text });
|
|
624
|
+
}
|
|
625
|
+
else if (block.type === 'reasoning' && reasoningTransport === 'text') {
|
|
626
|
+
parts.push({ type: textType, text: `<think>${block.reasoning}</think>` });
|
|
627
|
+
}
|
|
628
|
+
else if (block.type === 'audio') {
|
|
629
|
+
degraded = true;
|
|
630
|
+
parts.push({ type: textType, text: utils_1.AUDIO_UNSUPPORTED_TEXT });
|
|
631
|
+
}
|
|
632
|
+
else if (block.type === 'file') {
|
|
633
|
+
if (block.file_id) {
|
|
634
|
+
parts.push({ type: 'input_file', file_id: block.file_id });
|
|
635
|
+
}
|
|
636
|
+
else if (block.url) {
|
|
637
|
+
parts.push({ type: 'input_file', file_url: block.url });
|
|
638
|
+
}
|
|
639
|
+
else if (block.base64 && block.mime_type) {
|
|
640
|
+
parts.push({
|
|
641
|
+
type: 'input_file',
|
|
642
|
+
filename: block.filename || 'file.pdf',
|
|
643
|
+
file_data: `data:${block.mime_type};base64,${block.base64}`,
|
|
644
|
+
});
|
|
645
|
+
}
|
|
646
|
+
else {
|
|
647
|
+
degraded = true;
|
|
648
|
+
parts.push({ type: textType, text: utils_1.FILE_UNSUPPORTED_TEXT });
|
|
649
|
+
}
|
|
650
|
+
}
|
|
651
|
+
}
|
|
652
|
+
if (degraded) {
|
|
653
|
+
(0, utils_1.markTransportIfDegraded)(msg, blocks);
|
|
654
|
+
}
|
|
655
|
+
if (parts.length > 0) {
|
|
656
|
+
input.push({ role: msg.role, content: parts });
|
|
657
|
+
}
|
|
658
|
+
}
|
|
659
|
+
return input;
|
|
660
|
+
}
|
|
661
|
+
}
|
|
662
|
+
exports.OpenAIProvider = OpenAIProvider;
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"use strict";
/**
 * Provider Adapter Types
 *
 * KODE Agent SDK uses Anthropic-style messages as the internal canonical format.
 * All providers act as adapters that convert to/from this format.
 *
 * Internal Flow:
 *   Internal Message[] (Anthropic-style ContentBlocks)
 *     -> Provider.formatMessages() -> External API format
 *     -> API call
 *     -> Response -> normalizeContent() -> Internal ContentBlock[]
 *
 * Provider-Specific Requirements:
 * - Anthropic: Preserve thinking signatures for multi-turn
 * - OpenAI Responses: Use previous_response_id for state
 * - DeepSeek/Qwen: Must NOT include reasoning_content in history
 * - Gemini: Use thinkingLevel (not thinkingBudget) for 3.x
 */
Object.defineProperty(exports, "__esModule", { value: true });
|