@agenr/agenr-plugin 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -0
- package/dist/anthropic-RE4XNAKE.js +5515 -0
- package/dist/azure-openai-responses-IQLXOCZS.js +190 -0
- package/dist/chunk-6DQXEU2A.js +32306 -0
- package/dist/chunk-EAQYK3U2.js +41 -0
- package/dist/chunk-HNWLZUWE.js +31 -0
- package/dist/chunk-JRUUYSFL.js +262 -0
- package/dist/chunk-OLOUBEE5.js +14022 -0
- package/dist/chunk-P5HNPYGQ.js +174 -0
- package/dist/chunk-RD7BUOBD.js +416 -0
- package/dist/chunk-RWWH2U4W.js +7056 -0
- package/dist/chunk-SEOMNQGB.js +86 -0
- package/dist/chunk-SQLXP7LT.js +4792 -0
- package/dist/chunk-URGOKODJ.js +17 -0
- package/dist/dist-R6ESEJ6P.js +1244 -0
- package/dist/google-NAVXTQLO.js +371 -0
- package/dist/google-gemini-cli-NKYJWHX2.js +712 -0
- package/dist/google-vertex-ZBJ2EDRH.js +414 -0
- package/dist/index.js +15942 -0
- package/dist/mistral-SBQYC4J5.js +38407 -0
- package/dist/multipart-parser-DV373IRF.js +371 -0
- package/dist/openai-codex-responses-XN3T3DEN.js +712 -0
- package/dist/openai-completions-75ZFOFU6.js +657 -0
- package/dist/openai-responses-DCK4BVNT.js +198 -0
- package/dist/src-T5RRS2HN.js +1408 -0
- package/openclaw.plugin.json +86 -0
- package/package.json +31 -0
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/utils/sanitize-unicode.js
|
|
2
|
+
/**
 * Removes unpaired UTF-16 surrogate halves from `text`.
 * Valid surrogate pairs (e.g. emoji) are kept intact; a high surrogate not
 * followed by a low one, or a low surrogate not preceded by a high one, is
 * dropped. Equivalent to the regex-based original, implemented as an
 * explicit code-unit scan.
 */
function sanitizeSurrogates(text) {
  let result = "";
  for (let i = 0; i < text.length; i++) {
    const unit = text.charCodeAt(i);
    const isHigh = unit >= 0xd800 && unit <= 0xdbff;
    const isLow = unit >= 0xdc00 && unit <= 0xdfff;
    if (isHigh) {
      const next = text.charCodeAt(i + 1);
      if (next >= 0xdc00 && next <= 0xdfff) {
        // Well-formed pair: keep both units and skip the low half.
        result += text[i] + text[i + 1];
        i++;
      }
      // Lone high surrogate: drop it.
    } else if (isLow) {
      // Lone low surrogate (paired ones were consumed above): drop it.
    } else {
      result += text[i];
    }
  }
  return result;
}
|
|
5
|
+
|
|
6
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/simple-options.js
|
|
7
|
+
/**
 * Assembles the provider-agnostic base request options.
 *
 * @param {Object} model - Model descriptor; `model.maxTokens` caps the default budget.
 * @param {Object} [options] - Per-request overrides (all fields optional).
 * @param {string} [apiKey] - Explicit key; takes precedence over `options.apiKey`.
 * @returns {Object} Options object shared by all provider implementations.
 */
function buildBaseOptions(model, options, apiKey) {
  // NOTE: `||` (not `??`) means a zero maxTokens also falls back to the
  // default of min(model limit, 32k) — matches the original behavior.
  const maxTokens = options?.maxTokens || Math.min(model.maxTokens, 32e3);
  const resolvedApiKey = apiKey || options?.apiKey;
  return {
    temperature: options?.temperature,
    maxTokens,
    signal: options?.signal,
    apiKey: resolvedApiKey,
    cacheRetention: options?.cacheRetention,
    sessionId: options?.sessionId,
    headers: options?.headers,
    onPayload: options?.onPayload,
    maxRetryDelayMs: options?.maxRetryDelayMs,
    metadata: options?.metadata
  };
}
|
|
21
|
+
/**
 * Clamps the extended "xhigh" reasoning effort down to "high"; every other
 * value (including undefined) passes through unchanged.
 */
function clampReasoning(effort) {
  if (effort === "xhigh") {
    return "high";
  }
  return effort;
}
|
|
24
|
+
/**
 * Computes the final maxTokens / thinkingBudget pair for a reasoning request.
 * The thinking budget for the (clamped) effort level is added on top of the
 * base output budget and capped at the model's limit; if the cap leaves no
 * headroom for regular output, the thinking budget is shrunk to preserve at
 * least 1024 output tokens.
 *
 * @param {number} baseMaxTokens - Requested output-token budget (pre-thinking).
 * @param {number} modelMaxTokens - Hard model limit.
 * @param {string} reasoningLevel - Effort level ("minimal"|"low"|"medium"|"high"|"xhigh").
 * @param {Object} [customBudgets] - Optional per-level budget overrides.
 * @returns {{maxTokens: number, thinkingBudget: number}}
 */
function adjustMaxTokensForThinking(baseMaxTokens, modelMaxTokens, reasoningLevel, customBudgets) {
  // Per-effort thinking-token budgets; caller overrides win.
  const budgets = {
    minimal: 1024,
    low: 2048,
    medium: 8192,
    high: 16384,
    ...customBudgets
  };
  const minOutputTokens = 1024;
  const effort = clampReasoning(reasoningLevel);
  let thinkingBudget = budgets[effort];
  const maxTokens = Math.min(baseMaxTokens + thinkingBudget, modelMaxTokens);
  if (maxTokens <= thinkingBudget) {
    // The model cap swallowed the whole window; keep room for real output.
    thinkingBudget = Math.max(0, maxTokens - minOutputTokens);
  }
  return { maxTokens, thinkingBudget };
}
|
|
41
|
+
|
|
42
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/transform-messages.js
|
|
43
|
+
/**
 * Normalizes a conversation history so it can be replayed against `model`.
 *
 * Pass 1 rewrites each assistant message: thinking blocks from a different
 * model/provider/api are downgraded to plain text (or dropped when empty or
 * redacted), provider-specific metadata is stripped from foreign text and
 * tool-call blocks, and tool-call ids are remapped via `normalizeToolCallId`
 * when switching models; matching toolResult messages pick up the remapped
 * ids through `toolCallIdMap`.
 *
 * Pass 2 guarantees every tool call is answered: before the next assistant
 * or user message, a synthetic error toolResult ("No result provided") is
 * inserted for any pending tool call that never received a result.
 * Assistant messages whose stopReason is "error" or "aborted" are dropped.
 *
 * @param {Array} messages - Conversation messages (user/assistant/toolResult).
 * @param {Object} model - Target model descriptor ({ provider, api, id, ... }).
 * @param {Function} [normalizeToolCallId] - Optional (id, model, sourceMsg) => id
 *   remapper applied to tool calls that came from a different model.
 * @returns {Array} New message list safe to send to `model`.
 */
function transformMessages(messages, model, normalizeToolCallId) {
  const toolCallIdMap = /* @__PURE__ */ new Map();
  const transformed = messages.map((msg) => {
    if (msg.role === "user") {
      return msg;
    }
    if (msg.role === "toolResult") {
      // Follow any id remap recorded for the originating tool call.
      const normalizedId = toolCallIdMap.get(msg.toolCallId);
      if (normalizedId && normalizedId !== msg.toolCallId) {
        return { ...msg, toolCallId: normalizedId };
      }
      return msg;
    }
    if (msg.role === "assistant") {
      const assistantMsg = msg;
      const isSameModel = assistantMsg.provider === model.provider && assistantMsg.api === model.api && assistantMsg.model === model.id;
      const transformedContent = assistantMsg.content.flatMap((block) => {
        if (block.type === "thinking") {
          // Redacted thinking is only meaningful to the model that made it.
          if (block.redacted) {
            return isSameModel ? block : [];
          }
          if (isSameModel && block.thinkingSignature)
            return block;
          if (!block.thinking || block.thinking.trim() === "")
            return [];
          if (isSameModel)
            return block;
          // Foreign thinking becomes plain text the target model can read.
          return {
            type: "text",
            text: block.thinking
          };
        }
        if (block.type === "text") {
          if (isSameModel)
            return block;
          // Rebuild as a bare text block, dropping provider-specific fields
          // (e.g. textSignature) that only the source model understands.
          return {
            type: "text",
            text: block.text
          };
        }
        if (block.type === "toolCall") {
          const toolCall = block;
          let normalizedToolCall = toolCall;
          if (!isSameModel && toolCall.thoughtSignature) {
            normalizedToolCall = { ...toolCall };
            delete normalizedToolCall.thoughtSignature;
          }
          if (!isSameModel && normalizeToolCallId) {
            const normalizedId = normalizeToolCallId(toolCall.id, model, assistantMsg);
            if (normalizedId !== toolCall.id) {
              // Remember the remap so the matching toolResult updates too.
              toolCallIdMap.set(toolCall.id, normalizedId);
              normalizedToolCall = { ...normalizedToolCall, id: normalizedId };
            }
          }
          return normalizedToolCall;
        }
        return block;
      });
      return {
        ...assistantMsg,
        content: transformedContent
      };
    }
    return msg;
  });
  // Pass 2: pair every tool call with a tool result.
  const result = [];
  let pendingToolCalls = [];
  let existingToolResultIds = /* @__PURE__ */ new Set();
  // Emit a synthetic error result for each pending tool call that never got
  // one, then reset the tracking state. (Previously duplicated inline in
  // both the assistant and user branches.)
  const flushPendingToolCalls = () => {
    for (const tc of pendingToolCalls) {
      if (!existingToolResultIds.has(tc.id)) {
        result.push({
          role: "toolResult",
          toolCallId: tc.id,
          toolName: tc.name,
          content: [{ type: "text", text: "No result provided" }],
          isError: true,
          timestamp: Date.now()
        });
      }
    }
    pendingToolCalls = [];
    existingToolResultIds = /* @__PURE__ */ new Set();
  };
  for (const msg of transformed) {
    if (msg.role === "assistant") {
      if (pendingToolCalls.length > 0) {
        flushPendingToolCalls();
      }
      const assistantMsg = msg;
      // Failed/aborted turns are dropped from the replayed history.
      if (assistantMsg.stopReason === "error" || assistantMsg.stopReason === "aborted") {
        continue;
      }
      const toolCalls = assistantMsg.content.filter((b) => b.type === "toolCall");
      if (toolCalls.length > 0) {
        pendingToolCalls = toolCalls;
        existingToolResultIds = /* @__PURE__ */ new Set();
      }
      result.push(msg);
    } else if (msg.role === "toolResult") {
      existingToolResultIds.add(msg.toolCallId);
      result.push(msg);
    } else if (msg.role === "user") {
      if (pendingToolCalls.length > 0) {
        flushPendingToolCalls();
      }
      result.push(msg);
    } else {
      result.push(msg);
    }
  }
  return result;
}
|
|
167
|
+
|
|
168
|
+
export {
|
|
169
|
+
sanitizeSurrogates,
|
|
170
|
+
buildBaseOptions,
|
|
171
|
+
clampReasoning,
|
|
172
|
+
adjustMaxTokensForThinking,
|
|
173
|
+
transformMessages
|
|
174
|
+
};
|
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
import {
|
|
2
|
+
shortHash
|
|
3
|
+
} from "./chunk-URGOKODJ.js";
|
|
4
|
+
import {
|
|
5
|
+
parseStreamingJson
|
|
6
|
+
} from "./chunk-JRUUYSFL.js";
|
|
7
|
+
import {
|
|
8
|
+
sanitizeSurrogates,
|
|
9
|
+
transformMessages
|
|
10
|
+
} from "./chunk-P5HNPYGQ.js";
|
|
11
|
+
import {
|
|
12
|
+
calculateCost
|
|
13
|
+
} from "./chunk-OLOUBEE5.js";
|
|
14
|
+
|
|
15
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/openai-responses-shared.js
|
|
16
|
+
/**
 * Serializes a v1 text-signature payload ({ v: 1, id [, phase] }) to JSON.
 * A falsy phase (undefined, "") is omitted from the payload.
 */
function encodeTextSignatureV1(id, phase) {
  return JSON.stringify(phase ? { v: 1, id, phase } : { v: 1, id });
}
|
|
22
|
+
/**
 * Parses a text signature back into { id [, phase] }.
 * JSON payloads with v === 1 and a string id are decoded (phase is kept only
 * when it is "commentary" or "final_answer"); anything else — including
 * malformed JSON or unknown versions — is treated as a raw legacy id.
 * Returns undefined for a falsy signature.
 */
function parseTextSignature(signature) {
  if (!signature)
    return void 0;
  if (signature.startsWith("{")) {
    let parsed;
    try {
      parsed = JSON.parse(signature);
    } catch {
      parsed = void 0;
    }
    if (parsed && parsed.v === 1 && typeof parsed.id === "string") {
      const { id, phase } = parsed;
      return phase === "commentary" || phase === "final_answer" ? { id, phase } : { id };
    }
  }
  // Legacy/unrecognized signature: the whole string is the id.
  return { id: signature };
}
|
|
39
|
+
/**
 * Converts the internal conversation (`context.messages` + systemPrompt) into
 * the OpenAI Responses API input-item list for `model`.
 *
 * Tool-call ids use the internal compound form "callId|itemId"; item ids are
 * sanitized to [a-zA-Z0-9_-], capped at 64 chars, and foreign ids (from a
 * different provider/api) are rebuilt as `fc_<shortHash>` so the Responses
 * API accepts them.
 *
 * @param {Object} model - Target model descriptor (provider, api, id, input, reasoning).
 * @param {Object} context - { systemPrompt?, messages } conversation context.
 * @param {Set} allowedToolCallProviders - Providers whose compound ids keep the item-id half.
 * @param {Object} [options] - { includeSystemPrompt? } (defaults to true).
 * @returns {Array} Responses API input items (messages, function_call, function_call_output).
 */
function convertResponsesMessages(model, context, allowedToolCallProviders, options) {
  const messages = [];
  // Restrict an id fragment to API-safe characters, cap at 64 chars, and
  // strip trailing underscores left over from the replacement.
  const normalizeIdPart = (part) => {
    const sanitized = part.replace(/[^a-zA-Z0-9_-]/g, "_");
    const normalized = sanitized.length > 64 ? sanitized.slice(0, 64) : sanitized;
    return normalized.replace(/_+$/, "");
  };
  // Item ids minted by another provider can't be replayed verbatim; derive a
  // stable synthetic "fc_<hash>" id from them instead.
  const buildForeignResponsesItemId = (itemId) => {
    const normalized = `fc_${shortHash(itemId)}`;
    return normalized.length > 64 ? normalized.slice(0, 64) : normalized;
  };
  // Remapper handed to transformMessages; it runs only for tool calls that
  // originate from a different model (see transformMessages).
  const normalizeToolCallId = (id, _targetModel, source) => {
    if (!allowedToolCallProviders.has(model.provider))
      return normalizeIdPart(id);
    if (!id.includes("|"))
      return normalizeIdPart(id);
    const [callId, itemId] = id.split("|");
    const normalizedCallId = normalizeIdPart(callId);
    const isForeignToolCall = source.provider !== model.provider || source.api !== model.api;
    let normalizedItemId = isForeignToolCall ? buildForeignResponsesItemId(itemId) : normalizeIdPart(itemId);
    if (!normalizedItemId.startsWith("fc_")) {
      normalizedItemId = normalizeIdPart(`fc_${normalizedItemId}`);
    }
    return `${normalizedCallId}|${normalizedItemId}`;
  };
  const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);
  const includeSystemPrompt = options?.includeSystemPrompt ?? true;
  if (includeSystemPrompt && context.systemPrompt) {
    // Reasoning models take the system prompt under the "developer" role.
    const role = model.reasoning ? "developer" : "system";
    messages.push({
      role,
      content: sanitizeSurrogates(context.systemPrompt)
    });
  }
  let msgIndex = 0;
  for (const msg of transformedMessages) {
    if (msg.role === "user") {
      if (typeof msg.content === "string") {
        messages.push({
          role: "user",
          content: [{ type: "input_text", text: sanitizeSurrogates(msg.content) }]
        });
      } else {
        // Structured user content: text blocks plus base64 images.
        const content = msg.content.map((item) => {
          if (item.type === "text") {
            return {
              type: "input_text",
              text: sanitizeSurrogates(item.text)
            };
          }
          // Non-text blocks are assumed to carry mimeType/data image fields
          // — TODO confirm no other block types reach user messages here.
          return {
            type: "input_image",
            detail: "auto",
            image_url: `data:${item.mimeType};base64,${item.data}`
          };
        });
        // Drop images for models that don't accept image input; skip the
        // message entirely if nothing remains.
        const filteredContent = !model.input.includes("image") ? content.filter((c) => c.type !== "input_image") : content;
        if (filteredContent.length === 0)
          continue;
        messages.push({
          role: "user",
          content: filteredContent
        });
      }
    } else if (msg.role === "assistant") {
      const output = [];
      const assistantMsg = msg;
      // Same provider+api but a different model id: stored fc_ item ids may
      // not validate against the new model, so they are dropped below.
      const isDifferentModel = assistantMsg.model !== model.id && assistantMsg.provider === model.provider && assistantMsg.api === model.api;
      for (const block of msg.content) {
        if (block.type === "thinking") {
          if (block.thinkingSignature) {
            // The signature stores the original reasoning item as JSON;
            // replay it verbatim.
            const reasoningItem = JSON.parse(block.thinkingSignature);
            output.push(reasoningItem);
          }
        } else if (block.type === "text") {
          const textBlock = block;
          const parsedSignature = parseTextSignature(textBlock.textSignature);
          let msgId = parsedSignature?.id;
          if (!msgId) {
            // No stored id: synthesize one from the message position.
            msgId = `msg_${msgIndex}`;
          } else if (msgId.length > 64) {
            msgId = `msg_${shortHash(msgId)}`;
          }
          output.push({
            type: "message",
            role: "assistant",
            content: [{ type: "output_text", text: sanitizeSurrogates(textBlock.text), annotations: [] }],
            status: "completed",
            id: msgId,
            phase: parsedSignature?.phase
          });
        } else if (block.type === "toolCall") {
          const toolCall = block;
          // Compound id: "callId|itemId" (see processResponsesStream).
          const [callId, itemIdRaw] = toolCall.id.split("|");
          let itemId = itemIdRaw;
          if (isDifferentModel && itemId?.startsWith("fc_")) {
            // Item ids are model-specific; omit so the API regenerates one.
            itemId = void 0;
          }
          output.push({
            type: "function_call",
            id: itemId,
            call_id: callId,
            name: toolCall.name,
            arguments: JSON.stringify(toolCall.arguments)
          });
        }
      }
      if (output.length === 0)
        continue;
      messages.push(...output);
    } else if (msg.role === "toolResult") {
      const textResult = msg.content.filter((c) => c.type === "text").map((c) => c.text).join("\n");
      const hasImages = msg.content.some((c) => c.type === "image");
      const hasText = textResult.length > 0;
      // function_call_output is keyed by the call-id half only.
      const [callId] = msg.toolCallId.split("|");
      let output;
      if (hasImages && model.input.includes("image")) {
        // Image-capable model: send text plus each image as input parts.
        const contentParts = [];
        if (hasText) {
          contentParts.push({
            type: "input_text",
            text: sanitizeSurrogates(textResult)
          });
        }
        for (const block of msg.content) {
          if (block.type === "image") {
            contentParts.push({
              type: "input_image",
              detail: "auto",
              image_url: `data:${block.mimeType};base64,${block.data}`
            });
          }
        }
        output = contentParts;
      } else {
        // Text-only fallback; placeholder keeps the call/result pairing
        // intact when the result was image-only.
        output = sanitizeSurrogates(hasText ? textResult : "(see attached image)");
      }
      messages.push({
        type: "function_call_output",
        call_id: callId,
        output
      });
    }
    msgIndex++;
  }
  return messages;
}
|
|
186
|
+
/**
 * Maps internal tool definitions to the Responses API function-tool shape.
 * `strict` defaults to false only when options.strict is undefined; any
 * other value (including null/false) is passed through unchanged.
 */
function convertResponsesTools(tools, options) {
  let strict = false;
  if (options?.strict !== void 0) {
    strict = options.strict;
  }
  return tools.map((tool) => ({
    type: "function",
    name: tool.name,
    description: tool.description,
    parameters: tool.parameters,
    // TypeBox already generates JSON Schema
    strict
  }));
}
|
|
197
|
+
/**
 * Consumes an OpenAI Responses API event stream and incrementally builds the
 * assistant message in `output` while mirroring progress events into
 * `stream`.
 *
 * Output items (reasoning / message / function_call) are tracked one at a
 * time via `currentItem`/`currentBlock`; deltas append to the active block,
 * `response.output_item.done` finalizes it, and `response.completed` records
 * usage, cost, and the stop reason. Tool-call blocks carry the compound id
 * "callId|itemId" so both halves can be replayed later.
 *
 * @param {AsyncIterable} openaiStream - Raw Responses API event stream.
 * @param {Object} output - Accumulating assistant message ({ content: [...] }).
 * @param {Array} stream - Sink receiving incremental progress events (via push).
 * @param {Object} model - Model descriptor used for cost calculation.
 * @param {Object} [options] - Optional { applyServiceTierPricing, serviceTier }.
 * @throws {Error} On stream "error" events or a failed response.
 */
async function processResponsesStream(openaiStream, output, stream, model, options) {
  let currentItem = null;
  let currentBlock = null;
  const blocks = output.content;
  // Deltas always target the most recently started block.
  const blockIndex = () => blocks.length - 1;
  for await (const event of openaiStream) {
    if (event.type === "response.created") {
      output.responseId = event.response.id;
    } else if (event.type === "response.output_item.added") {
      const item = event.item;
      if (item.type === "reasoning") {
        currentItem = item;
        currentBlock = { type: "thinking", thinking: "" };
        output.content.push(currentBlock);
        stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
      } else if (item.type === "message") {
        currentItem = item;
        currentBlock = { type: "text", text: "" };
        output.content.push(currentBlock);
        stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
      } else if (item.type === "function_call") {
        currentItem = item;
        currentBlock = {
          type: "toolCall",
          // Compound id: keep both the call id and the item id.
          id: `${item.call_id}|${item.id}`,
          name: item.name,
          arguments: {},
          partialJson: item.arguments || ""
        };
        output.content.push(currentBlock);
        stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
      }
    } else if (event.type === "response.reasoning_summary_part.added") {
      if (currentItem && currentItem.type === "reasoning") {
        currentItem.summary = currentItem.summary || [];
        currentItem.summary.push(event.part);
      }
    } else if (event.type === "response.reasoning_summary_text.delta") {
      if (currentItem?.type === "reasoning" && currentBlock?.type === "thinking") {
        currentItem.summary = currentItem.summary || [];
        const lastPart = currentItem.summary[currentItem.summary.length - 1];
        if (lastPart) {
          currentBlock.thinking += event.delta;
          lastPart.text += event.delta;
          stream.push({
            type: "thinking_delta",
            contentIndex: blockIndex(),
            delta: event.delta,
            partial: output
          });
        }
      }
    } else if (event.type === "response.reasoning_summary_part.done") {
      if (currentItem?.type === "reasoning" && currentBlock?.type === "thinking") {
        currentItem.summary = currentItem.summary || [];
        const lastPart = currentItem.summary[currentItem.summary.length - 1];
        if (lastPart) {
          // Separate consecutive summary parts with a blank line.
          currentBlock.thinking += "\n\n";
          lastPart.text += "\n\n";
          stream.push({
            type: "thinking_delta",
            contentIndex: blockIndex(),
            delta: "\n\n",
            partial: output
          });
        }
      }
    } else if (event.type === "response.content_part.added") {
      if (currentItem?.type === "message") {
        currentItem.content = currentItem.content || [];
        if (event.part.type === "output_text" || event.part.type === "refusal") {
          currentItem.content.push(event.part);
        }
      }
    } else if (event.type === "response.output_text.delta") {
      if (currentItem?.type === "message" && currentBlock?.type === "text") {
        if (!currentItem.content || currentItem.content.length === 0) {
          continue;
        }
        const lastPart = currentItem.content[currentItem.content.length - 1];
        if (lastPart?.type === "output_text") {
          currentBlock.text += event.delta;
          lastPart.text += event.delta;
          stream.push({
            type: "text_delta",
            contentIndex: blockIndex(),
            delta: event.delta,
            partial: output
          });
        }
      }
    } else if (event.type === "response.refusal.delta") {
      if (currentItem?.type === "message" && currentBlock?.type === "text") {
        if (!currentItem.content || currentItem.content.length === 0) {
          continue;
        }
        const lastPart = currentItem.content[currentItem.content.length - 1];
        if (lastPart?.type === "refusal") {
          // Refusal text is surfaced through the same text block/deltas.
          currentBlock.text += event.delta;
          lastPart.refusal += event.delta;
          stream.push({
            type: "text_delta",
            contentIndex: blockIndex(),
            delta: event.delta,
            partial: output
          });
        }
      }
    } else if (event.type === "response.function_call_arguments.delta") {
      if (currentItem?.type === "function_call" && currentBlock?.type === "toolCall") {
        currentBlock.partialJson += event.delta;
        // Best-effort parse of the arguments accumulated so far.
        currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
        stream.push({
          type: "toolcall_delta",
          contentIndex: blockIndex(),
          delta: event.delta,
          partial: output
        });
      }
    } else if (event.type === "response.function_call_arguments.done") {
      if (currentItem?.type === "function_call" && currentBlock?.type === "toolCall") {
        // The done event carries the authoritative full argument string.
        currentBlock.partialJson = event.arguments;
        currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
      }
    } else if (event.type === "response.output_item.done") {
      const item = event.item;
      if (item.type === "reasoning" && currentBlock?.type === "thinking") {
        currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
        // Persist the full reasoning item so it can be replayed verbatim.
        currentBlock.thinkingSignature = JSON.stringify(item);
        stream.push({
          type: "thinking_end",
          contentIndex: blockIndex(),
          content: currentBlock.thinking,
          partial: output
        });
        currentBlock = null;
      } else if (item.type === "message" && currentBlock?.type === "text") {
        currentBlock.text = item.content.map((c) => c.type === "output_text" ? c.text : c.refusal).join("");
        currentBlock.textSignature = encodeTextSignatureV1(item.id, item.phase ?? void 0);
        stream.push({
          type: "text_end",
          contentIndex: blockIndex(),
          content: currentBlock.text,
          partial: output
        });
        currentBlock = null;
      } else if (item.type === "function_call") {
        const args = currentBlock?.type === "toolCall" && currentBlock.partialJson ? parseStreamingJson(currentBlock.partialJson) : parseStreamingJson(item.arguments || "{}");
        const toolCall = {
          type: "toolCall",
          id: `${item.call_id}|${item.id}`,
          name: item.name,
          arguments: args
        };
        currentBlock = null;
        stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
      }
    } else if (event.type === "response.completed") {
      const response = event.response;
      if (response?.id) {
        output.responseId = response.id;
      }
      if (response?.usage) {
        const cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;
        output.usage = {
          // OpenAI includes cached tokens in input_tokens, so subtract to get non-cached input
          input: (response.usage.input_tokens || 0) - cachedTokens,
          output: response.usage.output_tokens || 0,
          cacheRead: cachedTokens,
          cacheWrite: 0,
          totalTokens: response.usage.total_tokens || 0,
          cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
        };
      }
      // NOTE(review): called even when no usage was reported — presumably
      // calculateCost tolerates undefined; confirm before changing.
      calculateCost(model, output.usage);
      if (options?.applyServiceTierPricing) {
        const serviceTier = response?.service_tier ?? options.serviceTier;
        options.applyServiceTierPricing(output.usage, serviceTier);
      }
      output.stopReason = mapStopReason(response?.status);
      if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
        output.stopReason = "toolUse";
      }
    } else if (event.type === "error") {
      // FIX: the original appended `|| "Unknown error"` to the template
      // literal, which was dead code (a template literal is always truthy).
      throw new Error(`Error Code ${event.code}: ${event.message}`);
    } else if (event.type === "response.failed") {
      const error = event.response?.error;
      const details = event.response?.incomplete_details;
      const msg = error ? `${error.code || "unknown"}: ${error.message || "no message"}` : details?.reason ? `incomplete: ${details.reason}` : "Unknown error (no error details in response)";
      throw new Error(msg);
    }
  }
}
|
|
390
|
+
/**
 * Translates a Responses API response status into an internal stop reason.
 * A missing status maps to "stop"; an unrecognized status throws, matching
 * the original exhaustive switch.
 */
function mapStopReason(status) {
  if (!status)
    return "stop";
  const reasonByStatus = {
    completed: "stop",
    incomplete: "length",
    failed: "error",
    cancelled: "error",
    // These two are wonky ...
    in_progress: "stop",
    queued: "stop"
  };
  if (!Object.hasOwn(reasonByStatus, status)) {
    throw new Error(`Unhandled stop reason: ${status}`);
  }
  return reasonByStatus[status];
}
|
|
411
|
+
|
|
412
|
+
export {
|
|
413
|
+
convertResponsesMessages,
|
|
414
|
+
convertResponsesTools,
|
|
415
|
+
processResponsesStream
|
|
416
|
+
};
|