@ottocode/server 0.1.228 → 0.1.230
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -3
- package/src/openapi/paths/ask.ts +11 -0
- package/src/openapi/paths/config.ts +15 -0
- package/src/openapi/paths/messages.ts +6 -0
- package/src/openapi/schemas.ts +5 -0
- package/src/routes/ask.ts +8 -0
- package/src/routes/config/defaults.ts +9 -1
- package/src/routes/config/main.ts +1 -0
- package/src/routes/session-messages.ts +6 -1
- package/src/routes/sessions.ts +4 -1
- package/src/runtime/agent/runner-setup.ts +43 -34
- package/src/runtime/agent/runner.ts +171 -8
- package/src/runtime/ask/service.ts +16 -0
- package/src/runtime/debug/turn-dump.ts +330 -0
- package/src/runtime/message/history-builder.ts +99 -91
- package/src/runtime/message/service.ts +8 -1
- package/src/runtime/prompt/builder.ts +8 -6
- package/src/runtime/provider/reasoning.ts +291 -0
- package/src/runtime/session/queue.ts +2 -0
- package/src/tools/adapter.ts +84 -7
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
import {
|
|
2
|
+
catalog,
|
|
3
|
+
getModelNpmBinding,
|
|
4
|
+
getUnderlyingProviderKey,
|
|
5
|
+
modelSupportsReasoning,
|
|
6
|
+
type ProviderId,
|
|
7
|
+
type ReasoningLevel,
|
|
8
|
+
} from '@ottocode/sdk';
|
|
9
|
+
|
|
10
|
+
// Baseline token budget for provider "thinking"/reasoning output; also the
// floor for the cap computed in toThinkingBudget below.
const THINKING_BUDGET = 16000;

// Result of resolving reasoning settings for a provider/model pair.
export type ReasoningConfigResult = {
  // Provider-specific options to merge into the AI SDK call.
  providerOptions: Record<string, unknown>;
  // Output-token limit after reserving room for thinking tokens; may be
  // smaller than the caller-supplied maxOutputTokens (Anthropic budget path).
  effectiveMaxOutputTokens: number | undefined;
  // Whether reasoning actually ended up enabled for this model.
  enabled: boolean;
};
|
|
17
|
+
|
|
18
|
+
function normalizeReasoningLevel(
|
|
19
|
+
level: ReasoningLevel | undefined,
|
|
20
|
+
): Exclude<ReasoningLevel, 'xhigh'> {
|
|
21
|
+
if (!level) return 'high';
|
|
22
|
+
if (level === 'xhigh') return 'high';
|
|
23
|
+
return level;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
function toAnthropicEffort(
|
|
27
|
+
level: ReasoningLevel | undefined,
|
|
28
|
+
): 'low' | 'medium' | 'high' | 'max' {
|
|
29
|
+
switch (level) {
|
|
30
|
+
case 'minimal':
|
|
31
|
+
case 'low':
|
|
32
|
+
return 'low';
|
|
33
|
+
case 'medium':
|
|
34
|
+
return 'medium';
|
|
35
|
+
case 'max':
|
|
36
|
+
case 'xhigh':
|
|
37
|
+
return 'max';
|
|
38
|
+
case 'high':
|
|
39
|
+
default:
|
|
40
|
+
return 'high';
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
function toOpenAIEffort(
|
|
45
|
+
level: ReasoningLevel | undefined,
|
|
46
|
+
): 'minimal' | 'low' | 'medium' | 'high' | 'xhigh' {
|
|
47
|
+
switch (level) {
|
|
48
|
+
case 'minimal':
|
|
49
|
+
return 'minimal';
|
|
50
|
+
case 'low':
|
|
51
|
+
return 'low';
|
|
52
|
+
case 'medium':
|
|
53
|
+
return 'medium';
|
|
54
|
+
case 'max':
|
|
55
|
+
case 'xhigh':
|
|
56
|
+
return 'xhigh';
|
|
57
|
+
case 'high':
|
|
58
|
+
default:
|
|
59
|
+
return 'high';
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
function toGoogleThinkingLevel(
|
|
64
|
+
level: ReasoningLevel | undefined,
|
|
65
|
+
): 'minimal' | 'low' | 'medium' | 'high' {
|
|
66
|
+
switch (level) {
|
|
67
|
+
case 'minimal':
|
|
68
|
+
return 'minimal';
|
|
69
|
+
case 'low':
|
|
70
|
+
return 'low';
|
|
71
|
+
case 'medium':
|
|
72
|
+
return 'medium';
|
|
73
|
+
case 'max':
|
|
74
|
+
case 'xhigh':
|
|
75
|
+
case 'high':
|
|
76
|
+
default:
|
|
77
|
+
return 'high';
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
function toThinkingBudget(
|
|
82
|
+
level: ReasoningLevel | undefined,
|
|
83
|
+
maxOutputTokens: number | undefined,
|
|
84
|
+
): number {
|
|
85
|
+
const cap = maxOutputTokens
|
|
86
|
+
? Math.max(maxOutputTokens, THINKING_BUDGET)
|
|
87
|
+
: THINKING_BUDGET;
|
|
88
|
+
switch (level) {
|
|
89
|
+
case 'minimal':
|
|
90
|
+
return Math.min(2048, cap);
|
|
91
|
+
case 'low':
|
|
92
|
+
return Math.min(4096, cap);
|
|
93
|
+
case 'medium':
|
|
94
|
+
return Math.min(8192, cap);
|
|
95
|
+
case 'max':
|
|
96
|
+
case 'xhigh':
|
|
97
|
+
return Math.min(24000, cap);
|
|
98
|
+
case 'high':
|
|
99
|
+
default:
|
|
100
|
+
return Math.min(16000, cap);
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
function toCamelCaseKey(value: string): string {
|
|
105
|
+
return value
|
|
106
|
+
.replace(/[^a-zA-Z0-9]+/g, ' ')
|
|
107
|
+
.trim()
|
|
108
|
+
.split(/\s+/)
|
|
109
|
+
.map((segment, index) => {
|
|
110
|
+
const lower = segment.toLowerCase();
|
|
111
|
+
if (index === 0) return lower;
|
|
112
|
+
return lower.charAt(0).toUpperCase() + lower.slice(1);
|
|
113
|
+
})
|
|
114
|
+
.join('');
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
function getOpenAICompatibleProviderOptionKeys(provider: ProviderId): string[] {
|
|
118
|
+
const entry = catalog[provider];
|
|
119
|
+
const keys = new Set<string>(['openaiCompatible', toCamelCaseKey(provider)]);
|
|
120
|
+
if (entry?.label) {
|
|
121
|
+
keys.add(toCamelCaseKey(entry.label));
|
|
122
|
+
}
|
|
123
|
+
return Array.from(keys).filter(Boolean);
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
function buildSharedProviderOptions(
|
|
127
|
+
provider: ProviderId,
|
|
128
|
+
options: Record<string, unknown>,
|
|
129
|
+
): Record<string, unknown> {
|
|
130
|
+
const keys = getOpenAICompatibleProviderOptionKeys(provider);
|
|
131
|
+
return Object.fromEntries(keys.map((key) => [key, options]));
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
function usesAdaptiveAnthropicThinking(model: string): boolean {
|
|
135
|
+
const lower = model.toLowerCase();
|
|
136
|
+
return (
|
|
137
|
+
lower.includes('claude-opus-4-6') ||
|
|
138
|
+
lower.includes('claude-opus-4.6') ||
|
|
139
|
+
lower.includes('claude-sonnet-4-6') ||
|
|
140
|
+
lower.includes('claude-sonnet-4.6')
|
|
141
|
+
);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
function getReasoningProviderTarget(
|
|
145
|
+
provider: ProviderId,
|
|
146
|
+
model: string,
|
|
147
|
+
):
|
|
148
|
+
| 'anthropic'
|
|
149
|
+
| 'openai'
|
|
150
|
+
| 'google'
|
|
151
|
+
| 'openai-compatible'
|
|
152
|
+
| 'openrouter'
|
|
153
|
+
| null {
|
|
154
|
+
if (provider === 'openrouter') return 'openrouter';
|
|
155
|
+
if (
|
|
156
|
+
provider === 'moonshot' ||
|
|
157
|
+
provider === 'zai' ||
|
|
158
|
+
provider === 'zai-coding'
|
|
159
|
+
) {
|
|
160
|
+
return 'openai-compatible';
|
|
161
|
+
}
|
|
162
|
+
if (provider === 'minimax') return 'anthropic';
|
|
163
|
+
|
|
164
|
+
const npmBinding = getModelNpmBinding(provider, model);
|
|
165
|
+
if (npmBinding === '@ai-sdk/anthropic') return 'anthropic';
|
|
166
|
+
if (npmBinding === '@ai-sdk/openai') return 'openai';
|
|
167
|
+
if (npmBinding === '@ai-sdk/google') return 'google';
|
|
168
|
+
if (npmBinding === '@ai-sdk/openai-compatible') return 'openai-compatible';
|
|
169
|
+
if (npmBinding === '@openrouter/ai-sdk-provider') return 'openrouter';
|
|
170
|
+
|
|
171
|
+
const underlyingProvider = getUnderlyingProviderKey(provider, model);
|
|
172
|
+
if (underlyingProvider === 'anthropic') return 'anthropic';
|
|
173
|
+
if (underlyingProvider === 'openai') return 'openai';
|
|
174
|
+
if (underlyingProvider === 'google') return 'google';
|
|
175
|
+
if (underlyingProvider === 'openai-compatible') return 'openai-compatible';
|
|
176
|
+
return null;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
export function buildReasoningConfig(args: {
|
|
180
|
+
provider: ProviderId;
|
|
181
|
+
model: string;
|
|
182
|
+
reasoningText?: boolean;
|
|
183
|
+
reasoningLevel?: ReasoningLevel;
|
|
184
|
+
maxOutputTokens: number | undefined;
|
|
185
|
+
}): ReasoningConfigResult {
|
|
186
|
+
const { provider, model, reasoningText, reasoningLevel, maxOutputTokens } =
|
|
187
|
+
args;
|
|
188
|
+
if (!reasoningText || !modelSupportsReasoning(provider, model)) {
|
|
189
|
+
return {
|
|
190
|
+
providerOptions: {},
|
|
191
|
+
effectiveMaxOutputTokens: maxOutputTokens,
|
|
192
|
+
enabled: false,
|
|
193
|
+
};
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
const reasoningTarget = getReasoningProviderTarget(provider, model);
|
|
197
|
+
if (reasoningTarget === 'anthropic') {
|
|
198
|
+
if (usesAdaptiveAnthropicThinking(model)) {
|
|
199
|
+
return {
|
|
200
|
+
providerOptions: {
|
|
201
|
+
anthropic: {
|
|
202
|
+
thinking: { type: 'adaptive' },
|
|
203
|
+
effort: toAnthropicEffort(reasoningLevel),
|
|
204
|
+
},
|
|
205
|
+
},
|
|
206
|
+
effectiveMaxOutputTokens: maxOutputTokens,
|
|
207
|
+
enabled: true,
|
|
208
|
+
};
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
const thinkingBudget = toThinkingBudget(reasoningLevel, maxOutputTokens);
|
|
212
|
+
|
|
213
|
+
return {
|
|
214
|
+
providerOptions: {
|
|
215
|
+
anthropic: {
|
|
216
|
+
thinking: { type: 'enabled', budgetTokens: thinkingBudget },
|
|
217
|
+
},
|
|
218
|
+
},
|
|
219
|
+
effectiveMaxOutputTokens:
|
|
220
|
+
maxOutputTokens && maxOutputTokens > thinkingBudget
|
|
221
|
+
? maxOutputTokens - thinkingBudget
|
|
222
|
+
: maxOutputTokens,
|
|
223
|
+
enabled: true,
|
|
224
|
+
};
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
if (reasoningTarget === 'openai') {
|
|
228
|
+
return {
|
|
229
|
+
providerOptions: {
|
|
230
|
+
openai: {
|
|
231
|
+
reasoningEffort: toOpenAIEffort(reasoningLevel),
|
|
232
|
+
reasoningSummary: 'auto',
|
|
233
|
+
},
|
|
234
|
+
},
|
|
235
|
+
effectiveMaxOutputTokens: maxOutputTokens,
|
|
236
|
+
enabled: true,
|
|
237
|
+
};
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
if (reasoningTarget === 'google') {
|
|
241
|
+
const isGemini3 = model.includes('gemini-3');
|
|
242
|
+
return {
|
|
243
|
+
providerOptions: {
|
|
244
|
+
google: {
|
|
245
|
+
thinkingConfig: isGemini3
|
|
246
|
+
? {
|
|
247
|
+
thinkingLevel: toGoogleThinkingLevel(reasoningLevel),
|
|
248
|
+
includeThoughts: true,
|
|
249
|
+
}
|
|
250
|
+
: {
|
|
251
|
+
thinkingBudget: toThinkingBudget(
|
|
252
|
+
reasoningLevel,
|
|
253
|
+
maxOutputTokens,
|
|
254
|
+
),
|
|
255
|
+
includeThoughts: true,
|
|
256
|
+
},
|
|
257
|
+
},
|
|
258
|
+
},
|
|
259
|
+
effectiveMaxOutputTokens: maxOutputTokens,
|
|
260
|
+
enabled: true,
|
|
261
|
+
};
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
if (reasoningTarget === 'openrouter') {
|
|
265
|
+
return {
|
|
266
|
+
providerOptions: {
|
|
267
|
+
openrouter: {
|
|
268
|
+
reasoning: { effort: normalizeReasoningLevel(reasoningLevel) },
|
|
269
|
+
},
|
|
270
|
+
},
|
|
271
|
+
effectiveMaxOutputTokens: maxOutputTokens,
|
|
272
|
+
enabled: true,
|
|
273
|
+
};
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
if (reasoningTarget === 'openai-compatible') {
|
|
277
|
+
return {
|
|
278
|
+
providerOptions: buildSharedProviderOptions(provider, {
|
|
279
|
+
reasoningEffort: normalizeReasoningLevel(reasoningLevel),
|
|
280
|
+
}),
|
|
281
|
+
effectiveMaxOutputTokens: maxOutputTokens,
|
|
282
|
+
enabled: true,
|
|
283
|
+
};
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
return {
|
|
287
|
+
providerOptions: {},
|
|
288
|
+
effectiveMaxOutputTokens: maxOutputTokens,
|
|
289
|
+
enabled: false,
|
|
290
|
+
};
|
|
291
|
+
}
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import type { ProviderName } from '../provider/index.ts';
|
|
2
2
|
import { publish } from '../../events/bus.ts';
|
|
3
3
|
import type { ToolApprovalMode } from '../tools/approval.ts';
|
|
4
|
+
import type { ReasoningLevel } from '@ottocode/sdk';
|
|
4
5
|
|
|
5
6
|
export type RunOpts = {
|
|
6
7
|
sessionId: string;
|
|
@@ -12,6 +13,7 @@ export type RunOpts = {
|
|
|
12
13
|
oneShot?: boolean;
|
|
13
14
|
userContext?: string;
|
|
14
15
|
reasoningText?: boolean;
|
|
16
|
+
reasoningLevel?: ReasoningLevel;
|
|
15
17
|
abortSignal?: AbortSignal;
|
|
16
18
|
isCompactCommand?: boolean;
|
|
17
19
|
compactionContext?: string;
|
package/src/tools/adapter.ts
CHANGED
|
@@ -18,6 +18,7 @@ import {
|
|
|
18
18
|
requestApproval,
|
|
19
19
|
} from '../runtime/tools/approval.ts';
|
|
20
20
|
import { guardToolCall } from '../runtime/tools/guards.ts';
|
|
21
|
+
import { debugLog } from '../runtime/debug/index.ts';
|
|
21
22
|
|
|
22
23
|
export type { ToolAdapterContext } from '../runtime/tools/context.ts';
|
|
23
24
|
|
|
@@ -55,6 +56,37 @@ function getPendingQueue(
|
|
|
55
56
|
return queue;
|
|
56
57
|
}
|
|
57
58
|
|
|
59
|
+
function extractToolCallId(options: unknown): string | undefined {
|
|
60
|
+
return (options as { toolCallId?: string } | undefined)?.toolCallId;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
const DEFAULT_TRACED_TOOL_INPUTS = new Set(['write', 'apply_patch']);
|
|
64
|
+
|
|
65
|
+
function shouldTraceToolInput(name: string): boolean {
|
|
66
|
+
const raw = process.env.OTTO_DEBUG_TOOL_INPUT?.trim();
|
|
67
|
+
if (!raw) return false;
|
|
68
|
+
const normalized = raw.toLowerCase();
|
|
69
|
+
if (['1', 'true', 'yes', 'on', 'all'].includes(normalized)) {
|
|
70
|
+
return DEFAULT_TRACED_TOOL_INPUTS.has(name);
|
|
71
|
+
}
|
|
72
|
+
const tokens = raw
|
|
73
|
+
.split(/[\s,]+/)
|
|
74
|
+
.map((token) => token.trim().toLowerCase())
|
|
75
|
+
.filter(Boolean);
|
|
76
|
+
return tokens.includes('all') || tokens.includes(name.toLowerCase());
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
function summarizeTraceValue(value: unknown, max = 160): string {
|
|
80
|
+
try {
|
|
81
|
+
const json = JSON.stringify(value);
|
|
82
|
+
if (typeof json === 'string') {
|
|
83
|
+
return json.length > max ? `${json.slice(0, max)}…` : json;
|
|
84
|
+
}
|
|
85
|
+
} catch {}
|
|
86
|
+
const fallback = String(value);
|
|
87
|
+
return fallback.length > max ? `${fallback.slice(0, max)}…` : fallback;
|
|
88
|
+
}
|
|
89
|
+
|
|
58
90
|
function unwrapDoubleWrappedArgs(
|
|
59
91
|
input: unknown,
|
|
60
92
|
expectedName: string,
|
|
@@ -199,12 +231,18 @@ export function adaptTools(
|
|
|
199
231
|
...base,
|
|
200
232
|
...(providerOptions ? { providerOptions } : {}),
|
|
201
233
|
async onInputStart(options: unknown) {
|
|
234
|
+
const sdkCallId = extractToolCallId(options);
|
|
202
235
|
const queue = getPendingQueue(pendingCalls, name);
|
|
203
236
|
queue.push({
|
|
204
|
-
callId: crypto.randomUUID(),
|
|
237
|
+
callId: sdkCallId || crypto.randomUUID(),
|
|
205
238
|
startTs: Date.now(),
|
|
206
239
|
stepIndex: ctx.stepIndex,
|
|
207
240
|
});
|
|
241
|
+
if (shouldTraceToolInput(name)) {
|
|
242
|
+
debugLog(
|
|
243
|
+
`[TOOL_INPUT_TRACE][adapter] onInputStart tool=${name} callId=${sdkCallId ?? queue[queue.length - 1]?.callId ?? 'unknown'} step=${ctx.stepIndex}`,
|
|
244
|
+
);
|
|
245
|
+
}
|
|
208
246
|
if (typeof base.onInputStart === 'function')
|
|
209
247
|
// biome-ignore lint/suspicious/noExplicitAny: AI SDK types are complex
|
|
210
248
|
await base.onInputStart(options as any);
|
|
@@ -212,8 +250,14 @@ export function adaptTools(
|
|
|
212
250
|
async onInputDelta(options: unknown) {
|
|
213
251
|
const delta = (options as { inputTextDelta?: string } | undefined)
|
|
214
252
|
?.inputTextDelta;
|
|
253
|
+
const sdkCallId = extractToolCallId(options);
|
|
215
254
|
const queue = pendingCalls.get(name);
|
|
216
255
|
const meta = queue?.length ? queue[queue.length - 1] : undefined;
|
|
256
|
+
if (shouldTraceToolInput(name)) {
|
|
257
|
+
debugLog(
|
|
258
|
+
`[TOOL_INPUT_TRACE][adapter] onInputDelta tool=${name} callId=${sdkCallId ?? meta?.callId ?? 'unknown'} step=${meta?.stepIndex ?? ctx.stepIndex} delta=${summarizeTraceValue(delta ?? '')}`,
|
|
259
|
+
);
|
|
260
|
+
}
|
|
217
261
|
// Stream tool argument deltas as events if needed
|
|
218
262
|
publish({
|
|
219
263
|
type: 'tool.delta',
|
|
@@ -233,21 +277,30 @@ export function adaptTools(
|
|
|
233
277
|
},
|
|
234
278
|
async onInputAvailable(options: unknown) {
|
|
235
279
|
const args = (options as { input?: unknown } | undefined)?.input;
|
|
280
|
+
const sdkCallId = extractToolCallId(options);
|
|
236
281
|
const queue = getPendingQueue(pendingCalls, name);
|
|
237
282
|
let meta = queue.length ? queue[queue.length - 1] : undefined;
|
|
238
283
|
if (!meta) {
|
|
239
284
|
meta = {
|
|
240
|
-
callId: crypto.randomUUID(),
|
|
285
|
+
callId: sdkCallId || crypto.randomUUID(),
|
|
241
286
|
startTs: Date.now(),
|
|
242
287
|
stepIndex: ctx.stepIndex,
|
|
243
288
|
};
|
|
244
289
|
queue.push(meta);
|
|
245
290
|
}
|
|
291
|
+
if (sdkCallId && meta.callId !== sdkCallId) {
|
|
292
|
+
meta.callId = sdkCallId;
|
|
293
|
+
}
|
|
246
294
|
meta.stepIndex = ctx.stepIndex;
|
|
247
295
|
meta.args = args;
|
|
248
296
|
const callId = meta.callId;
|
|
249
297
|
const callPartId = crypto.randomUUID();
|
|
250
298
|
const startTs = meta.startTs;
|
|
299
|
+
if (shouldTraceToolInput(name)) {
|
|
300
|
+
debugLog(
|
|
301
|
+
`[TOOL_INPUT_TRACE][adapter] onInputAvailable tool=${name} callId=${callId} step=${ctx.stepIndex} input=${summarizeTraceValue(args)}`,
|
|
302
|
+
);
|
|
303
|
+
}
|
|
251
304
|
|
|
252
305
|
if (
|
|
253
306
|
!firstToolCallReported &&
|
|
@@ -360,10 +413,11 @@ export function adaptTools(
|
|
|
360
413
|
},
|
|
361
414
|
async execute(input: ToolExecuteInput, options: ToolExecuteOptions) {
|
|
362
415
|
input = unwrapDoubleWrappedArgs(input, name);
|
|
416
|
+
const sdkCallId = extractToolCallId(options);
|
|
363
417
|
const queue = pendingCalls.get(name);
|
|
364
418
|
const meta = queue?.shift();
|
|
365
419
|
if (queue && queue.length === 0) pendingCalls.delete(name);
|
|
366
|
-
const callIdFromQueue = meta?.callId;
|
|
420
|
+
const callIdFromQueue = sdkCallId || meta?.callId;
|
|
367
421
|
const startTsFromQueue = meta?.startTs;
|
|
368
422
|
const stepIndexForEvent = meta?.stepIndex ?? ctx.stepIndex;
|
|
369
423
|
|
|
@@ -462,23 +516,46 @@ export function adaptTools(
|
|
|
462
516
|
// If tool returns an async iterable, stream deltas while accumulating
|
|
463
517
|
if (res && typeof res === 'object' && Symbol.asyncIterator in res) {
|
|
464
518
|
const chunks: unknown[] = [];
|
|
519
|
+
let streamedResult: unknown = null;
|
|
465
520
|
for await (const chunk of res as AsyncIterable<unknown>) {
|
|
466
521
|
chunks.push(chunk);
|
|
522
|
+
if (chunk && typeof chunk === 'object' && 'result' in chunk) {
|
|
523
|
+
streamedResult = (chunk as { result: unknown }).result;
|
|
524
|
+
continue;
|
|
525
|
+
}
|
|
526
|
+
const delta =
|
|
527
|
+
typeof chunk === 'string'
|
|
528
|
+
? chunk
|
|
529
|
+
: chunk &&
|
|
530
|
+
typeof chunk === 'object' &&
|
|
531
|
+
'delta' in chunk &&
|
|
532
|
+
typeof (chunk as { delta?: unknown }).delta === 'string'
|
|
533
|
+
? ((chunk as { delta: string }).delta ?? '')
|
|
534
|
+
: null;
|
|
535
|
+
if (!delta) continue;
|
|
536
|
+
const channel =
|
|
537
|
+
chunk &&
|
|
538
|
+
typeof chunk === 'object' &&
|
|
539
|
+
'channel' in chunk &&
|
|
540
|
+
typeof (chunk as { channel?: unknown }).channel === 'string'
|
|
541
|
+
? ((chunk as { channel: string }).channel ?? 'output')
|
|
542
|
+
: 'output';
|
|
467
543
|
publish({
|
|
468
544
|
type: 'tool.delta',
|
|
469
545
|
sessionId: ctx.sessionId,
|
|
470
546
|
payload: {
|
|
471
547
|
name,
|
|
472
|
-
channel
|
|
473
|
-
delta
|
|
548
|
+
channel,
|
|
549
|
+
delta,
|
|
474
550
|
stepIndex: stepIndexForEvent,
|
|
475
551
|
callId: callIdFromQueue,
|
|
476
552
|
messageId: ctx.messageId,
|
|
477
553
|
},
|
|
478
554
|
});
|
|
479
555
|
}
|
|
480
|
-
|
|
481
|
-
|
|
556
|
+
result =
|
|
557
|
+
streamedResult ??
|
|
558
|
+
(chunks.length > 0 ? chunks[chunks.length - 1] : null);
|
|
482
559
|
} else {
|
|
483
560
|
// Await promise or passthrough value
|
|
484
561
|
result = await Promise.resolve(res as ToolExecuteReturn);
|