llmist 6.1.0 → 7.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -1
- package/dist/{chunk-7BJX376V.js → chunk-5KEZ7SQX.js} +13 -25
- package/dist/chunk-5KEZ7SQX.js.map +1 -0
- package/dist/{chunk-VAJLPRJ6.js → chunk-SFZIL2VR.js} +410 -493
- package/dist/chunk-SFZIL2VR.js.map +1 -0
- package/dist/cli.cjs +11533 -11843
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +5528 -5751
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +5779 -5872
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +75 -299
- package/dist/index.d.ts +75 -299
- package/dist/index.js +5 -3
- package/dist/{mock-stream-Cq1Sxezz.d.cts → mock-stream-r5vjy2Iq.d.cts} +1103 -739
- package/dist/{mock-stream-Cq1Sxezz.d.ts → mock-stream-r5vjy2Iq.d.ts} +1103 -739
- package/dist/testing/index.cjs +401 -486
- package/dist/testing/index.cjs.map +1 -1
- package/dist/testing/index.d.cts +2 -2
- package/dist/testing/index.d.ts +2 -2
- package/dist/testing/index.js +1 -1
- package/package.json +2 -1
- package/dist/chunk-7BJX376V.js.map +0 -1
- package/dist/chunk-VAJLPRJ6.js.map +0 -1
@@ -1,4 +1,5 @@
-import
+import * as zod from 'zod';
+import { ZodType, ZodTypeAny } from 'zod';
 import { Logger, ILogObj } from 'tslog';
 
 /**
@@ -213,755 +214,441 @@ interface SpeechModelSpec {
 };
 }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+interface LLMGenerationOptions {
+model: string;
+messages: LLMMessage[];
+maxTokens?: number;
+temperature?: number;
+topP?: number;
+stopSequences?: string[];
+responseFormat?: "text";
+metadata?: Record<string, unknown>;
+extra?: Record<string, unknown>;
+/**
+* Optional abort signal for cancelling the request mid-flight.
+*
+* When the signal is aborted, the provider will attempt to cancel
+* the underlying HTTP request and the stream will terminate with
+* an abort error. Use `isAbortError()` from `@/core/errors` to
+* detect cancellation in error handling.
+*
+* @example
+* ```typescript
+* const controller = new AbortController();
+*
+* const stream = client.stream({
+* model: "claude-3-5-sonnet-20241022",
+* messages: [{ role: "user", content: "Tell me a long story" }],
+* signal: controller.signal,
+* });
+*
+* // Cancel after 5 seconds
+* setTimeout(() => controller.abort(), 5000);
+*
+* try {
+* for await (const chunk of stream) {
+* process.stdout.write(chunk.text);
+* }
+* } catch (error) {
+* if (isAbortError(error)) {
+* console.log("\nRequest was cancelled");
+* } else {
+* throw error;
+* }
+* }
+* ```
+*/
+signal?: AbortSignal;
 }
-interface
-
-
-
-
-
-
-
-reasoning?: boolean;
-/** Supports structured outputs */
-structuredOutputs?: boolean;
-/** Supports fine-tuning */
-fineTuning?: boolean;
+interface TokenUsage {
+inputTokens: number;
+outputTokens: number;
+totalTokens: number;
+/** Number of input tokens served from cache (subset of inputTokens) */
+cachedInputTokens?: number;
+/** Number of input tokens written to cache (subset of inputTokens, Anthropic only) */
+cacheCreationInputTokens?: number;
 }
-interface
-
-
-
-
-
-
-
-
-
-
-
-
-
-knowledgeCutoff: string;
-/** Supported features and capabilities */
-features: ModelFeatures;
-/** Additional metadata */
-metadata?: {
-/** Model family/series */
-family?: string;
-/** Release date */
-releaseDate?: string;
-/** Deprecation date if applicable */
-deprecationDate?: string;
-/** Notes or special information */
-notes?: string;
-/** Whether manual temperature configuration is supported (defaults to true) */
-supportsTemperature?: boolean;
-};
+interface LLMStreamChunk {
+text: string;
+/**
+* Indicates that the provider has finished producing output and includes the reason if available.
+*/
+finishReason?: string | null;
+/**
+* Token usage information, typically available in the final chunk when the stream completes.
+*/
+usage?: TokenUsage;
+/**
+* Provider specific payload emitted at the same time as the text chunk. This is useful for debugging and tests.
+*/
+rawEvent?: unknown;
 }
-interface
-contextWindow: number;
-maxOutputTokens: number;
+interface LLMStream extends AsyncIterable<LLMStreamChunk> {
 }
-
-
-
-
-
-
-
-
-
+type ProviderIdentifier = string;
+interface ModelDescriptor {
+provider: string;
+name: string;
+}
+declare class ModelIdentifierParser {
+private readonly defaultProvider;
+constructor(defaultProvider?: string);
+parse(identifier: string): ModelDescriptor;
 }
 
 /**
-*
+* Unified event types for the Execution Tree.
 *
-*
-*
-*
-*
-* -
+* All events carry full tree context (nodeId, parentId, depth, path).
+* No special SubagentEvent wrapper needed - subagent events are regular
+* events with depth > 0.
+*
+* @module core/execution-events
 */
 
 /**
-*
+* Base properties shared by all execution events.
+* Every event carries full tree context.
 */
-interface
-/**
-
-/**
-
-/**
-
-/**
-
-/**
-
+interface BaseExecutionEvent {
+/** Monotonically increasing event ID */
+eventId: number;
+/** Event timestamp */
+timestamp: number;
+/** Node that emitted this event */
+nodeId: string;
+/** Parent node ID (null for root events) */
+parentId: string | null;
+/** Nesting depth (0 = root, 1 = child, etc.) */
+depth: number;
+/** Full path from root to this node */
+path: string[];
 }
 /**
-*
+* Emitted when an LLM call starts.
 */
-interface
-
-
-
-
-
-
-
-metadata: {
-/** Number of messages before compaction */
-originalCount: number;
-/** Number of messages after compaction */
-compactedCount: number;
-/** Estimated tokens before compaction */
-tokensBefore: number;
-/** Estimated tokens after compaction */
-tokensAfter: number;
-};
+interface LLMCallStartEvent extends BaseExecutionEvent {
+type: "llm_call_start";
+/** Iteration number within agent loop (1-indexed) */
+iteration: number;
+/** Model identifier */
+model: string;
+/** Request messages */
+request?: LLMMessage[];
 }
 /**
-*
-*
-* Strategies receive the conversation history (excluding base messages like
-* system prompt and gadget instructions) and must return a compacted version.
-*
-* @example
-* ```typescript
-* class MyCustomStrategy implements CompactionStrategy {
-* readonly name = 'my-custom';
-*
-* async compact(
-* messages: LLMMessage[],
-* config: ResolvedCompactionConfig,
-* context: CompactionContext
-* ): Promise<CompactionResult> {
-* // Custom compaction logic
-* return {
-* messages: compactedMessages,
-* metadata: { ... }
-* };
-* }
-* }
-* ```
+* Emitted for each streaming chunk from LLM.
 */
-interface
-
-
-
-* Compact the given messages to fit within target token count.
-*
-* @param messages - Conversation history messages (excludes system/gadget base)
-* @param config - Resolved compaction configuration
-* @param context - Context including token counts and LLM client
-* @returns Compacted messages with metadata
-*/
-compact(messages: LLMMessage[], config: ResolvedCompactionConfig, context: CompactionContext): Promise<CompactionResult>;
+interface LLMCallStreamEvent extends BaseExecutionEvent {
+type: "llm_call_stream";
+/** Text chunk */
+chunk: string;
 }
 /**
-*
-*
-* A "turn" is typically a user message followed by an assistant response.
-* Gadget calls are grouped with the preceding assistant message.
+* Emitted when an LLM call completes successfully.
 */
-interface
-
-
-
-
+interface LLMCallCompleteEvent extends BaseExecutionEvent {
+type: "llm_call_complete";
+/** Complete response text */
+response: string;
+/** Token usage */
+usage?: TokenUsage;
+/** Finish reason from LLM */
+finishReason?: string | null;
+/** Cost in USD */
+cost?: number;
 }
-
 /**
-*
-*
-* Context compaction automatically manages conversation history to prevent
-* context window overflow in long-running agent conversations.
+* Emitted when an LLM call fails.
 */
-
+interface LLMCallErrorEvent extends BaseExecutionEvent {
+type: "llm_call_error";
+/** The error that occurred */
+error: Error;
+/** Whether the error was recovered by a controller */
+recovered: boolean;
+}
 /**
-*
-* This is included in StreamEvent for UI visibility.
+* Emitted when a gadget call is parsed from LLM output (before execution).
 */
-interface
-
-
-
+interface GadgetCallEvent extends BaseExecutionEvent {
+type: "gadget_call";
+/** Invocation ID */
+invocationId: string;
+/** Gadget name */
+name: string;
+/** Parameters */
+parameters: Record<string, unknown>;
+/** Dependencies (other invocation IDs) */
+dependencies: string[];
+}
+/**
+* Emitted when gadget execution starts.
+*/
+interface GadgetStartEvent extends BaseExecutionEvent {
+type: "gadget_start";
+/** Invocation ID */
+invocationId: string;
+/** Gadget name */
+name: string;
+}
+/**
+* Emitted when gadget execution completes successfully.
+*/
+interface GadgetCompleteEvent extends BaseExecutionEvent {
+type: "gadget_complete";
+/** Invocation ID */
+invocationId: string;
+/** Gadget name */
+name: string;
+/** Result string */
+result: string;
+/** Execution time in ms */
+executionTimeMs: number;
+/** Cost in USD */
+cost?: number;
+/** Media outputs */
+media?: GadgetMediaOutput[];
+}
+/**
+* Emitted when gadget execution fails.
+*/
+interface GadgetErrorEvent extends BaseExecutionEvent {
+type: "gadget_error";
+/** Invocation ID */
+invocationId: string;
+/** Gadget name */
+name: string;
+/** Error message */
+error: string;
+/** Execution time in ms */
+executionTimeMs: number;
+}
+/**
+* Emitted when a gadget is skipped.
+*/
+interface GadgetSkippedEvent$1 extends BaseExecutionEvent {
+type: "gadget_skipped";
+/** Invocation ID */
+invocationId: string;
+/** Gadget name */
+name: string;
+/** Reason for skipping */
+reason: "dependency_failed" | "controller_skip";
+/** Error message (combines reason and failedDependencyError for consistency with GadgetErrorEvent) */
+error: string;
+/** Failed dependency invocation ID (if dependency_failed) */
+failedDependency?: string;
+/** Error message from failed dependency */
+failedDependencyError?: string;
+}
+/**
+* Emitted for text output from LLM (pure notification, not a tree node).
+*/
+interface TextEvent extends BaseExecutionEvent {
+type: "text";
+/** Text content */
+content: string;
+}
+/**
+* Emitted when context compaction occurs.
+*/
+interface CompactionEvent$1 extends BaseExecutionEvent {
+type: "compaction";
+/** Tokens before compaction */
 tokensBefore: number;
-/**
+/** Tokens after compaction */
 tokensAfter: number;
-/**
-
-/**
-
-/** Summary text if summarization was used */
-summary?: string;
-/** Agent iteration when compaction occurred */
-iteration: number;
+/** Compaction strategy used */
+strategy: string;
+/** Messages removed */
+messagesRemoved: number;
 }
 /**
-*
+* Emitted when human input is required.
 */
-interface
-
-
-
-
-
-
-
-percent: number;
-};
-/** Model's context window size */
-contextWindow: number;
+interface HumanInputRequiredEvent extends BaseExecutionEvent {
+type: "human_input_required";
+/** Question for the user */
+question: string;
+/** Gadget name requesting input */
+gadgetName: string;
+/** Invocation ID */
+invocationId: string;
 }
 /**
-*
-*
-* @example
-* ```typescript
-* // Custom configuration
-* const agent = await LLMist.createAgent()
-* .withModel('sonnet')
-* .withCompaction({
-* triggerThresholdPercent: 70,
-* targetPercent: 40,
-* preserveRecentTurns: 10,
-* })
-* .ask('...');
-*
-* // Disable compaction
-* const agent = await LLMist.createAgent()
-* .withModel('sonnet')
-* .withoutCompaction()
-* .ask('...');
-* ```
+* Emitted when the execution stream completes.
 */
-interface
-
-
-
-
-
-/**
-
-* - 'sliding-window': Fast, drops oldest turns (no LLM call)
-* - 'summarization': LLM-based compression of old messages
-* - 'hybrid': Summarizes old messages + keeps recent turns (recommended)
-* - Or provide a custom CompactionStrategy instance
-* @default 'hybrid'
-*/
-strategy?: "sliding-window" | "summarization" | "hybrid" | CompactionStrategy;
-/**
-* Context usage percentage that triggers compaction.
-* When token count exceeds this percentage of the context window,
-* compaction is performed before the next LLM call.
-* @default 80
-*/
-triggerThresholdPercent?: number;
-/**
-* Target context usage percentage after compaction.
-* The compaction will aim to reduce tokens to this percentage.
-* @default 50
-*/
-targetPercent?: number;
-/**
-* Number of recent turns to preserve during compaction.
-* A "turn" is a user message + assistant response pair.
-* Recent turns are kept verbatim while older ones are summarized/dropped.
-* @default 5
-*/
-preserveRecentTurns?: number;
-/**
-* Model to use for summarization.
-* If not specified, uses the agent's model.
-* @default undefined (uses agent's model)
-*/
-summarizationModel?: string;
-/**
-* Custom system prompt for summarization.
-* If not specified, uses a default prompt optimized for context preservation.
-*/
-summarizationPrompt?: string;
-/**
-* Callback invoked when compaction occurs.
-* Useful for logging or analytics.
-*/
-onCompaction?: (event: CompactionEvent$1) => void;
+interface StreamCompleteEvent extends BaseExecutionEvent {
+type: "stream_complete";
+/** Whether any gadgets were executed */
+didExecuteGadgets: boolean;
+/** Whether the agent loop should break */
+shouldBreakLoop: boolean;
+/** Total cost for this iteration */
+iterationCost?: number;
 }
 /**
-*
-* Compaction is enabled by default with the hybrid strategy.
+* All LLM-related events.
 */
-
+type LLMEvent = LLMCallStartEvent | LLMCallStreamEvent | LLMCallCompleteEvent | LLMCallErrorEvent;
 /**
-*
+* All gadget-related events.
 */
-
+type GadgetEvent = GadgetCallEvent | GadgetStartEvent | GadgetCompleteEvent | GadgetErrorEvent | GadgetSkippedEvent$1;
 /**
-*
+* Union of all execution events.
 */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-* });
-*
-* // Cancel after 5 seconds
-* setTimeout(() => controller.abort(), 5000);
-*
-* try {
-* for await (const chunk of stream) {
-* process.stdout.write(chunk.text);
-* }
-* } catch (error) {
-* if (isAbortError(error)) {
-* console.log("\nRequest was cancelled");
-* } else {
-* throw error;
-* }
-* }
-* ```
-*/
-signal?: AbortSignal;
-}
-interface TokenUsage {
-inputTokens: number;
-outputTokens: number;
-totalTokens: number;
-/** Number of input tokens served from cache (subset of inputTokens) */
-cachedInputTokens?: number;
-/** Number of input tokens written to cache (subset of inputTokens, Anthropic only) */
-cacheCreationInputTokens?: number;
-}
-interface LLMStreamChunk {
-text: string;
-/**
-* Indicates that the provider has finished producing output and includes the reason if available.
-*/
-finishReason?: string | null;
-/**
-* Token usage information, typically available in the final chunk when the stream completes.
-*/
-usage?: TokenUsage;
-/**
-* Provider specific payload emitted at the same time as the text chunk. This is useful for debugging and tests.
-*/
-rawEvent?: unknown;
-}
-interface LLMStream extends AsyncIterable<LLMStreamChunk> {
-}
-type ProviderIdentifier = string;
-interface ModelDescriptor {
-provider: string;
-name: string;
-}
-declare class ModelIdentifierParser {
-private readonly defaultProvider;
-constructor(defaultProvider?: string);
-parse(identifier: string): ModelDescriptor;
-}
+type ExecutionEvent = LLMCallStartEvent | LLMCallStreamEvent | LLMCallCompleteEvent | LLMCallErrorEvent | GadgetCallEvent | GadgetStartEvent | GadgetCompleteEvent | GadgetErrorEvent | GadgetSkippedEvent$1 | TextEvent | CompactionEvent$1 | HumanInputRequiredEvent | StreamCompleteEvent;
+/**
+* Event type discriminator.
+*/
+type ExecutionEventType = ExecutionEvent["type"] | "*";
+/**
+* Check if an event is an LLM event.
+*/
+declare function isLLMEvent(event: ExecutionEvent): event is LLMEvent;
+/**
+* Check if an event is a gadget event.
+*/
+declare function isGadgetEvent(event: ExecutionEvent): event is GadgetEvent;
+/**
+* Check if an event is from a subagent (nested execution).
+*/
+declare function isSubagentEvent(event: ExecutionEvent): boolean;
+/**
+* Check if an event is from the root agent.
+*/
+declare function isRootEvent(event: ExecutionEvent): boolean;
+/**
+* Filter events by depth.
+*/
+declare function filterByDepth(events: ExecutionEvent[], depth: number): ExecutionEvent[];
+/**
+* Filter events by parent node.
+*/
+declare function filterByParent(events: ExecutionEvent[], parentId: string): ExecutionEvent[];
+/**
+* Filter events to only root-level events.
+*/
+declare function filterRootEvents(events: ExecutionEvent[]): ExecutionEvent[];
+/**
+* Group events by their parent node.
+*/
+declare function groupByParent(events: ExecutionEvent[]): Map<string | null, ExecutionEvent[]>;
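The event helpers added above (`isGadgetEvent`, `filterByDepth`, and the other declared functions) are only type declarations in this diff; a minimal consumer sketch, assuming these names are re-exported from the package root "llmist" (not confirmed by this diff), could look like:

```typescript
// Sketch only: the "llmist" import path is an assumption, not confirmed by this diff.
import type { ExecutionEvent } from "llmist";
import { filterByDepth, isGadgetEvent } from "llmist";

// Report root-level (depth 0) gadget completions from a captured event log.
function reportRootGadgets(events: ExecutionEvent[]): void {
  for (const event of filterByDepth(events, 0)) {
    if (isGadgetEvent(event) && event.type === "gadget_complete") {
      console.log(`${event.name} finished in ${event.executionTimeMs}ms`);
    }
  }
}
```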
612
540
|
|
|
613
541
|
/**
|
|
614
|
-
*
|
|
542
|
+
* First-class Execution Tree model for nested subagent support.
|
|
615
543
|
*
|
|
616
|
-
*
|
|
617
|
-
*
|
|
618
|
-
*
|
|
544
|
+
* The ExecutionTree is THE single source of truth for execution state.
|
|
545
|
+
* All nodes (including nested subagent nodes) live in one tree.
|
|
546
|
+
* Events are projections of tree changes.
|
|
619
547
|
*
|
|
620
|
-
* @module core/execution-
|
|
548
|
+
* @module core/execution-tree
|
|
621
549
|
*/
|
|
622
550
|
|
|
623
551
|
/**
|
|
624
|
-
*
|
|
625
|
-
*
|
|
552
|
+
* Unique identifier for any execution node.
|
|
553
|
+
* Format examples: "llm_1", "gadget_abc123", "llm_1_2" (nested)
|
|
626
554
|
*/
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
/**
|
|
555
|
+
type NodeId = string;
|
|
556
|
+
/**
|
|
557
|
+
* Node type discriminator.
|
|
558
|
+
*/
|
|
559
|
+
type ExecutionNodeType = "llm_call" | "gadget";
|
|
560
|
+
/**
|
|
561
|
+
* Base properties shared by all execution nodes.
|
|
562
|
+
*/
|
|
563
|
+
interface BaseExecutionNode {
|
|
564
|
+
/** Unique identifier for this node */
|
|
565
|
+
id: NodeId;
|
|
566
|
+
/** Node type discriminator */
|
|
567
|
+
type: ExecutionNodeType;
|
|
568
|
+
/** Parent node ID (null for root nodes) */
|
|
569
|
+
parentId: NodeId | null;
|
|
570
|
+
/** Nesting depth (0 = root, 1 = child of gadget, etc.) */
|
|
637
571
|
depth: number;
|
|
638
|
-
/**
|
|
639
|
-
path:
|
|
572
|
+
/** Path from root to this node: ["llm_1", "gadget_abc", "llm_1_1"] */
|
|
573
|
+
path: NodeId[];
|
|
574
|
+
/** Creation timestamp */
|
|
575
|
+
createdAt: number;
|
|
576
|
+
/** Completion timestamp (null if in progress) */
|
|
577
|
+
completedAt: number | null;
|
|
640
578
|
}
|
|
641
579
|
/**
|
|
642
|
-
*
|
|
580
|
+
* LLM call execution node.
|
|
643
581
|
*/
|
|
644
|
-
interface
|
|
645
|
-
type: "
|
|
646
|
-
/** Iteration number within agent loop (1-indexed) */
|
|
582
|
+
interface LLMCallNode extends BaseExecutionNode {
|
|
583
|
+
type: "llm_call";
|
|
584
|
+
/** Iteration number within the agent loop (1-indexed for display) */
|
|
647
585
|
iteration: number;
|
|
648
586
|
/** Model identifier */
|
|
649
587
|
model: string;
|
|
650
|
-
/** Request messages */
|
|
588
|
+
/** Request messages (set when call starts) */
|
|
651
589
|
request?: LLMMessage[];
|
|
652
|
-
|
|
653
|
-
/**
|
|
654
|
-
* Emitted for each streaming chunk from LLM.
|
|
655
|
-
*/
|
|
656
|
-
interface LLMCallStreamEvent extends BaseExecutionEvent {
|
|
657
|
-
type: "llm_call_stream";
|
|
658
|
-
/** Text chunk */
|
|
659
|
-
chunk: string;
|
|
660
|
-
}
|
|
661
|
-
/**
|
|
662
|
-
* Emitted when an LLM call completes successfully.
|
|
663
|
-
*/
|
|
664
|
-
interface LLMCallCompleteEvent extends BaseExecutionEvent {
|
|
665
|
-
type: "llm_call_complete";
|
|
666
|
-
/** Complete response text */
|
|
590
|
+
/** Accumulated response text */
|
|
667
591
|
response: string;
|
|
668
|
-
/** Token usage */
|
|
592
|
+
/** Token usage (set on completion) */
|
|
669
593
|
usage?: TokenUsage;
|
|
670
594
|
/** Finish reason from LLM */
|
|
671
595
|
finishReason?: string | null;
|
|
672
596
|
/** Cost in USD */
|
|
673
597
|
cost?: number;
|
|
598
|
+
/** Child node IDs (gadgets spawned by this LLM call) */
|
|
599
|
+
children: NodeId[];
|
|
674
600
|
}
|
|
675
601
|
/**
|
|
676
|
-
*
|
|
602
|
+
* Gadget execution state.
|
|
677
603
|
*/
|
|
678
|
-
|
|
679
|
-
type: "llm_call_error";
|
|
680
|
-
/** The error that occurred */
|
|
681
|
-
error: Error;
|
|
682
|
-
/** Whether the error was recovered by a controller */
|
|
683
|
-
recovered: boolean;
|
|
684
|
-
}
|
|
604
|
+
type GadgetState = "pending" | "running" | "completed" | "failed" | "skipped";
|
|
685
605
|
/**
|
|
686
|
-
*
|
|
606
|
+
* Gadget execution node.
|
|
687
607
|
*/
|
|
688
|
-
interface
|
|
689
|
-
type: "
|
|
690
|
-
/** Invocation ID */
|
|
608
|
+
interface GadgetNode extends BaseExecutionNode {
|
|
609
|
+
type: "gadget";
|
|
610
|
+
/** Invocation ID (LLM-generated or auto) */
|
|
691
611
|
invocationId: string;
|
|
692
612
|
/** Gadget name */
|
|
693
613
|
name: string;
|
|
694
|
-
/** Parameters */
|
|
614
|
+
/** Parameters passed to the gadget */
|
|
695
615
|
parameters: Record<string, unknown>;
|
|
696
|
-
/** Dependencies (other invocation IDs) */
|
|
616
|
+
/** Dependencies (other invocation IDs this gadget waits for) */
|
|
697
617
|
dependencies: string[];
|
|
618
|
+
/** Execution state */
|
|
619
|
+
state: GadgetState;
|
|
620
|
+
/** Result string (if completed successfully) */
|
|
621
|
+
result?: string;
|
|
622
|
+
/** Error message (if failed or skipped) */
|
|
623
|
+
error?: string;
|
|
624
|
+
/** Failed dependency invocation ID (if skipped due to dependency) */
|
|
625
|
+
failedDependency?: string;
|
|
626
|
+
/** Execution time in milliseconds */
|
|
627
|
+
executionTimeMs?: number;
|
|
628
|
+
/** Cost in USD */
|
|
629
|
+
cost?: number;
|
|
630
|
+
/** Media outputs from this gadget */
|
|
631
|
+
media?: GadgetMediaOutput[];
|
|
632
|
+
/** Child node IDs (nested LLM calls for subagent gadgets) */
|
|
633
|
+
children: NodeId[];
|
|
634
|
+
/** Whether this gadget is a subagent (has nested LLM calls) */
|
|
635
|
+
isSubagent: boolean;
|
|
698
636
|
}
|
|
699
637
|
/**
|
|
700
|
-
*
|
|
638
|
+
* Union of all execution node types.
|
|
701
639
|
*/
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
/**
|
|
705
|
-
|
|
706
|
-
/**
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
/** Invocation ID */
|
|
715
|
-
invocationId: string;
|
|
716
|
-
/** Gadget name */
|
|
717
|
-
name: string;
|
|
718
|
-
/** Result string */
|
|
719
|
-
result: string;
|
|
720
|
-
/** Execution time in ms */
|
|
721
|
-
executionTimeMs: number;
|
|
722
|
-
/** Cost in USD */
|
|
723
|
-
cost?: number;
|
|
724
|
-
/** Media outputs */
|
|
725
|
-
media?: GadgetMediaOutput[];
|
|
726
|
-
}
|
|
727
|
-
/**
|
|
728
|
-
* Emitted when gadget execution fails.
|
|
729
|
-
*/
|
|
730
|
-
interface GadgetErrorEvent extends BaseExecutionEvent {
|
|
731
|
-
type: "gadget_error";
|
|
732
|
-
/** Invocation ID */
|
|
733
|
-
invocationId: string;
|
|
734
|
-
/** Gadget name */
|
|
735
|
-
name: string;
|
|
736
|
-
/** Error message */
|
|
737
|
-
error: string;
|
|
738
|
-
/** Execution time in ms */
|
|
739
|
-
executionTimeMs: number;
|
|
740
|
-
}
|
|
741
|
-
/**
|
|
742
|
-
* Emitted when a gadget is skipped.
|
|
743
|
-
*/
|
|
744
|
-
interface GadgetSkippedEvent$1 extends BaseExecutionEvent {
|
|
745
|
-
type: "gadget_skipped";
|
|
746
|
-
/** Invocation ID */
|
|
747
|
-
invocationId: string;
|
|
748
|
-
/** Gadget name */
|
|
749
|
-
name: string;
|
|
750
|
-
/** Reason for skipping */
|
|
751
|
-
reason: "dependency_failed" | "controller_skip";
|
|
752
|
-
/** Error message (combines reason and failedDependencyError for consistency with GadgetErrorEvent) */
|
|
753
|
-
error: string;
|
|
754
|
-
/** Failed dependency invocation ID (if dependency_failed) */
|
|
755
|
-
failedDependency?: string;
|
|
756
|
-
/** Error message from failed dependency */
|
|
757
|
-
failedDependencyError?: string;
|
|
758
|
-
}
|
|
759
|
-
/**
|
|
760
|
-
* Emitted for text output from LLM (pure notification, not a tree node).
|
|
761
|
-
*/
|
|
762
|
-
interface TextEvent extends BaseExecutionEvent {
|
|
763
|
-
type: "text";
|
|
764
|
-
/** Text content */
|
|
765
|
-
content: string;
|
|
766
|
-
}
|
|
767
|
-
/**
|
|
768
|
-
* Emitted when context compaction occurs.
|
|
769
|
-
*/
|
|
770
|
-
interface CompactionEvent extends BaseExecutionEvent {
|
|
771
|
-
type: "compaction";
|
|
772
|
-
/** Tokens before compaction */
|
|
773
|
-
tokensBefore: number;
|
|
774
|
-
/** Tokens after compaction */
|
|
775
|
-
tokensAfter: number;
|
|
776
|
-
/** Compaction strategy used */
|
|
777
|
-
strategy: string;
|
|
778
|
-
/** Messages removed */
|
|
779
|
-
messagesRemoved: number;
|
|
780
|
-
}
|
|
781
|
-
/**
|
|
782
|
-
* Emitted when human input is required.
|
|
783
|
-
*/
|
|
784
|
-
interface HumanInputRequiredEvent extends BaseExecutionEvent {
|
|
785
|
-
type: "human_input_required";
|
|
786
|
-
/** Question for the user */
|
|
787
|
-
question: string;
|
|
788
|
-
/** Gadget name requesting input */
|
|
789
|
-
gadgetName: string;
|
|
790
|
-
/** Invocation ID */
|
|
791
|
-
invocationId: string;
|
|
792
|
-
}
|
|
793
|
-
/**
|
|
794
|
-
* Emitted when the execution stream completes.
|
|
795
|
-
*/
|
|
796
|
-
interface StreamCompleteEvent extends BaseExecutionEvent {
|
|
797
|
-
type: "stream_complete";
|
|
798
|
-
/** Whether any gadgets were executed */
|
|
799
|
-
didExecuteGadgets: boolean;
|
|
800
|
-
/** Whether the agent loop should break */
|
|
801
|
-
shouldBreakLoop: boolean;
|
|
802
|
-
/** Total cost for this iteration */
|
|
803
|
-
iterationCost?: number;
|
|
804
|
-
}
|
|
805
|
-
/**
|
|
806
|
-
* All LLM-related events.
|
|
807
|
-
*/
|
|
808
|
-
type LLMEvent = LLMCallStartEvent | LLMCallStreamEvent | LLMCallCompleteEvent | LLMCallErrorEvent;
|
|
809
|
-
/**
|
|
810
|
-
* All gadget-related events.
|
|
811
|
-
*/
|
|
812
|
-
type GadgetEvent = GadgetCallEvent | GadgetStartEvent | GadgetCompleteEvent | GadgetErrorEvent | GadgetSkippedEvent$1;
|
|
813
|
-
/**
|
|
814
|
-
* Union of all execution events.
|
|
815
|
-
*/
|
|
816
|
-
type ExecutionEvent = LLMCallStartEvent | LLMCallStreamEvent | LLMCallCompleteEvent | LLMCallErrorEvent | GadgetCallEvent | GadgetStartEvent | GadgetCompleteEvent | GadgetErrorEvent | GadgetSkippedEvent$1 | TextEvent | CompactionEvent | HumanInputRequiredEvent | StreamCompleteEvent;
|
|
817
|
-
/**
|
|
818
|
-
* Event type discriminator.
|
|
819
|
-
*/
|
|
820
|
-
type ExecutionEventType = ExecutionEvent["type"] | "*";
|
|
821
|
-
/**
|
|
822
|
-
* Check if an event is an LLM event.
|
|
823
|
-
*/
|
|
824
|
-
declare function isLLMEvent(event: ExecutionEvent): event is LLMEvent;
|
|
825
|
-
/**
|
|
826
|
-
* Check if an event is a gadget event.
|
|
827
|
-
*/
|
|
828
|
-
declare function isGadgetEvent(event: ExecutionEvent): event is GadgetEvent;
|
|
829
|
-
/**
|
|
830
|
-
* Check if an event is from a subagent (nested execution).
|
|
831
|
-
*/
|
|
832
|
-
declare function isSubagentEvent(event: ExecutionEvent): boolean;
|
|
833
|
-
/**
|
|
834
|
-
* Check if an event is from the root agent.
|
|
835
|
-
*/
|
|
836
|
-
declare function isRootEvent(event: ExecutionEvent): boolean;
|
|
837
|
-
/**
|
|
838
|
-
* Filter events by depth.
|
|
839
|
-
*/
|
|
840
|
-
declare function filterByDepth(events: ExecutionEvent[], depth: number): ExecutionEvent[];
|
|
841
|
-
/**
|
|
842
|
-
* Filter events by parent node.
|
|
843
|
-
*/
|
|
844
|
-
declare function filterByParent(events: ExecutionEvent[], parentId: string): ExecutionEvent[];
|
|
845
|
-
/**
|
|
846
|
-
* Filter events to only root-level events.
|
|
847
|
-
*/
|
|
848
|
-
declare function filterRootEvents(events: ExecutionEvent[]): ExecutionEvent[];
|
|
849
|
-
/**
|
|
850
|
-
* Group events by their parent node.
|
|
851
|
-
*/
|
|
852
|
-
declare function groupByParent(events: ExecutionEvent[]): Map<string | null, ExecutionEvent[]>;
|
|
853
|
-
|
|
854
|
-
/**
|
|
855
|
-
* First-class Execution Tree model for nested subagent support.
|
|
856
|
-
*
|
|
857
|
-
* The ExecutionTree is THE single source of truth for execution state.
|
|
858
|
-
* All nodes (including nested subagent nodes) live in one tree.
|
|
859
|
-
* Events are projections of tree changes.
|
|
860
|
-
*
|
|
861
|
-
* @module core/execution-tree
|
|
862
|
-
*/
|
|
863
|
-
|
|
864
|
-
/**
|
|
865
|
-
* Unique identifier for any execution node.
|
|
866
|
-
* Format examples: "llm_1", "gadget_abc123", "llm_1_2" (nested)
|
|
867
|
-
*/
|
|
868
|
-
type NodeId = string;
|
|
869
|
-
/**
|
|
870
|
-
* Node type discriminator.
|
|
871
|
-
*/
|
|
872
|
-
type ExecutionNodeType = "llm_call" | "gadget";
|
|
873
|
-
/**
|
|
874
|
-
* Base properties shared by all execution nodes.
|
|
875
|
-
*/
|
|
876
|
-
interface BaseExecutionNode {
|
|
877
|
-
/** Unique identifier for this node */
|
|
878
|
-
id: NodeId;
|
|
879
|
-
/** Node type discriminator */
|
|
880
|
-
type: ExecutionNodeType;
|
|
881
|
-
/** Parent node ID (null for root nodes) */
|
|
882
|
-
parentId: NodeId | null;
|
|
883
|
-
/** Nesting depth (0 = root, 1 = child of gadget, etc.) */
|
|
884
|
-
depth: number;
|
|
885
|
-
/** Path from root to this node: ["llm_1", "gadget_abc", "llm_1_1"] */
|
|
886
|
-
path: NodeId[];
|
|
887
|
-
/** Creation timestamp */
|
|
888
|
-
createdAt: number;
|
|
889
|
-
/** Completion timestamp (null if in progress) */
|
|
890
|
-
completedAt: number | null;
|
|
891
|
-
}
|
|
892
|
-
/**
|
|
893
|
-
* LLM call execution node.
|
|
894
|
-
*/
|
|
895
|
-
interface LLMCallNode extends BaseExecutionNode {
|
|
896
|
-
type: "llm_call";
|
|
897
|
-
/** Iteration number within the agent loop (1-indexed for display) */
|
|
898
|
-
iteration: number;
|
|
899
|
-
/** Model identifier */
|
|
900
|
-
model: string;
|
|
901
|
-
/** Request messages (set when call starts) */
|
|
902
|
-
request?: LLMMessage[];
|
|
903
|
-
/** Accumulated response text */
|
|
904
|
-
response: string;
|
|
905
|
-
/** Token usage (set on completion) */
|
|
906
|
-
usage?: TokenUsage;
|
|
907
|
-
/** Finish reason from LLM */
|
|
908
|
-
finishReason?: string | null;
|
|
909
|
-
/** Cost in USD */
|
|
910
|
-
cost?: number;
|
|
911
|
-
/** Child node IDs (gadgets spawned by this LLM call) */
|
|
912
|
-
children: NodeId[];
|
|
913
|
-
}
|
|
914
|
-
/**
|
|
915
|
-
* Gadget execution state.
|
|
916
|
-
*/
|
|
917
|
-
type GadgetState = "pending" | "running" | "completed" | "failed" | "skipped";
|
|
918
|
-
/**
|
|
919
|
-
* Gadget execution node.
|
|
920
|
-
*/
|
|
921
|
-
interface GadgetNode extends BaseExecutionNode {
|
|
922
|
-
type: "gadget";
|
|
923
|
-
/** Invocation ID (LLM-generated or auto) */
|
|
924
|
-
invocationId: string;
|
|
925
|
-
/** Gadget name */
|
|
926
|
-
name: string;
|
|
927
|
-
/** Parameters passed to the gadget */
|
|
928
|
-
parameters: Record<string, unknown>;
|
|
929
|
-
/** Dependencies (other invocation IDs this gadget waits for) */
|
|
930
|
-
dependencies: string[];
|
|
931
|
-
/** Execution state */
|
|
932
|
-
state: GadgetState;
|
|
933
|
-
/** Result string (if completed successfully) */
|
|
934
|
-
result?: string;
|
|
935
|
-
/** Error message (if failed or skipped) */
|
|
936
|
-
error?: string;
|
|
937
|
-
/** Failed dependency invocation ID (if skipped due to dependency) */
|
|
938
|
-
failedDependency?: string;
|
|
939
|
-
/** Execution time in milliseconds */
|
|
940
|
-
executionTimeMs?: number;
|
|
941
|
-
/** Cost in USD */
|
|
942
|
-
cost?: number;
|
|
943
|
-
/** Media outputs from this gadget */
|
|
944
|
-
media?: GadgetMediaOutput[];
|
|
945
|
-
/** Child node IDs (nested LLM calls for subagent gadgets) */
|
|
946
|
-
children: NodeId[];
|
|
947
|
-
/** Whether this gadget is a subagent (has nested LLM calls) */
|
|
948
|
-
isSubagent: boolean;
|
|
949
|
-
}
|
|
950
|
-
/**
|
|
951
|
-
* Union of all execution node types.
|
|
952
|
-
*/
|
|
953
|
-
type ExecutionNode = LLMCallNode | GadgetNode;
|
|
954
|
-
interface AddLLMCallParams {
|
|
955
|
-
/** Iteration number (1-indexed) */
|
|
956
|
-
iteration: number;
|
|
957
|
-
/** Model identifier */
|
|
958
|
-
model: string;
|
|
959
|
-
/** Request messages */
|
|
960
|
-
request?: LLMMessage[];
|
|
961
|
-
/** Parent node ID (for subagent LLM calls) */
|
|
962
|
-
parentId?: NodeId | null;
|
|
963
|
-
}
|
|
964
|
-
interface AddGadgetParams {
|
|
640
|
+
type ExecutionNode = LLMCallNode | GadgetNode;
|
|
641
|
+
interface AddLLMCallParams {
|
|
642
|
+
/** Iteration number (1-indexed) */
|
|
643
|
+
iteration: number;
|
|
644
|
+
/** Model identifier */
|
|
645
|
+
model: string;
|
|
646
|
+
/** Request messages */
|
|
647
|
+
request?: LLMMessage[];
|
|
648
|
+
/** Parent node ID (for subagent LLM calls) */
|
|
649
|
+
parentId?: NodeId | null;
|
|
650
|
+
}
|
|
651
|
+
interface AddGadgetParams {
|
|
965
652
|
/** Invocation ID */
|
|
966
653
|
invocationId: string;
|
|
967
654
|
/** Gadget name */
|
|
@@ -1100,103 +787,682 @@ declare class ExecutionTree {
|
|
|
1100
787
|
*/
|
|
1101
788
|
getNode(id: NodeId): ExecutionNode | undefined;
|
|
1102
789
|
/**
|
|
1103
|
-
* Get a gadget node by invocation ID.
|
|
790
|
+
* Get a gadget node by invocation ID.
|
|
791
|
+
*/
|
|
792
|
+
getNodeByInvocationId(invocationId: string): GadgetNode | undefined;
|
|
793
|
+
/**
|
|
794
|
+
* Get all root nodes (depth 0 for this tree).
|
|
795
|
+
*/
|
|
796
|
+
getRoots(): ExecutionNode[];
|
|
797
|
+
/**
|
|
798
|
+
* Get children of a node.
|
|
799
|
+
*/
|
|
800
|
+
getChildren(id: NodeId): ExecutionNode[];
|
|
801
|
+
/**
|
|
802
|
+
* Get ancestors of a node (from root to parent).
|
|
803
|
+
*/
|
|
804
|
+
getAncestors(id: NodeId): ExecutionNode[];
|
|
805
|
+
/**
|
|
806
|
+
* Get all descendants of a node.
|
|
807
|
+
*/
|
|
808
|
+
getDescendants(id: NodeId, type?: ExecutionNodeType): ExecutionNode[];
|
|
809
|
+
/**
|
|
810
|
+
* Get the current (most recent incomplete) LLM call node.
|
|
811
|
+
*/
|
|
812
|
+
getCurrentLLMCallId(): NodeId | undefined;
|
|
813
|
+
/**
|
|
814
|
+
* Get total cost for entire tree.
|
|
815
|
+
*/
|
|
816
|
+
getTotalCost(): number;
|
|
817
|
+
/**
|
|
818
|
+
* Get total cost for a subtree (node and all descendants).
|
|
819
|
+
*/
|
|
820
|
+
getSubtreeCost(nodeId: NodeId): number;
|
|
821
|
+
/**
|
|
822
|
+
* Get token usage for entire tree.
|
|
823
|
+
*/
|
|
824
|
+
getTotalTokens(): {
|
|
825
|
+
input: number;
|
|
826
|
+
output: number;
|
|
827
|
+
cached: number;
|
|
828
|
+
};
|
|
829
|
+
/**
|
|
830
|
+
* Get token usage for a subtree.
|
|
831
|
+
*/
|
|
832
|
+
getSubtreeTokens(nodeId: NodeId): {
|
|
833
|
+
input: number;
|
|
834
|
+
output: number;
|
|
835
|
+
cached: number;
|
|
836
|
+
};
|
|
837
|
+
/**
|
|
838
|
+
* Collect all media from a subtree.
|
|
1104
839
|
*/
|
|
1105
|
-
|
|
840
|
+
getSubtreeMedia(nodeId: NodeId): GadgetMediaOutput[];
|
|
1106
841
|
/**
|
|
1107
|
-
*
|
|
842
|
+
* Check if a subtree is complete (all nodes finished).
|
|
1108
843
|
*/
|
|
1109
|
-
|
|
844
|
+
isSubtreeComplete(nodeId: NodeId): boolean;
|
|
1110
845
|
/**
|
|
1111
|
-
* Get
|
|
846
|
+
* Get node counts.
|
|
1112
847
|
*/
|
|
1113
|
-
|
|
848
|
+
getNodeCount(): {
|
|
849
|
+
llmCalls: number;
|
|
850
|
+
gadgets: number;
|
|
851
|
+
};
|
|
1114
852
|
/**
|
|
1115
|
-
*
|
|
853
|
+
* Subscribe to events of a specific type.
|
|
854
|
+
* Returns unsubscribe function.
|
|
855
|
+
*
|
|
856
|
+
* @param type - Event type to subscribe to (use "*" for all events)
|
|
857
|
+
* @param listener - Callback function that receives matching events
|
|
858
|
+
* @returns Unsubscribe function
|
|
859
|
+
*
|
|
860
|
+
* @example
|
|
861
|
+
* ```typescript
|
|
862
|
+
* const unsubscribe = tree.on("gadget_complete", (event) => {
|
|
863
|
+
* if (event.type === "gadget_complete") {
|
|
864
|
+
* console.log(`Gadget ${event.name} completed`);
|
|
865
|
+
* }
|
|
866
|
+
* });
|
|
867
|
+
* ```
|
|
1116
868
|
*/
|
|
1117
|
-
|
|
869
|
+
on(type: ExecutionEventType, listener: EventListener): () => void;
|
|
1118
870
|
/**
|
|
1119
|
-
*
|
|
871
|
+
* Subscribe to all events.
|
|
1120
872
|
*/
|
|
1121
|
-
|
|
873
|
+
onAll(listener: EventListener): () => void;
|
|
1122
874
|
/**
|
|
1123
|
-
* Get
|
|
875
|
+
* Get async iterable of all events.
|
|
876
|
+
* Events are yielded as they occur.
|
|
1124
877
|
*/
|
|
1125
|
-
|
|
878
|
+
events(): AsyncGenerator<ExecutionEvent>;
|
|
1126
879
|
/**
|
|
1127
|
-
*
|
|
880
|
+
* Mark the tree as complete (no more events will be emitted).
|
|
1128
881
|
*/
|
|
1129
|
-
|
|
882
|
+
complete(): void;
|
|
1130
883
|
/**
|
|
1131
|
-
*
|
|
884
|
+
* Check if the tree is complete.
|
|
1132
885
|
*/
|
|
1133
|
-
|
|
886
|
+
isComplete(): boolean;
|
|
887
|
+
}
|
|
888
|
+
|
|
889
|
+
/**
|
|
890
|
+
* Function-based gadget creation helper.
|
|
891
|
+
*
|
|
892
|
+
* For simple gadgets, use createGadget() instead of defining a class.
|
|
893
|
+
* Parameters are automatically typed from the Zod schema.
|
|
894
|
+
*
|
|
895
|
+
* @example
|
|
896
|
+
* ```typescript
|
|
897
|
+
* const calculator = createGadget({
|
|
898
|
+
* description: "Performs arithmetic operations",
|
|
899
|
+
* schema: z.object({
|
|
900
|
+
* operation: z.enum(["add", "subtract"]),
|
|
901
|
+
* a: z.number(),
|
|
902
|
+
* b: z.number(),
|
|
903
|
+
* }),
|
|
904
|
+
* execute: ({ operation, a, b }) => {
|
|
905
|
+
* // Automatically typed!
|
|
906
|
+
* return operation === "add" ? String(a + b) : String(a - b);
|
|
907
|
+
* },
|
|
908
|
+
* });
|
|
909
|
+
* ```
|
|
910
|
+
*/
|
|
911
|
+
|
|
912
|
+
/**
|
|
913
|
+
* Infer the TypeScript type from a Zod schema.
|
|
914
|
+
*/
|
|
915
|
+
type InferSchema$1<T> = T extends ZodType<infer U> ? U : never;
|
|
916
|
+
/**
|
|
917
|
+
* Configuration for creating a function-based gadget.
|
|
918
|
+
*/
|
|
919
|
+
interface CreateGadgetConfig<TSchema extends ZodType> {
|
|
920
|
+
/** Optional custom name (defaults to "FunctionGadget") */
|
|
921
|
+
name?: string;
|
|
922
|
+
/** Human-readable description of what the gadget does */
|
|
923
|
+
description: string;
|
|
924
|
+
/** Zod schema for parameter validation */
|
|
925
|
+
schema: TSchema;
|
|
1134
926
|
/**
|
|
1135
|
-
*
|
|
927
|
+
* Execution function with typed parameters.
|
|
928
|
+
* Can return string or { result, cost? }.
|
|
929
|
+
* Optionally receives ExecutionContext for callback-based cost reporting.
|
|
1136
930
|
*/
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
931
|
+
execute: (params: InferSchema$1<TSchema>, ctx?: ExecutionContext) => GadgetExecuteReturn | Promise<GadgetExecuteReturn>;
|
|
932
|
+
/** Optional timeout in milliseconds */
|
|
933
|
+
timeoutMs?: number;
|
|
934
|
+
/** Optional usage examples to help LLMs understand proper invocation */
|
|
935
|
+
examples?: GadgetExample<InferSchema$1<TSchema>>[];
|
|
936
|
+
}
|
|
937
|
+
/**
|
|
938
|
+
* Creates a gadget from a function (simpler than class-based approach).
|
|
939
|
+
*
|
|
940
|
+
* This is perfect for simple gadgets where you don't need the full
|
|
941
|
+
* power of a class. Parameters are automatically typed from the schema.
|
|
942
|
+
*
|
|
943
|
+
* @param config - Configuration with execute function and schema
|
|
944
|
+
* @returns Gadget instance ready to be registered
|
|
945
|
+
*
|
|
946
|
+
* @example
|
|
947
|
+
* ```typescript
|
|
948
|
+
* import { z } from 'zod';
|
|
949
|
+
* import { createGadget } from 'llmist';
|
|
950
|
+
*
|
|
951
|
+
* // Simple calculator gadget
|
|
952
|
+
* const calculator = createGadget({
|
|
953
|
+
* description: "Performs arithmetic operations",
|
|
954
|
+
* schema: z.object({
|
|
955
|
+
* operation: z.enum(["add", "subtract", "multiply", "divide"]),
|
|
956
|
+
* a: z.number().describe("First number"),
|
|
957
|
+
* b: z.number().describe("Second number"),
|
|
958
|
+
* }),
|
|
959
|
+
* execute: ({ operation, a, b }) => {
|
|
960
|
+
* // Parameters are automatically typed!
|
|
961
|
+
* switch (operation) {
|
|
962
|
+
* case "add": return String(a + b);
|
|
963
|
+
* case "subtract": return String(a - b);
|
|
964
|
+
* case "multiply": return String(a * b);
|
|
965
|
+
* case "divide": return String(a / b);
|
|
966
|
+
* }
|
|
967
|
+
* },
|
|
968
|
+
* });
|
|
969
|
+
* ```
|
|
970
|
+
*
|
|
971
|
+
* @example
|
|
972
|
+
* ```typescript
|
|
973
|
+
* // Async gadget with custom name and timeout
|
|
974
|
+
* const weather = createGadget({
|
|
975
|
+
* name: "weather",
|
|
976
|
+
* description: "Fetches current weather for a city",
|
|
977
|
+
* schema: z.object({
|
|
978
|
+
* city: z.string().min(1).describe("City name"),
|
|
979
|
+
* }),
|
|
980
|
+
* timeoutMs: 10000,
|
|
981
|
+
* execute: async ({ city }) => {
|
|
982
|
+
* const response = await fetch(`https://api.weather.com/${city}`);
|
|
983
|
+
* const data = await response.json();
|
|
984
|
+
* return `Weather in ${city}: ${data.description}, ${data.temp}°C`;
|
|
985
|
+
* },
|
|
986
|
+
* });
|
|
987
|
+
* ```
|
|
988
|
+
*
|
|
989
|
+
* @example
|
|
990
|
+
* ```typescript
|
|
991
|
+
* // Use with agent
|
|
992
|
+
* const agent = LLMist.createAgent()
|
|
993
|
+
* .withGadgets(calculator, weather)
|
|
994
|
+
* .ask("What's the weather in Paris and what's 10 + 5?");
|
|
995
|
+
* ```
|
|
996
|
+
*/
|
|
997
|
+
declare function createGadget<TSchema extends ZodType>(config: CreateGadgetConfig<TSchema>): AbstractGadget;
|
|
998
|
+
|
|
999
|
+
/**
|
|
1000
|
+
* Type-safe gadget factory with automatic parameter inference.
|
|
1001
|
+
*
|
|
1002
|
+
* Gadget eliminates the need for manual type assertions
|
|
1003
|
+
* by automatically inferring parameter types from the Zod schema.
|
|
1004
|
+
*
|
|
1005
|
+
* @example
|
|
1006
|
+
* ```typescript
|
|
1007
|
+
* class Calculator extends Gadget({
|
|
1008
|
+
* description: "Performs arithmetic operations",
|
|
1009
|
+
* schema: z.object({
|
|
1010
|
+
* operation: z.enum(["add", "subtract"]),
|
|
1011
|
+
* a: z.number(),
|
|
1012
|
+
* b: z.number(),
|
|
1013
|
+
* }),
|
|
1014
|
+
* }) {
|
|
1015
|
+
* // ✨ params is automatically typed!
|
|
1016
|
+
* execute(params: this['params']): string {
|
|
1017
|
+
* const { operation, a, b } = params; // All typed!
|
|
1018
|
+
* return operation === "add" ? String(a + b) : String(a - b);
|
|
1019
|
+
* }
|
|
1020
|
+
* }
|
|
1021
|
+
* ```
|
|
1022
|
+
*/
|
|
1023
|
+
|
|
1024
|
+
/**
|
|
1025
|
+
* Infer the TypeScript type from a Zod schema.
|
|
1026
|
+
*/
|
|
1027
|
+
type InferSchema<T> = T extends ZodType<infer U> ? U : never;
|
|
1028
|
+
/**
|
|
1029
|
+
* Configuration for creating a typed gadget.
|
|
1030
|
+
*/
|
|
1031
|
+
interface GadgetConfig<TSchema extends ZodType> {
|
|
1032
|
+
/** Human-readable description of what the gadget does */
|
|
1033
|
+
description: string;
|
|
1034
|
+
/** Zod schema for parameter validation */
|
|
1035
|
+
schema: TSchema;
|
|
1036
|
+
/** Optional custom name (defaults to class name) */
|
|
1037
|
+
name?: string;
|
|
1038
|
+
/** Optional timeout in milliseconds */
|
|
1039
|
+
timeoutMs?: number;
|
|
1040
|
+
/** Optional usage examples to help LLMs understand proper invocation */
|
|
1041
|
+
examples?: GadgetExample<InferSchema<TSchema>>[];
|
|
1042
|
+
}
|
|
1043
|
+
/**
|
|
1044
|
+
* Factory function to create a typed gadget base class.
|
|
1045
|
+
*
|
|
1046
|
+
* The returned class automatically infers parameter types from the Zod schema,
|
|
1047
|
+
* eliminating the need for manual type assertions in the execute method.
|
|
1048
|
+
*
|
|
1049
|
+
* @param config - Configuration with description and schema
|
|
1050
|
+
* @returns Base class to extend with typed execute method
|
|
1051
|
+
*
|
|
1052
|
+
* @example
|
|
1053
|
+
* ```typescript
|
|
1054
|
+
* import { z } from 'zod';
|
|
1055
|
+
* import { Gadget } from 'llmist';
|
|
1056
|
+
*
|
|
1057
|
+
* class Calculator extends Gadget({
|
|
1058
|
+
* description: "Performs arithmetic operations",
|
|
1059
|
+
* schema: z.object({
|
|
1060
|
+
* operation: z.enum(["add", "subtract", "multiply", "divide"]),
|
|
1061
|
+
* a: z.number().describe("First number"),
|
|
1062
|
+
* b: z.number().describe("Second number"),
|
|
1063
|
+
* }),
|
|
1064
|
+
* }) {
|
|
1065
|
+
* execute(params: this['params']): string {
|
|
1066
|
+
* // params is automatically typed as:
|
|
1067
|
+
* // { operation: "add" | "subtract" | "multiply" | "divide"; a: number; b: number }
|
|
1068
|
+
* const { operation, a, b } = params;
|
|
1069
|
+
*
|
|
1070
|
+
* switch (operation) {
|
|
1071
|
+
* case "add": return String(a + b);
|
|
1072
|
+
* case "subtract": return String(a - b);
|
|
1073
|
+
* case "multiply": return String(a * b);
|
|
1074
|
+
* case "divide": return String(a / b);
|
|
1075
|
+
* }
|
|
1076
|
+
* }
|
|
1077
|
+
* }
|
|
1078
|
+
* ```
|
|
1079
|
+
*
|
|
1080
|
+
* @example
|
|
1081
|
+
* ```typescript
|
|
1082
|
+
* // With async execution
|
|
1083
|
+
* class WeatherGadget extends Gadget({
|
|
1084
|
+
* description: "Fetches weather for a city",
|
|
1085
|
+
* schema: z.object({
|
|
1086
|
+
* city: z.string().min(1).describe("City name"),
|
|
1087
|
+
* }),
|
|
1088
|
+
* timeoutMs: 10000,
|
|
1089
|
+
* }) {
|
|
1090
|
+
* async execute(params: this['params']): Promise<string> {
|
|
1091
|
+
* const { city } = params; // Automatically typed as { city: string }
|
|
1092
|
+
* const weather = await fetchWeather(city);
|
|
1093
|
+
* return `Weather in ${city}: ${weather}`;
|
|
1094
|
+
* }
|
|
1095
|
+
* }
|
|
1096
|
+
* ```
|
|
1097
|
+
*/
|
|
1098
|
+
declare function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>): {
|
|
1099
|
+
new (): {
|
|
1100
|
+
description: string;
|
|
1101
|
+
parameterSchema: TSchema;
|
|
1102
|
+
name: string | undefined;
|
|
1103
|
+
timeoutMs: number | undefined;
|
|
1104
|
+
examples: GadgetExample<InferSchema<TSchema>>[] | undefined;
|
|
1105
|
+
/**
|
|
1106
|
+
* Type helper property for accessing inferred parameter type.
|
|
1107
|
+
* This is used in the execute method signature: `execute(params: this['params'])`
|
|
1108
|
+
*
|
|
1109
|
+
* Note: This is just for type inference - the actual params in execute()
|
|
1110
|
+
* will be Record<string, unknown> which you can safely cast to this['params']
|
|
1111
|
+
*/
|
|
1112
|
+
readonly params: InferSchema<TSchema>;
|
|
1113
|
+
/**
|
|
1114
|
+
* Execute the gadget. Subclasses should cast params to this['params'].
|
|
1115
|
+
*
|
|
1116
|
+
* @param params - Validated parameters from the LLM
|
|
1117
|
+
* @param ctx - Optional execution context for cost reporting and LLM access
|
|
1118
|
+
* @returns Result as a string, or an object with result and optional cost
|
|
1119
|
+
*
|
|
1120
|
+
* @example
|
|
1121
|
+
* ```typescript
|
|
1122
|
+
* // Simple string return (free gadget)
|
|
1123
|
+
* execute(params: this['params']) {
|
|
1124
|
+
* return String(params.a + params.b);
|
|
1125
|
+
* }
|
|
1126
|
+
*
|
|
1127
|
+
* // Using context for callback-based cost reporting
|
|
1128
|
+
* execute(params: this['params'], ctx) {
|
|
1129
|
+
* ctx.reportCost(0.001);
|
|
1130
|
+
* return "result";
|
|
1131
|
+
* }
|
|
1132
|
+
*
|
|
1133
|
+
* // Using wrapped LLMist for automatic cost tracking
|
|
1134
|
+
* async execute(params: this['params'], ctx) {
|
|
1135
|
+
* return ctx.llmist.complete('Summarize: ' + params.text);
|
|
1136
|
+
* }
|
|
1137
|
+
* ```
|
|
1138
|
+
*/
|
|
1139
|
+
execute(params: Record<string, unknown>, ctx?: ExecutionContext): GadgetExecuteReturn | Promise<GadgetExecuteReturn>;
|
|
1140
|
+
throwIfAborted(ctx?: ExecutionContext): void;
|
|
1141
|
+
onAbort(ctx: ExecutionContext | undefined, cleanup: () => void | Promise<void>): void;
|
|
1142
|
+
createLinkedAbortController(ctx?: ExecutionContext): AbortController;
|
|
1143
|
+
get instruction(): string;
|
|
1144
|
+
getInstruction(optionsOrArgPrefix?: string | {
|
|
1145
|
+
argPrefix?: string;
|
|
1146
|
+
startPrefix?: string;
|
|
1147
|
+
endPrefix?: string;
|
|
1148
|
+
}): string;
|
|
1149
|
+
} & {
|
|
1150
|
+
params: InferSchema<TSchema>;
|
|
1151
|
+
};
|
|
1152
|
+
};
|
|
1153
|
+
|
|
1154
|
+
+/**
+ * Model Catalog Types
+ *
+ * Type definitions for LLM model specifications including
+ * context windows, pricing, features, and capabilities.
+ */
+interface ModelPricing {
+    /** Price per 1 million input tokens in USD */
+    input: number;
+    /** Price per 1 million output tokens in USD */
+    output: number;
+    /** Price per 1 million cached input tokens in USD (if supported) */
+    cachedInput?: number;
+    /** Price per 1 million cache write tokens in USD (Anthropic: 1.25x input price) */
+    cacheWriteInput?: number;
+}
+interface ModelFeatures {
+    /** Supports streaming responses */
+    streaming: boolean;
+    /** Supports function/tool calling */
+    functionCalling: boolean;
+    /** Supports vision/image input */
+    vision: boolean;
+    /** Supports extended thinking/reasoning */
+    reasoning?: boolean;
+    /** Supports structured outputs */
+    structuredOutputs?: boolean;
+    /** Supports fine-tuning */
+    fineTuning?: boolean;
+}
+interface ModelSpec {
+    /** Provider identifier (e.g., 'openai', 'anthropic', 'gemini') */
+    provider: string;
+    /** Full model identifier used in API calls */
+    modelId: string;
+    /** Human-readable display name */
+    displayName: string;
+    /** Maximum context window size in tokens */
+    contextWindow: number;
+    /** Maximum output tokens per request */
+    maxOutputTokens: number;
+    /** Pricing per 1M tokens */
+    pricing: ModelPricing;
+    /** Training data knowledge cutoff date (YYYY-MM-DD or description) */
+    knowledgeCutoff: string;
+    /** Supported features and capabilities */
+    features: ModelFeatures;
+    /** Additional metadata */
+    metadata?: {
+        /** Model family/series */
+        family?: string;
+        /** Release date */
+        releaseDate?: string;
+        /** Deprecation date if applicable */
+        deprecationDate?: string;
+        /** Notes or special information */
+        notes?: string;
+        /** Whether manual temperature configuration is supported (defaults to true) */
+        supportsTemperature?: boolean;
+    };
+}
+interface ModelLimits {
+    contextWindow: number;
+    maxOutputTokens: number;
+}
+interface CostEstimate {
+    inputCost: number;
+    /** Cost for cached input tokens (already included in inputCost calculation) */
+    cachedInputCost: number;
+    /** Cost for cache creation tokens (already included in inputCost calculation, Anthropic only) */
+    cacheCreationCost: number;
+    outputCost: number;
+    totalCost: number;
+    currency: "USD";
+}
+
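To make the pricing fields concrete, here is an illustrative helper that turns a `ModelPricing` entry plus raw token counts into a `CostEstimate`. It is not a function shipped by llmist: the usage shape and the arithmetic (per-1M pricing, cached tokens counted as a subset of input, cache writes ignored) are assumptions spelled out in the comments, consistent with the field docs above.

```typescript
import type { ModelPricing, CostEstimate } from 'llmist';

// Illustrative only; not a library API.
function estimateCost(
  pricing: ModelPricing,
  usage: { inputTokens: number; outputTokens: number; cachedInputTokens?: number },
): CostEstimate {
  const perMillion = 1_000_000;
  const cached = usage.cachedInputTokens ?? 0;
  const uncached = usage.inputTokens - cached;

  // Cached tokens are billed at the cached rate when the model supports it.
  const cachedInputCost = (cached * (pricing.cachedInput ?? pricing.input)) / perMillion;
  const inputCost = (uncached * pricing.input) / perMillion + cachedInputCost;
  const outputCost = (usage.outputTokens * pricing.output) / perMillion;

  return {
    inputCost,
    cachedInputCost,
    cacheCreationCost: 0, // cache writes (cacheWriteInput) are ignored in this sketch
    outputCost,
    totalCost: inputCost + outputCost,
    currency: 'USD',
  };
}
```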
+/**
+ * Strategy interface for context compaction.
+ *
+ * Strategies define how conversation history is compressed to fit within
+ * context window limits. Different strategies trade off between:
+ * - Speed (LLM calls vs local processing)
+ * - Context preservation (summary quality vs simple truncation)
+ * - Cost (summarization model usage)
+ */
+
+/**
+ * Context provided to compaction strategies.
+ */
+interface CompactionContext {
+    /** Current token count of the conversation */
+    currentTokens: number;
+    /** Target token count after compaction */
+    targetTokens: number;
+    /** Model's context window limits */
+    modelLimits: ModelLimits;
+    /** LLMist client for summarization calls */
+    client: LLMist;
+    /** Model identifier for token counting and summarization */
+    model: string;
+}
+/**
+ * Result of a compaction operation.
+ */
+interface CompactionResult {
+    /** Compacted messages to replace history with */
+    messages: LLMMessage[];
+    /** Summary text if summarization was used */
+    summary?: string;
+    /** The name of the strategy that was ultimately executed */
+    strategyName: string;
+    /** Metadata about the compaction */
+    metadata: {
+        /** Number of messages before compaction */
+        originalCount: number;
+        /** Number of messages after compaction */
+        compactedCount: number;
+        /** Estimated tokens before compaction */
+        tokensBefore: number;
+        /** Estimated tokens after compaction */
+        tokensAfter: number;
    };
+}
+/**
+ * Interface for compaction strategy implementations.
+ *
+ * Strategies receive the conversation history (excluding base messages like
+ * system prompt and gadget instructions) and must return a compacted version.
+ *
+ * @example
+ * ```typescript
+ * class MyCustomStrategy implements CompactionStrategy {
+ *   readonly name = 'my-custom';
+ *
+ *   async compact(
+ *     messages: LLMMessage[],
+ *     config: ResolvedCompactionConfig,
+ *     context: CompactionContext
+ *   ): Promise<CompactionResult> {
+ *     // Custom compaction logic
+ *     return {
+ *       messages: compactedMessages,
+ *       metadata: { ... }
+ *     };
+ *   }
+ * }
+ * ```
+ */
+interface CompactionStrategy {
+    /** Human-readable name of the strategy */
+    readonly name: string;
    /**
-     *
+     * Compact the given messages to fit within target token count.
+     *
+     * @param messages - Conversation history messages (excludes system/gadget base)
+     * @param config - Resolved compaction configuration
+     * @param context - Context including token counts and LLM client
+     * @returns Compacted messages with metadata
     */
-
-
-
-
+    compact(messages: LLMMessage[], config: ResolvedCompactionConfig, context: CompactionContext): Promise<CompactionResult>;
+}
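Beyond the skeleton in the JSDoc above, a complete (if naive) strategy might look like the following sketch. It is not part of the package: the turn handling is deliberately crude (it keeps the last `preserveRecentTurns * 2` messages and scales the token estimate proportionally), and only the `CompactionStrategy`, `CompactionContext`, `CompactionResult` and `ResolvedCompactionConfig` contracts come from the declarations.

```typescript
import type {
  CompactionContext,
  CompactionResult,
  CompactionStrategy,
  LLMMessage,
  ResolvedCompactionConfig,
} from 'llmist';

class KeepRecentStrategy implements CompactionStrategy {
  readonly name = 'keep-recent';

  async compact(
    messages: LLMMessage[],
    config: ResolvedCompactionConfig,
    context: CompactionContext,
  ): Promise<CompactionResult> {
    // Roughly two messages per turn (user + assistant); gadget results are ignored here.
    const keep = Math.max(config.preserveRecentTurns * 2, 2);
    const compacted = messages.slice(-keep);

    // Proportional token estimate; a real strategy would recount with the model's tokenizer.
    const ratio = compacted.length / Math.max(messages.length, 1);

    return {
      messages: compacted,
      strategyName: this.name,
      metadata: {
        originalCount: messages.length,
        compactedCount: compacted.length,
        tokensBefore: context.currentTokens,
        tokensAfter: Math.round(context.currentTokens * ratio),
      },
    };
  }
}
```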
+/**
+ * Utility to group messages into logical conversation turns.
+ *
+ * A "turn" is typically a user message followed by an assistant response.
+ * Gadget calls are grouped with the preceding assistant message.
+ */
+interface MessageTurn {
+    /** Messages in this turn (user + assistant + any gadget results) */
+    messages: LLMMessage[];
+    /** Estimated token count for this turn */
+    tokenEstimate: number;
+}
+
+/**
+ * Configuration types for the context compaction system.
+ *
+ * Context compaction automatically manages conversation history to prevent
+ * context window overflow in long-running agent conversations.
+ */
+
+/**
+ * Event emitted when compaction occurs.
+ * This is included in StreamEvent for UI visibility.
+ */
+interface CompactionEvent {
+    /** The strategy that performed the compaction */
+    strategy: string;
+    /** Token count before compaction */
+    tokensBefore: number;
+    /** Token count after compaction */
+    tokensAfter: number;
+    /** Number of messages before compaction */
+    messagesBefore: number;
+    /** Number of messages after compaction */
+    messagesAfter: number;
+    /** Summary text if summarization was used */
+    summary?: string;
+    /** Agent iteration when compaction occurred */
+    iteration: number;
+}
+/**
+ * Statistics about compaction activity.
+ */
+interface CompactionStats {
+    /** Total number of compactions performed */
+    totalCompactions: number;
+    /** Total tokens saved across all compactions */
+    totalTokensSaved: number;
+    /** Current context usage */
+    currentUsage: {
+        tokens: number;
+        percent: number;
    };
+    /** Model's context window size */
+    contextWindow: number;
+}
+/**
+ * Configuration for the context compaction system.
+ *
+ * @example
+ * ```typescript
+ * // Custom configuration
+ * const agent = await LLMist.createAgent()
+ *   .withModel('sonnet')
+ *   .withCompaction({
+ *     triggerThresholdPercent: 70,
+ *     targetPercent: 40,
+ *     preserveRecentTurns: 10,
+ *   })
+ *   .ask('...');
+ *
+ * // Disable compaction
+ * const agent = await LLMist.createAgent()
+ *   .withModel('sonnet')
+ *   .withoutCompaction()
+ *   .ask('...');
+ * ```
+ */
+interface CompactionConfig {
    /**
-     *
+     * Enable or disable compaction.
+     * @default true
     */
-
+    enabled?: boolean;
    /**
-     *
+     * The compaction strategy to use.
+     * - 'sliding-window': Fast, drops oldest turns (no LLM call)
+     * - 'summarization': LLM-based compression of old messages
+     * - 'hybrid': Summarizes old messages + keeps recent turns (recommended)
+     * - Or provide a custom CompactionStrategy instance
+     * @default 'hybrid'
     */
-
+    strategy?: "sliding-window" | "summarization" | "hybrid" | CompactionStrategy;
    /**
-     *
+     * Context usage percentage that triggers compaction.
+     * When token count exceeds this percentage of the context window,
+     * compaction is performed before the next LLM call.
+     * @default 80
     */
-
-    llmCalls: number;
-    gadgets: number;
-    };
+    triggerThresholdPercent?: number;
    /**
-     *
-     *
-     *
-     * @param type - Event type to subscribe to (use "*" for all events)
-     * @param listener - Callback function that receives matching events
-     * @returns Unsubscribe function
-     *
-     * @example
-     * ```typescript
-     * const unsubscribe = tree.on("gadget_complete", (event) => {
-     *   if (event.type === "gadget_complete") {
-     *     console.log(`Gadget ${event.name} completed`);
-     *   }
-     * });
-     * ```
+     * Target context usage percentage after compaction.
+     * The compaction will aim to reduce tokens to this percentage.
+     * @default 50
     */
-
+    targetPercent?: number;
    /**
-     *
+     * Number of recent turns to preserve during compaction.
+     * A "turn" is a user message + assistant response pair.
+     * Recent turns are kept verbatim while older ones are summarized/dropped.
+     * @default 5
     */
-
+    preserveRecentTurns?: number;
    /**
-     *
-     *
+     * Model to use for summarization.
+     * If not specified, uses the agent's model.
+     * @default undefined (uses agent's model)
     */
-
+    summarizationModel?: string;
    /**
-     *
+     * Custom system prompt for summarization.
+     * If not specified, uses a default prompt optimized for context preservation.
     */
-
+    summarizationPrompt?: string;
    /**
-     *
+     * Callback invoked when compaction occurs.
+     * Useful for logging or analytics.
     */
-
+    onCompaction?: (event: CompactionEvent) => void;
+}
+/**
+ * Default configuration values for compaction.
+ * Compaction is enabled by default with the hybrid strategy.
+ */
+declare const DEFAULT_COMPACTION_CONFIG: Required<Omit<CompactionConfig, "summarizationModel" | "summarizationPrompt" | "onCompaction">>;
+/**
+ * Default prompt used for summarization strategy.
+ */
+declare const DEFAULT_SUMMARIZATION_PROMPT = "Summarize this conversation history concisely, preserving:\n1. Key decisions made and their rationale\n2. Important facts and data discovered\n3. Errors encountered and how they were resolved\n4. Current task context and goals\n\nFormat as a brief narrative paragraph, not bullet points.\nPrevious conversation:";
+/**
+ * Resolved configuration with all defaults applied.
+ */
+interface ResolvedCompactionConfig {
+    enabled: boolean;
+    strategy: "sliding-window" | "summarization" | "hybrid";
+    triggerThresholdPercent: number;
+    targetPercent: number;
+    preserveRecentTurns: number;
+    summarizationModel?: string;
+    summarizationPrompt: string;
+    onCompaction?: (event: CompactionEvent) => void;
}

/**
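A small usage sketch for the `onCompaction` hook declared above, mirroring the builder chain shown in the `CompactionConfig` JSDoc (`createAgent` → `withModel` → `withCompaction` → `ask`). The model alias, the prompt text and the log format are placeholders, not values from the package.

```typescript
import { LLMist } from 'llmist';

const agent = await LLMist.createAgent()
  .withModel('sonnet')
  .withCompaction({
    triggerThresholdPercent: 75,
    onCompaction: (event) => {
      // Fires once per compaction with before/after counters.
      console.log(
        `[compaction] ${event.strategy}: ${event.tokensBefore} -> ${event.tokensAfter} tokens, ` +
          `${event.messagesBefore} -> ${event.messagesAfter} messages (iteration ${event.iteration})`,
      );
    },
  })
  .ask('Summarize the project history');
```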
@@ -1258,7 +1524,8 @@ declare class ModelRegistry {
    registerModels(specs: ModelSpec[]): void;
    /**
     * Get model specification by model ID
-     * @param modelId - Full model identifier
+     * @param modelId - Full model identifier, optionally with provider prefix
+     *   (e.g., 'gpt-5', 'claude-sonnet-4-5-20250929', 'anthropic:claude-sonnet-4-5')
     * @returns ModelSpec if found, undefined otherwise
     */
    getModelSpec(modelId: string): ModelSpec | undefined;
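The widened `modelId` contract above accepts bare and provider-prefixed identifiers alike. A lookup sketch follows; how a `ModelRegistry` instance is constructed and pre-populated is not shown in this diff, so the registry is taken as a parameter and the formatting is illustrative.

```typescript
import type { ModelRegistry } from 'llmist';

function describeModel(registry: ModelRegistry, id: string): string {
  // Both 'claude-sonnet-4-5-20250929' and 'anthropic:claude-sonnet-4-5' are accepted per the JSDoc.
  const spec = registry.getModelSpec(id);
  if (!spec) return `${id}: unknown model`;
  return `${spec.displayName} (${spec.provider}): ${spec.contextWindow} token context, ${spec.maxOutputTokens} max output`;
}
```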
@@ -1610,7 +1877,7 @@ type StreamEvent = {
    invocationId: string;
} | {
    type: "compaction";
-    event: CompactionEvent
+    event: CompactionEvent;
} | SubagentStreamEvent | StreamCompletionEvent;
/** Event for forwarding subagent activity through the stream */
interface SubagentStreamEvent {
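The semicolon fix above is cosmetic, but the `compaction` member of `StreamEvent` is part of the new compaction surface. A narrow handler sketch that relies only on the union members visible in this hunk; the `'type' in event` guard avoids assuming every union member carries a `type` field, and how the event stream is obtained is outside this diff.

```typescript
import type { StreamEvent } from 'llmist';

function logCompactions(event: StreamEvent): void {
  if ('type' in event && event.type === 'compaction') {
    const { strategy, tokensBefore, tokensAfter, summary } = event.event;
    console.log(`history compacted by '${strategy}': ${tokensBefore} -> ${tokensAfter} tokens`);
    if (summary) console.log(`summary: ${summary}`);
  }
}
```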
@@ -2110,6 +2377,73 @@ interface ExecutionContext {
     * ```
     */
    depth?: number;
+    /**
+     * Host llmist exports for external gadgets.
+     *
+     * External gadgets MUST use these instead of importing from 'llmist'
+     * to ensure they use the same version as the host CLI, enabling proper
+     * tree sharing and feature compatibility.
+     *
+     * Use the `getHostExports(ctx)` helper function to access these exports
+     * with proper error handling.
+     *
+     * @example
+     * ```typescript
+     * import { getHostExports, Gadget, z } from 'llmist';
+     *
+     * class BrowseWeb extends Gadget({...}) {
+     *   async execute(params, ctx) {
+     *     const { AgentBuilder } = getHostExports(ctx);
+     *     const agent = new AgentBuilder()
+     *       .withParentContext(ctx)
+     *       .ask(params.task);
+     *   }
+     * }
+     * ```
+     */
+    hostExports?: HostExports;
+    /**
+     * Logger instance for structured logging.
+     *
+     * External gadgets should use this for logging instead of importing
+     * defaultLogger directly. This ensures logs respect the CLI's configured
+     * log level, format, and destination (file/console).
+     *
+     * The logger is optional to support standalone gadget execution and testing.
+     * Use optional chaining when logging: `ctx.logger?.debug(...)`.
+     *
+     * @example
+     * ```typescript
+     * execute: async (params, ctx) => {
+     *   ctx.logger?.debug("[MyGadget] Starting operation", { itemId: params.id });
+     *   // ... do work ...
+     *   ctx.logger?.info("[MyGadget] Completed successfully");
+     *   return "done";
+     * }
+     * ```
+     */
+    logger?: Logger<ILogObj>;
+}
+/**
+ * Host llmist exports provided to external gadgets via ExecutionContext.
+ *
+ * This ensures external gadgets use the same class instances as the host CLI,
+ * enabling proper tree sharing and avoiding the "dual-package problem" where
+ * different versions of llmist have incompatible classes.
+ */
+interface HostExports {
+    /** AgentBuilder for creating subagents with proper tree sharing */
+    AgentBuilder: typeof AgentBuilder;
+    /** Gadget factory for defining gadgets */
+    Gadget: typeof Gadget;
+    /** createGadget for functional gadget definitions */
+    createGadget: typeof createGadget;
+    /** ExecutionTree for tree operations */
+    ExecutionTree: typeof ExecutionTree;
+    /** LLMist client */
+    LLMist: typeof LLMist;
+    /** Zod schema builder */
+    z: typeof zod.z;
}
/**
 * Parent agent configuration passed to gadgets.
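For external gadgets that cannot rely on `getHostExports(ctx)` (for example in tests that build an `ExecutionContext` by hand), the optional `hostExports` field can also be read directly. A defensive sketch; the fallback-to-local-import behaviour is an assumption, not documented API.

```typescript
import { z as localZ, type ExecutionContext } from 'llmist';

// Prefer the host CLI's zod instance so schemas stay compatible across versions;
// fall back to the locally installed one when running standalone (assumed behaviour).
function pickZod(ctx?: ExecutionContext) {
  return ctx?.hostExports?.z ?? localZ;
}
```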
@@ -3929,7 +4263,7 @@ interface ObserveCompactionContext {
    /** Agent iteration when compaction occurred */
    iteration: number;
    /** Details of the compaction event */
-    event: CompactionEvent
+    event: CompactionEvent;
    /** Cumulative compaction statistics */
    stats: CompactionStats;
    /** Logger instance */
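A sketch of an observer body built only from the `ObserveCompactionContext` fields visible in this hunk. How the observer is registered on the agent (the `Observers` wiring) is outside this diff, so it is written as a free function and logs via `console`.

```typescript
import type { ObserveCompactionContext } from 'llmist';

function reportCompaction(ctx: ObserveCompactionContext): void {
  const saved = ctx.event.tokensBefore - ctx.event.tokensAfter;
  console.log(
    `compaction #${ctx.stats.totalCompactions} at iteration ${ctx.iteration}: ` +
      `saved ${saved} tokens, context now at ${ctx.stats.currentUsage.percent}% ` +
      `of a ${ctx.stats.contextWindow}-token window`,
  );
}
```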
@@ -4377,6 +4711,8 @@ declare class Agent {
    private readonly pendingSubagentEvents;
    private readonly onSubagentEvent;
    private syntheticInvocationCounter;
+    private readonly completedInvocationIds;
+    private readonly failedInvocationIds;
    private readonly tree;
    private readonly parentNodeId;
    private readonly baseDepth;
@@ -4500,7 +4836,7 @@ declare class Agent {
     * }
     * ```
     */
-    compact(): Promise<CompactionEvent
+    compact(): Promise<CompactionEvent | null>;
    /**
     * Get compaction statistics.
     *
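`compact()` now resolves to `null` when nothing needed compacting, so manual callers should guard for it. A sketch written against the structural type from this hunk rather than a concrete `Agent` import:

```typescript
import type { CompactionEvent } from 'llmist';

async function compactIfNeeded(agent: { compact(): Promise<CompactionEvent | null> }) {
  const event = await agent.compact();
  if (event === null) {
    console.log('context already within limits, nothing compacted');
    return;
  }
  console.log(`'${event.strategy}' reduced history from ${event.tokensBefore} to ${event.tokensAfter} tokens`);
}
```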
@@ -4545,6 +4881,34 @@ declare class Agent {
     * The limiter runs first, then chains to any user interceptor.
     */
    private chainOutputLimiterWithUserHooks;
+    /**
+     * Check abort signal and notify observers if aborted.
+     * @returns true if agent should terminate
+     */
+    private checkAbortAndNotify;
+    /**
+     * Check and perform context compaction if needed.
+     * @returns compaction stream event if compaction occurred, null otherwise
+     */
+    private checkAndPerformCompaction;
+    /**
+     * Prepare LLM call options and process beforeLLMCall controller.
+     * @returns options and optional skipWithSynthetic response if controller wants to skip
+     */
+    private prepareLLMCall;
+    /**
+     * Calculate cost and complete LLM call in execution tree.
+     */
+    private completeLLMCallInTree;
+    /**
+     * Process afterLLMCall controller and return modified final message.
+     */
+    private processAfterLLMCallController;
+    /**
+     * Update conversation history with gadget results or text-only response.
+     * @returns true if loop should break (text-only handler requested termination)
+     */
+    private updateConversationWithResults;
    /**
     * Run agent with named event handlers (syntactic sugar).
     *
@@ -6030,4 +6394,4 @@ declare function createTextMockStream(text: string, options?: {
    usage?: MockResponse["usage"];
}): LLMStream;

-export { type
+export { type LLMGenerationOptions as $, AbstractGadget as A, type MessageContent as B, type CompactionConfig as C, GadgetRegistry as D, MediaStore as E, type AgentContextConfig as F, type GadgetMediaOutput as G, type HintTemplate as H, type IConversationManager as I, type SubagentConfigMap as J, type SubagentEvent as K, type LLMMessage as L, MockProviderAdapter as M, ExecutionTree as N, type NodeId as O, type ParsedGadgetCall as P, type GadgetExecutionResult as Q, type ResolvedCompactionConfig as R, type StreamEvent as S, type TokenUsage as T, type MediaKind as U, type MediaMetadata as V, type GadgetExecuteResultWithMedia as W, type ExecutionContext as X, type ProviderAdapter as Y, type ModelDescriptor as Z, type ModelSpec as _, type LLMStream as a, type GadgetStartEvent as a$, type ImageModelSpec as a0, type ImageGenerationOptions as a1, type ImageGenerationResult as a2, type SpeechModelSpec as a3, type SpeechGenerationOptions as a4, type SpeechGenerationResult as a5, type HostExports as a6, type HistoryMessage as a7, type TrailingMessage as a8, type TrailingMessageContext as a9, type ObserveGadgetStartContext as aA, type ObserveLLMCallContext as aB, type ObserveLLMCompleteContext as aC, type ObserveLLMErrorContext as aD, type Observers as aE, type SubagentContext as aF, DEFAULT_COMPACTION_CONFIG as aG, DEFAULT_SUMMARIZATION_PROMPT as aH, type LLMistOptions as aI, type AddGadgetParams as aJ, type AddLLMCallParams as aK, type CompleteGadgetParams as aL, type CompleteLLMCallParams as aM, type ExecutionNode as aN, type ExecutionNodeType as aO, type GadgetNode as aP, type GadgetState as aQ, type LLMCallNode as aR, type BaseExecutionEvent as aS, type CompactionEvent$1 as aT, type ExecutionEvent as aU, type ExecutionEventType as aV, type GadgetCallEvent as aW, type GadgetCompleteEvent as aX, type GadgetErrorEvent as aY, type GadgetEvent as aZ, type GadgetSkippedEvent$1 as a_, AgentBuilder as aa, type EventHandlers as ab, collectEvents as ac, collectText as ad, runWithHandlers as ae, type AfterGadgetExecutionAction as af, type AfterGadgetExecutionControllerContext as ag, type AfterLLMCallAction as ah, type AfterLLMCallControllerContext as ai, type AfterLLMErrorAction as aj, type AgentOptions as ak, type BeforeGadgetExecutionAction as al, type BeforeLLMCallAction as am, type ChunkInterceptorContext as an, type Controllers as ao, type GadgetExecutionControllerContext as ap, type GadgetParameterInterceptorContext as aq, type GadgetResultInterceptorContext as ar, type Interceptors as as, type LLMCallControllerContext as at, type LLMErrorControllerContext as au, type MessageInterceptorContext as av, type MessageTurn as aw, type ObserveChunkContext as ax, type ObserveCompactionContext as ay, type ObserveGadgetCompleteContext as az, type LLMStreamChunk as b, stream as b$, type HumanInputRequiredEvent as b0, type LLMCallCompleteEvent as b1, type LLMCallErrorEvent as b2, type LLMCallStartEvent as b3, type LLMCallStreamEvent as b4, type LLMEvent as b5, type StreamCompleteEvent as b6, type TextEvent as b7, filterByDepth as b8, filterByParent as b9, isTextPart as bA, parseDataUrl as bB, text as bC, toBase64 as bD, type MessageRole as bE, extractMessageText as bF, LLMMessageBuilder as bG, normalizeMessageContent as bH, type CostEstimate as bI, type ModelFeatures as bJ, type ModelLimits as bK, type ModelPricing as bL, type VisionAnalyzeOptions as bM, type VisionAnalyzeResult as bN, type ProviderIdentifier as bO, ModelIdentifierParser as bP, type HintContext as bQ, type PromptContext as bR, type PromptTemplate as bS, type PromptTemplateConfig as bT, DEFAULT_HINTS as bU, DEFAULT_PROMPTS as bV, resolveHintTemplate as bW, resolvePromptTemplate as bX, resolveRulesTemplate as bY, type TextGenerationOptions as bZ, complete as b_, filterRootEvents as ba, groupByParent as bb, isGadgetEvent as bc, isLLMEvent as bd, isRootEvent as be, isSubagentEvent as bf, type AudioContentPart as bg, type AudioMimeType as bh, type AudioSource as bi, type ContentPart as bj, type ImageBase64Source as bk, type ImageContentPart as bl, type ImageMimeType as bm, type ImageSource as bn, type ImageUrlSource as bo, type TextContentPart as bp, audioFromBase64 as bq, audioFromBuffer as br, detectAudioMimeType as bs, detectImageMimeType as bt, imageFromBase64 as bu, imageFromBuffer as bv, imageFromUrl as bw, isAudioPart as bx, isDataUrl as by, isImagePart as bz, createMockAdapter as c, type CreateGadgetConfig as c0, createGadget as c1, type GadgetClass as c2, type GadgetOrClass as c3, type GadgetConfig as c4, Gadget as c5, type CostReportingLLMist as c6, type GadgetExample as c7, type GadgetExecuteResult as c8, type GadgetExecuteReturn as c9, type GadgetSkippedEvent as ca, type StoredMedia as cb, type SubagentStreamEvent as cc, type TextOnlyAction as cd, type TextOnlyContext as ce, type TextOnlyCustomHandler as cf, type TextOnlyGadgetConfig as cg, type TextOnlyHandler as ch, type TextOnlyStrategy as ci, MockBuilder as d, createMockClient as e, MockManager as f, getMockManager as g, createMockStream as h, createTextMockStream as i, type MockAudioData as j, type MockImageData as k, type MockMatcher as l, mockLLM as m, type MockMatcherContext as n, type MockOptions as o, type MockRegistration as p, type MockResponse as q, type MockStats as r, type AgentHooks as s, ModelRegistry as t, LLMist as u, type CompactionEvent as v, type CompactionStats as w, type CompactionStrategy as x, type CompactionContext as y, type CompactionResult as z };