llmist 4.0.0 → 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{chunk-RHR2M6T6.js → chunk-3SZIQI45.js} +16 -7
- package/dist/chunk-3SZIQI45.js.map +1 -0
- package/dist/{chunk-Q6NQRMYD.js → chunk-UBPZUVIN.js} +2 -2
- package/dist/cli.cjs +327 -215
- package/dist/cli.cjs.map +1 -1
- package/dist/cli.js +284 -181
- package/dist/cli.js.map +1 -1
- package/dist/index.cjs +15 -6
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +2 -2
- package/dist/{mock-stream-BvNYtrlG.d.cts → mock-stream-CAY53Q6u.d.cts} +23 -2
- package/dist/{mock-stream-BvNYtrlG.d.ts → mock-stream-CAY53Q6u.d.ts} +23 -2
- package/dist/testing/index.cjs +15 -6
- package/dist/testing/index.cjs.map +1 -1
- package/dist/testing/index.d.cts +2 -2
- package/dist/testing/index.d.ts +2 -2
- package/dist/testing/index.js +1 -1
- package/package.json +3 -2
- package/dist/chunk-RHR2M6T6.js.map +0 -1
- /package/dist/{chunk-Q6NQRMYD.js.map → chunk-UBPZUVIN.js.map} +0 -0
package/dist/index.d.cts
CHANGED
@@ -1,7 +1,7 @@
 import { ZodType, ZodTypeAny } from 'zod';
 export { z } from 'zod';
-import { s as AgentHooks, t as ModelRegistry, u as LLMist, C as CompactionConfig, I as IConversationManager, v as CompactionEvent, L as LLMMessage, w as CompactionStats, x as CompactionStrategy, R as ResolvedCompactionConfig, y as CompactionContext, z as CompactionResult, B as MessageContent, G as GadgetMediaOutput, H as HintTemplate, S as StreamEvent, T as TokenUsage, D as GadgetRegistry, E as MediaStore, F as AgentContextConfig, J as SubagentConfigMap, K as SubagentEvent, b as LLMStreamChunk, N as ExecutionContext, O as GadgetExecuteReturn, P as GadgetExample, A as AbstractGadget, Q as ParsedGadgetCall, U as GadgetExecutionResult, V as MediaKind, W as MediaMetadata, X as GadgetExecuteResultWithMedia, Y as ProviderAdapter, Z as ModelDescriptor, _ as ModelSpec, $ as LLMGenerationOptions, a as LLMStream, a0 as ImageModelSpec, a1 as ImageGenerationOptions, a2 as ImageGenerationResult, a3 as SpeechModelSpec, a4 as SpeechGenerationOptions, a5 as SpeechGenerationResult } from './mock-stream-
-export { ae as AfterGadgetExecutionAction, af as AfterGadgetExecutionControllerContext, ag as AfterLLMCallAction, ah as AfterLLMCallControllerContext, ai as AfterLLMErrorAction, a9 as AgentBuilder, aj as AgentOptions, aI as AudioContentPart, aJ as AudioMimeType, aK as AudioSource, ak as BeforeGadgetExecutionAction, al as BeforeLLMCallAction, am as ChunkInterceptorContext, aL as ContentPart, an as Controllers, b8 as CostEstimate, bu as CostReportingLLMist, aF as DEFAULT_COMPACTION_CONFIG, bk as DEFAULT_HINTS, bl as DEFAULT_PROMPTS, aG as DEFAULT_SUMMARIZATION_PROMPT, aa as EventHandlers, bs as GadgetClass, bv as GadgetExecuteResult, ao as GadgetExecutionControllerContext, bt as GadgetOrClass, ap as GadgetParameterInterceptorContext, aq as GadgetResultInterceptorContext, bw as GadgetSkippedEvent, bg as HintContext, a6 as HistoryMessage, aM as ImageBase64Source, aN as ImageContentPart, aO as ImageMimeType, aP as ImageSource, aQ as ImageUrlSource, ar as Interceptors, as as LLMCallControllerContext, at as LLMErrorControllerContext, b6 as LLMMessageBuilder, aH as LLMistOptions, au as MessageInterceptorContext, b4 as MessageRole, av as MessageTurn, d as MockBuilder, f as MockManager, l as MockMatcher, n as MockMatcherContext, o as MockOptions, M as MockProviderAdapter, p as MockRegistration, q as MockResponse, r as MockStats, b9 as ModelFeatures, bf as ModelIdentifierParser, ba as ModelLimits, bb as ModelPricing, aw as ObserveChunkContext, ax as ObserveCompactionContext, ay as ObserveGadgetCompleteContext, az as ObserveGadgetStartContext, aA as ObserveLLMCallContext, aB as ObserveLLMCompleteContext, aC as ObserveLLMErrorContext, aD as Observers, bh as PromptContext, bi as PromptTemplate, bj as PromptTemplateConfig, be as ProviderIdentifier, bx as StoredMedia, aE as SubagentContext, by as SubagentStreamEvent, aR as TextContentPart, bp as TextGenerationOptions, bz as TextOnlyAction, bA as TextOnlyContext, bB as TextOnlyCustomHandler, bC as TextOnlyGadgetConfig, bD as TextOnlyHandler, bE as TextOnlyStrategy, a7 as TrailingMessage, a8 as TrailingMessageContext, bc as VisionAnalyzeOptions, bd as VisionAnalyzeResult, aS as audioFromBase64, aT as audioFromBuffer, ab as collectEvents, ac as collectText, bq as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, aU as detectAudioMimeType, aV as detectImageMimeType, b5 as extractMessageText, g as getMockManager, aW as imageFromBase64, aX as imageFromBuffer, aY as imageFromUrl, aZ as isAudioPart, a_ as isDataUrl, a$ as isImagePart, b0 as isTextPart, m as mockLLM, b7 as normalizeMessageContent, b1 as parseDataUrl, bm as resolveHintTemplate, bn as resolvePromptTemplate, bo as resolveRulesTemplate, ad as runWithHandlers, br as stream, b2 as text, b3 as toBase64 } from './mock-stream-
+import { s as AgentHooks, t as ModelRegistry, u as LLMist, C as CompactionConfig, I as IConversationManager, v as CompactionEvent, L as LLMMessage, w as CompactionStats, x as CompactionStrategy, R as ResolvedCompactionConfig, y as CompactionContext, z as CompactionResult, B as MessageContent, G as GadgetMediaOutput, H as HintTemplate, S as StreamEvent, T as TokenUsage, D as GadgetRegistry, E as MediaStore, F as AgentContextConfig, J as SubagentConfigMap, K as SubagentEvent, b as LLMStreamChunk, N as ExecutionContext, O as GadgetExecuteReturn, P as GadgetExample, A as AbstractGadget, Q as ParsedGadgetCall, U as GadgetExecutionResult, V as MediaKind, W as MediaMetadata, X as GadgetExecuteResultWithMedia, Y as ProviderAdapter, Z as ModelDescriptor, _ as ModelSpec, $ as LLMGenerationOptions, a as LLMStream, a0 as ImageModelSpec, a1 as ImageGenerationOptions, a2 as ImageGenerationResult, a3 as SpeechModelSpec, a4 as SpeechGenerationOptions, a5 as SpeechGenerationResult } from './mock-stream-CAY53Q6u.cjs';
+export { ae as AfterGadgetExecutionAction, af as AfterGadgetExecutionControllerContext, ag as AfterLLMCallAction, ah as AfterLLMCallControllerContext, ai as AfterLLMErrorAction, a9 as AgentBuilder, aj as AgentOptions, aI as AudioContentPart, aJ as AudioMimeType, aK as AudioSource, ak as BeforeGadgetExecutionAction, al as BeforeLLMCallAction, am as ChunkInterceptorContext, aL as ContentPart, an as Controllers, b8 as CostEstimate, bu as CostReportingLLMist, aF as DEFAULT_COMPACTION_CONFIG, bk as DEFAULT_HINTS, bl as DEFAULT_PROMPTS, aG as DEFAULT_SUMMARIZATION_PROMPT, aa as EventHandlers, bs as GadgetClass, bv as GadgetExecuteResult, ao as GadgetExecutionControllerContext, bt as GadgetOrClass, ap as GadgetParameterInterceptorContext, aq as GadgetResultInterceptorContext, bw as GadgetSkippedEvent, bg as HintContext, a6 as HistoryMessage, aM as ImageBase64Source, aN as ImageContentPart, aO as ImageMimeType, aP as ImageSource, aQ as ImageUrlSource, ar as Interceptors, as as LLMCallControllerContext, at as LLMErrorControllerContext, b6 as LLMMessageBuilder, aH as LLMistOptions, au as MessageInterceptorContext, b4 as MessageRole, av as MessageTurn, d as MockBuilder, f as MockManager, l as MockMatcher, n as MockMatcherContext, o as MockOptions, M as MockProviderAdapter, p as MockRegistration, q as MockResponse, r as MockStats, b9 as ModelFeatures, bf as ModelIdentifierParser, ba as ModelLimits, bb as ModelPricing, aw as ObserveChunkContext, ax as ObserveCompactionContext, ay as ObserveGadgetCompleteContext, az as ObserveGadgetStartContext, aA as ObserveLLMCallContext, aB as ObserveLLMCompleteContext, aC as ObserveLLMErrorContext, aD as Observers, bh as PromptContext, bi as PromptTemplate, bj as PromptTemplateConfig, be as ProviderIdentifier, bx as StoredMedia, aE as SubagentContext, by as SubagentStreamEvent, aR as TextContentPart, bp as TextGenerationOptions, bz as TextOnlyAction, bA as TextOnlyContext, bB as TextOnlyCustomHandler, bC as TextOnlyGadgetConfig, bD as TextOnlyHandler, bE as TextOnlyStrategy, a7 as TrailingMessage, a8 as TrailingMessageContext, bc as VisionAnalyzeOptions, bd as VisionAnalyzeResult, aS as audioFromBase64, aT as audioFromBuffer, ab as collectEvents, ac as collectText, bq as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, aU as detectAudioMimeType, aV as detectImageMimeType, b5 as extractMessageText, g as getMockManager, aW as imageFromBase64, aX as imageFromBuffer, aY as imageFromUrl, aZ as isAudioPart, a_ as isDataUrl, a$ as isImagePart, b0 as isTextPart, m as mockLLM, b7 as normalizeMessageContent, b1 as parseDataUrl, bm as resolveHintTemplate, bn as resolvePromptTemplate, bo as resolveRulesTemplate, ad as runWithHandlers, br as stream, b2 as text, b3 as toBase64 } from './mock-stream-CAY53Q6u.cjs';
 import { Logger, ILogObj } from 'tslog';
 import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
 import OpenAI from 'openai';
package/dist/index.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { ZodType, ZodTypeAny } from 'zod';
 export { z } from 'zod';
-import { s as AgentHooks, t as ModelRegistry, u as LLMist, C as CompactionConfig, I as IConversationManager, v as CompactionEvent, L as LLMMessage, w as CompactionStats, x as CompactionStrategy, R as ResolvedCompactionConfig, y as CompactionContext, z as CompactionResult, B as MessageContent, G as GadgetMediaOutput, H as HintTemplate, S as StreamEvent, T as TokenUsage, D as GadgetRegistry, E as MediaStore, F as AgentContextConfig, J as SubagentConfigMap, K as SubagentEvent, b as LLMStreamChunk, N as ExecutionContext, O as GadgetExecuteReturn, P as GadgetExample, A as AbstractGadget, Q as ParsedGadgetCall, U as GadgetExecutionResult, V as MediaKind, W as MediaMetadata, X as GadgetExecuteResultWithMedia, Y as ProviderAdapter, Z as ModelDescriptor, _ as ModelSpec, $ as LLMGenerationOptions, a as LLMStream, a0 as ImageModelSpec, a1 as ImageGenerationOptions, a2 as ImageGenerationResult, a3 as SpeechModelSpec, a4 as SpeechGenerationOptions, a5 as SpeechGenerationResult } from './mock-stream-
-export { ae as AfterGadgetExecutionAction, af as AfterGadgetExecutionControllerContext, ag as AfterLLMCallAction, ah as AfterLLMCallControllerContext, ai as AfterLLMErrorAction, a9 as AgentBuilder, aj as AgentOptions, aI as AudioContentPart, aJ as AudioMimeType, aK as AudioSource, ak as BeforeGadgetExecutionAction, al as BeforeLLMCallAction, am as ChunkInterceptorContext, aL as ContentPart, an as Controllers, b8 as CostEstimate, bu as CostReportingLLMist, aF as DEFAULT_COMPACTION_CONFIG, bk as DEFAULT_HINTS, bl as DEFAULT_PROMPTS, aG as DEFAULT_SUMMARIZATION_PROMPT, aa as EventHandlers, bs as GadgetClass, bv as GadgetExecuteResult, ao as GadgetExecutionControllerContext, bt as GadgetOrClass, ap as GadgetParameterInterceptorContext, aq as GadgetResultInterceptorContext, bw as GadgetSkippedEvent, bg as HintContext, a6 as HistoryMessage, aM as ImageBase64Source, aN as ImageContentPart, aO as ImageMimeType, aP as ImageSource, aQ as ImageUrlSource, ar as Interceptors, as as LLMCallControllerContext, at as LLMErrorControllerContext, b6 as LLMMessageBuilder, aH as LLMistOptions, au as MessageInterceptorContext, b4 as MessageRole, av as MessageTurn, d as MockBuilder, f as MockManager, l as MockMatcher, n as MockMatcherContext, o as MockOptions, M as MockProviderAdapter, p as MockRegistration, q as MockResponse, r as MockStats, b9 as ModelFeatures, bf as ModelIdentifierParser, ba as ModelLimits, bb as ModelPricing, aw as ObserveChunkContext, ax as ObserveCompactionContext, ay as ObserveGadgetCompleteContext, az as ObserveGadgetStartContext, aA as ObserveLLMCallContext, aB as ObserveLLMCompleteContext, aC as ObserveLLMErrorContext, aD as Observers, bh as PromptContext, bi as PromptTemplate, bj as PromptTemplateConfig, be as ProviderIdentifier, bx as StoredMedia, aE as SubagentContext, by as SubagentStreamEvent, aR as TextContentPart, bp as TextGenerationOptions, bz as TextOnlyAction, bA as TextOnlyContext, bB as TextOnlyCustomHandler, bC as TextOnlyGadgetConfig, bD as TextOnlyHandler, bE as TextOnlyStrategy, a7 as TrailingMessage, a8 as TrailingMessageContext, bc as VisionAnalyzeOptions, bd as VisionAnalyzeResult, aS as audioFromBase64, aT as audioFromBuffer, ab as collectEvents, ac as collectText, bq as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, aU as detectAudioMimeType, aV as detectImageMimeType, b5 as extractMessageText, g as getMockManager, aW as imageFromBase64, aX as imageFromBuffer, aY as imageFromUrl, aZ as isAudioPart, a_ as isDataUrl, a$ as isImagePart, b0 as isTextPart, m as mockLLM, b7 as normalizeMessageContent, b1 as parseDataUrl, bm as resolveHintTemplate, bn as resolvePromptTemplate, bo as resolveRulesTemplate, ad as runWithHandlers, br as stream, b2 as text, b3 as toBase64 } from './mock-stream-
+import { s as AgentHooks, t as ModelRegistry, u as LLMist, C as CompactionConfig, I as IConversationManager, v as CompactionEvent, L as LLMMessage, w as CompactionStats, x as CompactionStrategy, R as ResolvedCompactionConfig, y as CompactionContext, z as CompactionResult, B as MessageContent, G as GadgetMediaOutput, H as HintTemplate, S as StreamEvent, T as TokenUsage, D as GadgetRegistry, E as MediaStore, F as AgentContextConfig, J as SubagentConfigMap, K as SubagentEvent, b as LLMStreamChunk, N as ExecutionContext, O as GadgetExecuteReturn, P as GadgetExample, A as AbstractGadget, Q as ParsedGadgetCall, U as GadgetExecutionResult, V as MediaKind, W as MediaMetadata, X as GadgetExecuteResultWithMedia, Y as ProviderAdapter, Z as ModelDescriptor, _ as ModelSpec, $ as LLMGenerationOptions, a as LLMStream, a0 as ImageModelSpec, a1 as ImageGenerationOptions, a2 as ImageGenerationResult, a3 as SpeechModelSpec, a4 as SpeechGenerationOptions, a5 as SpeechGenerationResult } from './mock-stream-CAY53Q6u.js';
+export { ae as AfterGadgetExecutionAction, af as AfterGadgetExecutionControllerContext, ag as AfterLLMCallAction, ah as AfterLLMCallControllerContext, ai as AfterLLMErrorAction, a9 as AgentBuilder, aj as AgentOptions, aI as AudioContentPart, aJ as AudioMimeType, aK as AudioSource, ak as BeforeGadgetExecutionAction, al as BeforeLLMCallAction, am as ChunkInterceptorContext, aL as ContentPart, an as Controllers, b8 as CostEstimate, bu as CostReportingLLMist, aF as DEFAULT_COMPACTION_CONFIG, bk as DEFAULT_HINTS, bl as DEFAULT_PROMPTS, aG as DEFAULT_SUMMARIZATION_PROMPT, aa as EventHandlers, bs as GadgetClass, bv as GadgetExecuteResult, ao as GadgetExecutionControllerContext, bt as GadgetOrClass, ap as GadgetParameterInterceptorContext, aq as GadgetResultInterceptorContext, bw as GadgetSkippedEvent, bg as HintContext, a6 as HistoryMessage, aM as ImageBase64Source, aN as ImageContentPart, aO as ImageMimeType, aP as ImageSource, aQ as ImageUrlSource, ar as Interceptors, as as LLMCallControllerContext, at as LLMErrorControllerContext, b6 as LLMMessageBuilder, aH as LLMistOptions, au as MessageInterceptorContext, b4 as MessageRole, av as MessageTurn, d as MockBuilder, f as MockManager, l as MockMatcher, n as MockMatcherContext, o as MockOptions, M as MockProviderAdapter, p as MockRegistration, q as MockResponse, r as MockStats, b9 as ModelFeatures, bf as ModelIdentifierParser, ba as ModelLimits, bb as ModelPricing, aw as ObserveChunkContext, ax as ObserveCompactionContext, ay as ObserveGadgetCompleteContext, az as ObserveGadgetStartContext, aA as ObserveLLMCallContext, aB as ObserveLLMCompleteContext, aC as ObserveLLMErrorContext, aD as Observers, bh as PromptContext, bi as PromptTemplate, bj as PromptTemplateConfig, be as ProviderIdentifier, bx as StoredMedia, aE as SubagentContext, by as SubagentStreamEvent, aR as TextContentPart, bp as TextGenerationOptions, bz as TextOnlyAction, bA as TextOnlyContext, bB as TextOnlyCustomHandler, bC as TextOnlyGadgetConfig, bD as TextOnlyHandler, bE as TextOnlyStrategy, a7 as TrailingMessage, a8 as TrailingMessageContext, bc as VisionAnalyzeOptions, bd as VisionAnalyzeResult, aS as audioFromBase64, aT as audioFromBuffer, ab as collectEvents, ac as collectText, bq as complete, c as createMockAdapter, e as createMockClient, h as createMockStream, i as createTextMockStream, aU as detectAudioMimeType, aV as detectImageMimeType, b5 as extractMessageText, g as getMockManager, aW as imageFromBase64, aX as imageFromBuffer, aY as imageFromUrl, aZ as isAudioPart, a_ as isDataUrl, a$ as isImagePart, b0 as isTextPart, m as mockLLM, b7 as normalizeMessageContent, b1 as parseDataUrl, bm as resolveHintTemplate, bn as resolvePromptTemplate, bo as resolveRulesTemplate, ad as runWithHandlers, br as stream, b2 as text, b3 as toBase64 } from './mock-stream-CAY53Q6u.js';
 import { Logger, ILogObj } from 'tslog';
 import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
 import OpenAI from 'openai';
package/dist/index.js
CHANGED
@@ -11,7 +11,7 @@ import {
   resultWithImages,
   resultWithMedia,
   z
-} from "./chunk-
+} from "./chunk-UBPZUVIN.js";
 import {
   AbortException,
   AbstractGadget,
@@ -89,7 +89,7 @@ import {
   toBase64,
   validateAndApplyDefaults,
   validateGadgetParams
-} from "./chunk-
+} from "./chunk-3SZIQI45.js";
 export {
   AbortException,
   AbstractGadget,
package/dist/mock-stream-CAY53Q6u.d.cts
CHANGED
@@ -1031,20 +1031,41 @@ interface SubagentStreamEvent {
 /**
  * Information about an LLM call within a subagent.
  * Used by parent agents to display real-time progress of subagent LLM calls.
+ *
+ * This interface provides full context about subagent LLM calls, enabling
+ * first-class display with the same metrics as top-level agents (cached tokens, cost, etc.).
  */
 interface LLMCallInfo {
     /** Iteration number within the subagent loop */
     iteration: number;
     /** Model identifier (e.g., "sonnet", "gpt-4o") */
     model: string;
-    /** Input tokens sent to the LLM */
+    /** Input tokens sent to the LLM (for backward compat, prefer usage.inputTokens) */
     inputTokens?: number;
-    /** Output tokens received from the LLM */
+    /** Output tokens received from the LLM (for backward compat, prefer usage.outputTokens) */
     outputTokens?: number;
     /** Reason the LLM stopped generating (e.g., "stop", "tool_use") */
     finishReason?: string;
     /** Elapsed time in milliseconds */
     elapsedMs?: number;
+    /**
+     * Full token usage including cached token counts.
+     * This provides the same level of detail as top-level agent calls.
+     */
+    usage?: {
+        inputTokens: number;
+        outputTokens: number;
+        totalTokens: number;
+        /** Number of input tokens served from cache (subset of inputTokens) */
+        cachedInputTokens?: number;
+        /** Number of input tokens written to cache (subset of inputTokens, Anthropic only) */
+        cacheCreationInputTokens?: number;
+    };
+    /**
+     * Cost of this LLM call in USD.
+     * Calculated by the subagent if it has access to model registry.
+     */
+    cost?: number;
 }
 /**
  * Event emitted by subagent gadgets to report internal agent activity.
package/dist/mock-stream-CAY53Q6u.d.ts
CHANGED
@@ -1031,20 +1031,41 @@ interface SubagentStreamEvent {
 /**
  * Information about an LLM call within a subagent.
  * Used by parent agents to display real-time progress of subagent LLM calls.
+ *
+ * This interface provides full context about subagent LLM calls, enabling
+ * first-class display with the same metrics as top-level agents (cached tokens, cost, etc.).
  */
 interface LLMCallInfo {
     /** Iteration number within the subagent loop */
     iteration: number;
     /** Model identifier (e.g., "sonnet", "gpt-4o") */
     model: string;
-    /** Input tokens sent to the LLM */
+    /** Input tokens sent to the LLM (for backward compat, prefer usage.inputTokens) */
     inputTokens?: number;
-    /** Output tokens received from the LLM */
+    /** Output tokens received from the LLM (for backward compat, prefer usage.outputTokens) */
     outputTokens?: number;
     /** Reason the LLM stopped generating (e.g., "stop", "tool_use") */
     finishReason?: string;
     /** Elapsed time in milliseconds */
     elapsedMs?: number;
+    /**
+     * Full token usage including cached token counts.
+     * This provides the same level of detail as top-level agent calls.
+     */
+    usage?: {
+        inputTokens: number;
+        outputTokens: number;
+        totalTokens: number;
+        /** Number of input tokens served from cache (subset of inputTokens) */
+        cachedInputTokens?: number;
+        /** Number of input tokens written to cache (subset of inputTokens, Anthropic only) */
+        cacheCreationInputTokens?: number;
+    };
+    /**
+     * Cost of this LLM call in USD.
+     * Calculated by the subagent if it has access to model registry.
+     */
+    cost?: number;
 }
 /**
  * Event emitted by subagent gadgets to report internal agent activity.
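
The expanded `LLMCallInfo` shape lets a parent agent surface the same per-call metrics for subagents as for itself. Below is a minimal consumer-side sketch that assumes only the interface shown in the hunk above; the `formatLLMCallSummary` helper is hypothetical and not a llmist export.

```ts
// Hypothetical consumer-side sketch mirroring the LLMCallInfo shape added above.
interface LLMCallInfo {
  iteration: number;
  model: string;
  /** Legacy flat fields, kept for backward compatibility */
  inputTokens?: number;
  outputTokens?: number;
  finishReason?: string;
  elapsedMs?: number;
  /** Full token usage, including cache details */
  usage?: {
    inputTokens: number;
    outputTokens: number;
    totalTokens: number;
    cachedInputTokens?: number;
    cacheCreationInputTokens?: number;
  };
  /** Cost in USD, if the subagent could compute it */
  cost?: number;
}

// Prefer the full usage object; fall back to the legacy flat fields.
function formatLLMCallSummary(info: LLMCallInfo): string {
  const input = info.usage?.inputTokens ?? info.inputTokens ?? 0;
  const output = info.usage?.outputTokens ?? info.outputTokens ?? 0;
  const cached = info.usage?.cachedInputTokens ?? 0;
  const cost = info.cost !== undefined ? ` ~$${info.cost.toFixed(4)}` : "";
  return `[${info.model} #${info.iteration}] in=${input} (cached ${cached}) out=${output}${cost}`;
}

console.log(
  formatLLMCallSummary({
    iteration: 1,
    model: "sonnet",
    usage: { inputTokens: 1200, outputTokens: 340, totalTokens: 1540, cachedInputTokens: 800 },
    cost: 0.0091,
  })
);
// -> [sonnet #1] in=1200 (cached 800) out=340 ~$0.0091
```
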
package/dist/testing/index.cjs
CHANGED
@@ -4941,15 +4941,16 @@ var init_agent = __esm({
           });
         } else if (event.type === "llm_call_end") {
           const info = event.event;
+          const usage = info.usage ?? (info.outputTokens ? {
+            inputTokens: info.inputTokens ?? 0,
+            outputTokens: info.outputTokens,
+            totalTokens: (info.inputTokens ?? 0) + info.outputTokens
+          } : void 0);
           void this.hooks?.observers?.onLLMCallComplete?.({
             iteration: info.iteration,
             options: { model: info.model, messages: [] },
             finishReason: info.finishReason ?? null,
-            usage: info.outputTokens ? {
-              inputTokens: info.inputTokens ?? 0,
-              outputTokens: info.outputTokens,
-              totalTokens: (info.inputTokens ?? 0) + info.outputTokens
-            } : void 0,
+            usage,
             rawResponse: "",
             finalMessage: "",
             logger: this.logger,
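
The hunk above now prefers the event's full `usage` object and only synthesizes one from the legacy flat fields when it is missing. A standalone sketch of that fallback, with locally defined types standing in for the real event shape (they are assumptions, not llmist exports):

```ts
// Local sketch of the backward-compat fallback used when forwarding subagent
// llm_call_end events into the parent's onLLMCallComplete observer.
interface SubagentCallInfo {
  inputTokens?: number;
  outputTokens?: number;
  usage?: { inputTokens: number; outputTokens: number; totalTokens: number };
}

function resolveUsage(info: SubagentCallInfo) {
  // Mirrors the diff: the legacy path only triggers when outputTokens is truthy.
  return (
    info.usage ??
    (info.outputTokens
      ? {
          inputTokens: info.inputTokens ?? 0,
          outputTokens: info.outputTokens,
          totalTokens: (info.inputTokens ?? 0) + info.outputTokens,
        }
      : undefined)
  );
}

console.log(resolveUsage({ inputTokens: 100, outputTokens: 25 }));
// -> { inputTokens: 100, outputTokens: 25, totalTokens: 125 }
```
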
@@ -6214,8 +6215,13 @@ ${endPrefix}`
             event: {
               iteration: context.iteration,
               model: context.options.model,
+              // Backward compat fields
+              inputTokens: context.usage?.inputTokens,
               outputTokens: context.usage?.outputTokens,
-              finishReason: context.finishReason
+              finishReason: context.finishReason ?? void 0,
+              // Full usage object with cache details (for first-class display)
+              usage: context.usage
+              // Cost will be calculated by parent if it has model registry
             }
           });
           if (existingOnLLMCallComplete) {
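
On the emitting side, the subagent now reports both the legacy flat fields and the full `usage` object, leaving `cost` for the parent to compute from its own model registry. A rough sketch of that payload construction, using an assumed `CallContext` shape (names are illustrative, not llmist exports):

```ts
// Illustrative producing-side sketch of the llm_call_end event payload.
interface CallContext {
  iteration: number;
  options: { model: string };
  finishReason: string | null;
  usage?: {
    inputTokens: number;
    outputTokens: number;
    totalTokens: number;
    cachedInputTokens?: number;
  };
}

function toLLMCallEndEvent(context: CallContext) {
  return {
    type: "llm_call_end" as const,
    event: {
      iteration: context.iteration,
      model: context.options.model,
      // Backward-compat flat fields
      inputTokens: context.usage?.inputTokens,
      outputTokens: context.usage?.outputTokens,
      finishReason: context.finishReason ?? undefined,
      // Full usage object with cache details; cost is left for the parent to derive
      usage: context.usage,
    },
  };
}
```
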
@@ -8069,6 +8075,9 @@ var init_gemini = __esm({
       async countTokens(messages, descriptor, _spec) {
         const client = this.client;
         const contents = this.convertMessagesToContents(messages);
+        if (!contents || contents.length === 0) {
+          return 0;
+        }
         try {
           const response = await client.models.countTokens({
             model: descriptor.name,
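
The Gemini adapter's `countTokens` now short-circuits to `0` when the converted contents are empty, instead of issuing a request with no content. A minimal sketch of the guard in isolation; `countWithApi` is a stand-in for the real `client.models.countTokens` call, not a llmist API.

```ts
// Sketch of the empty-contents guard: return 0 up front rather than calling the API.
async function countTokensSafe(
  contents: unknown[] | undefined,
  countWithApi: (contents: unknown[]) => Promise<number>
): Promise<number> {
  if (!contents || contents.length === 0) {
    return 0;
  }
  return countWithApi(contents);
}

countTokensSafe([], async () => 42).then((n) => console.log(n)); // -> 0
```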