llmist 4.0.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -34,7 +34,7 @@ import {
   init_strategy,
   init_stream_processor,
   resolveHintTemplate
-} from "./chunk-RHR2M6T6.js";
+} from "./chunk-YJKUWFIC.js";
 
 // src/index.ts
 init_builder();
@@ -1096,4 +1096,4 @@ export {
   resultWithFile,
   z
 };
-//# sourceMappingURL=chunk-Q6NQRMYD.js.map
+//# sourceMappingURL=chunk-F5QK5YVI.js.map
@@ -5005,15 +5005,16 @@ var init_agent = __esm({
       });
     } else if (event.type === "llm_call_end") {
       const info = event.event;
+      const usage = info.usage ?? (info.outputTokens ? {
+        inputTokens: info.inputTokens ?? 0,
+        outputTokens: info.outputTokens,
+        totalTokens: (info.inputTokens ?? 0) + info.outputTokens
+      } : void 0);
       void this.hooks?.observers?.onLLMCallComplete?.({
         iteration: info.iteration,
         options: { model: info.model, messages: [] },
         finishReason: info.finishReason ?? null,
-        usage: info.outputTokens ? {
-          inputTokens: info.inputTokens ?? 0,
-          outputTokens: info.outputTokens,
-          totalTokens: (info.inputTokens ?? 0) + info.outputTokens
-        } : void 0,
+        usage,
         rawResponse: "",
         finalMessage: "",
         logger: this.logger,
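
In 5.x the llm_call_end handler prefers a provider-reported info.usage object and only falls back to synthesizing one from the flat inputTokens/outputTokens fields. A minimal sketch of that fallback in isolation, with assumed names (TokenUsage and normalizeUsage are illustrative, not llmist exports):

    // Illustrative types; llmist's real usage type may carry extra cache fields.
    interface TokenUsage {
      inputTokens: number;
      outputTokens: number;
      totalTokens: number;
    }

    function normalizeUsage(info: {
      usage?: TokenUsage;
      inputTokens?: number;
      outputTokens?: number;
    }): TokenUsage | undefined {
      if (info.usage) return info.usage; // a full usage object wins
      if (!info.outputTokens) return undefined; // nothing to synthesize from
      const inputTokens = info.inputTokens ?? 0;
      return {
        inputTokens,
        outputTokens: info.outputTokens,
        totalTokens: inputTokens + info.outputTokens
      };
    }

Hoisting the fallback into a usage variable lets the same value feed the observer call and, later, the event forwarding, instead of duplicating the ternary inline.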
@@ -6257,13 +6258,24 @@ ${endPrefix}`
       observers: {
         ...hooks?.observers,
         onLLMCallStart: async (context) => {
+          let inputTokens;
+          try {
+            if (this.client) {
+              inputTokens = await this.client.countTokens(
+                context.options.model,
+                context.options.messages
+              );
+            }
+          } catch {
+          }
           onSubagentEvent({
             type: "llm_call_start",
             gadgetInvocationId: invocationId,
             depth,
             event: {
               iteration: context.iteration,
-              model: context.options.model
+              model: context.options.model,
+              inputTokens
             }
           });
           if (existingOnLLMCallStart) {
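
The wrapped onLLMCallStart observer now makes a best-effort token count before the call: if a client is available it counts the prompt tokens, and any error is swallowed so counting can never break the call itself; the result rides along on the llm_call_start subagent event. A hedged sketch of how a listener might consume it (handleSubagentEvent and the exact event shape beyond this hunk are assumptions):

    // Hypothetical listener; field names mirror the event built in the hunk above.
    function handleSubagentEvent(e: {
      type: string;
      gadgetInvocationId: string;
      depth: number;
      event: { iteration: number; model: string; inputTokens?: number };
    }): void {
      if (e.type !== "llm_call_start") return;
      // inputTokens is undefined when no client was available or countTokens threw.
      const tokens = e.event.inputTokens;
      console.log(
        `[depth ${e.depth}] iteration ${e.event.iteration} on ${e.event.model}: ` +
          (tokens !== undefined ? `~${tokens} input tokens` : "input tokens unknown")
      );
    }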
@@ -6278,8 +6290,13 @@ ${endPrefix}`
             event: {
               iteration: context.iteration,
               model: context.options.model,
+              // Backward compat fields
+              inputTokens: context.usage?.inputTokens,
               outputTokens: context.usage?.outputTokens,
-              finishReason: context.finishReason
+              finishReason: context.finishReason ?? void 0,
+              // Full usage object with cache details (for first-class display)
+              usage: context.usage
+              // Cost will be calculated by parent if it has model registry
             }
           });
           if (existingOnLLMCallComplete) {
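
The llm_call_end event now carries both the flat inputTokens/outputTokens/finishReason fields (kept for backward compatibility) and the full usage object, with the inline comment deferring cost calculation to the parent. A sketch of what such a parent-side calculation could look like (pricePerInputToken and pricePerOutputToken are hypothetical pricing fields, not llmist's actual model-registry API):

    interface Usage {
      inputTokens: number;
      outputTokens: number;
      totalTokens: number;
    }

    // Hypothetical pricing shape; a real model registry may expose something richer.
    interface ModelPricing {
      pricePerInputToken: number;
      pricePerOutputToken: number;
    }

    function estimateCost(usage?: Usage, pricing?: ModelPricing): number | undefined {
      if (!usage || !pricing) return undefined; // no usage or unknown model: no cost
      return (
        usage.inputTokens * pricing.pricePerInputToken +
        usage.outputTokens * pricing.pricePerOutputToken
      );
    }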
@@ -8133,6 +8150,9 @@ var init_gemini = __esm({
     async countTokens(messages, descriptor, _spec) {
       const client = this.client;
       const contents = this.convertMessagesToContents(messages);
+      if (!contents || contents.length === 0) {
+        return 0;
+      }
       try {
         const response = await client.models.countTokens({
           model: descriptor.name,
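
The Gemini countTokens implementation now short-circuits to 0 when message conversion yields no contents, presumably to avoid sending a countTokens request with an empty contents array. The same guard pattern in isolation (safeCountTokens is an illustrative wrapper, not part of llmist):

    // Illustrative wrapper around any countTokens-style call that rejects empty input.
    async function safeCountTokens(
      count: (contents: unknown[]) => Promise<number>,
      contents: unknown[] | undefined
    ): Promise<number> {
      if (!contents || contents.length === 0) {
        return 0; // nothing to count; avoid sending an empty request
      }
      return count(contents);
    }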
@@ -11698,4 +11718,4 @@ export {
   createEmptyStream,
   createErrorStream
 };
-//# sourceMappingURL=chunk-RHR2M6T6.js.map
+//# sourceMappingURL=chunk-YJKUWFIC.js.map