llmist 1.6.2 → 2.0.0

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
@@ -738,6 +738,44 @@ ${this.endPrefix}`
  }
  });
 
+ // src/gadgets/exceptions.ts
+ var BreakLoopException, HumanInputException, TimeoutException, AbortError;
+ var init_exceptions = __esm({
+ "src/gadgets/exceptions.ts"() {
+ "use strict";
+ BreakLoopException = class extends Error {
+ constructor(message) {
+ super(message ?? "Agent loop terminated by gadget");
+ this.name = "BreakLoopException";
+ }
+ };
+ HumanInputException = class extends Error {
+ question;
+ constructor(question) {
+ super(`Human input required: ${question}`);
+ this.name = "HumanInputException";
+ this.question = question;
+ }
+ };
+ TimeoutException = class extends Error {
+ timeoutMs;
+ gadgetName;
+ constructor(gadgetName, timeoutMs) {
+ super(`Gadget '${gadgetName}' execution exceeded timeout of ${timeoutMs}ms`);
+ this.name = "TimeoutException";
+ this.gadgetName = gadgetName;
+ this.timeoutMs = timeoutMs;
+ }
+ };
+ AbortError = class extends Error {
+ constructor(message) {
+ super(message || "Gadget execution was aborted");
+ this.name = "AbortError";
+ }
+ };
+ }
+ });
+
  // src/gadgets/schema-to-json.ts
  import * as z2 from "zod";
  function schemaToJSONSchema(schema, options) {
@@ -930,6 +968,7 @@ var init_gadget = __esm({
  "src/gadgets/gadget.ts"() {
  "use strict";
  init_constants();
+ init_exceptions();
  init_schema_to_json();
  init_schema_validator();
  BaseGadget = class {
@@ -959,6 +998,42 @@ var init_gadget = __esm({
  * while maintaining runtime compatibility.
  */
  examples;
+ /**
+ * Throws an AbortError if the execution has been aborted.
+ *
+ * Call this at key checkpoints in long-running gadgets to allow early exit
+ * when the gadget has been cancelled (e.g., due to timeout). This enables
+ * resource cleanup and prevents unnecessary work after cancellation.
+ *
+ * @param ctx - The execution context containing the abort signal
+ * @throws AbortError if ctx.signal.aborted is true
+ *
+ * @example
+ * ```typescript
+ * class DataProcessor extends Gadget({
+ * description: 'Processes data in multiple steps',
+ * schema: z.object({ items: z.array(z.string()) }),
+ * }) {
+ * async execute(params: this['params'], ctx?: ExecutionContext): Promise<string> {
+ * const results: string[] = [];
+ *
+ * for (const item of params.items) {
+ * // Check before each expensive operation
+ * this.throwIfAborted(ctx);
+ *
+ * results.push(await this.processItem(item));
+ * }
+ *
+ * return results.join(', ');
+ * }
+ * }
+ * ```
+ */
+ throwIfAborted(ctx) {
+ if (ctx?.signal?.aborted) {
+ throw new AbortError();
+ }
+ }
  /**
  * Auto-generated instruction text for the LLM.
  * Combines name, description, and parameter schema into a formatted instruction.
@@ -1026,8 +1101,8 @@ function createGadget(config) {
  parameterSchema = config.schema;
  timeoutMs = config.timeoutMs;
  examples = config.examples;
- execute(params) {
- return config.execute(params);
+ execute(params, ctx) {
+ return config.execute(params, ctx);
  }
  }
  return new DynamicGadget();
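The hunks above give every gadget an optional second `execute` argument and a way to honor cancellation. A minimal sketch of a cancellation-aware gadget built with `createGadget`, assuming the execution-context shape the executor constructs later in this diff (`{ reportCost, llmist, signal }`); the `name` and `description` fields, the `"llmist"` import path, and the `checkJob` helper are illustrative assumptions, not taken from this diff:

```typescript
import { z } from "zod";
import { createGadget } from "llmist"; // assumed entry point

// Illustrative gadget: polls a job and bails out once the executor aborts
// ctx.signal (which, per the executor changes below, now happens on timeout).
const pollJob = createGadget({
  name: "poll_job",                      // assumed field
  description: "Polls a job until done", // assumed field
  schema: z.object({ jobId: z.string() }),
  timeoutMs: 10_000,
  async execute(params, ctx) {
    for (let attempt = 0; attempt < 100; attempt++) {
      // Cooperative cancellation: stop doing work once the signal is aborted.
      if (ctx?.signal?.aborted) {
        throw new Error(`Polling of ${params.jobId} aborted`);
      }
      const done = await checkJob(params.jobId); // stand-in for real I/O
      if (done) return `Job ${params.jobId} finished after ${attempt + 1} polls`;
      await new Promise((resolve) => setTimeout(resolve, 100));
    }
    return `Job ${params.jobId} still running`;
  },
});

// Stand-in helper so the sketch is self-contained.
async function checkJob(_jobId: string): Promise<boolean> {
  return Math.random() < 0.1;
}
```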
@@ -2363,6 +2438,162 @@ var init_block_params = __esm({
  }
  });
 
+ // src/gadgets/cost-reporting-client.ts
+ var CostReportingLLMistWrapper;
+ var init_cost_reporting_client = __esm({
+ "src/gadgets/cost-reporting-client.ts"() {
+ "use strict";
+ init_model_shortcuts();
+ CostReportingLLMistWrapper = class {
+ constructor(client, reportCost) {
+ this.client = client;
+ this.reportCost = reportCost;
+ }
+ /**
+ * Access to model registry for cost estimation.
+ */
+ get modelRegistry() {
+ return this.client.modelRegistry;
+ }
+ /**
+ * Quick completion with automatic cost reporting.
+ *
+ * Streams internally to track token usage, then reports the calculated cost.
+ *
+ * @param prompt - User prompt
+ * @param options - Optional configuration (model, temperature, etc.)
+ * @returns Complete text response
+ */
+ async complete(prompt, options) {
+ const model = resolveModel(options?.model ?? "haiku");
+ let result = "";
+ let inputTokens = 0;
+ let outputTokens = 0;
+ let cachedInputTokens = 0;
+ let cacheCreationInputTokens = 0;
+ const messages = [
+ ...options?.systemPrompt ? [{ role: "system", content: options.systemPrompt }] : [],
+ { role: "user", content: prompt }
+ ];
+ for await (const chunk of this.client.stream({
+ model,
+ messages,
+ temperature: options?.temperature,
+ maxTokens: options?.maxTokens
+ })) {
+ result += chunk.text ?? "";
+ if (chunk.usage) {
+ inputTokens = chunk.usage.inputTokens;
+ outputTokens = chunk.usage.outputTokens;
+ cachedInputTokens = chunk.usage.cachedInputTokens ?? 0;
+ cacheCreationInputTokens = chunk.usage.cacheCreationInputTokens ?? 0;
+ }
+ }
+ this.reportCostFromUsage(model, inputTokens, outputTokens, cachedInputTokens, cacheCreationInputTokens);
+ return result;
+ }
+ /**
+ * Quick streaming with automatic cost reporting when stream completes.
+ *
+ * Yields text chunks as they arrive, then reports cost in finally block.
+ *
+ * @param prompt - User prompt
+ * @param options - Optional configuration (model, temperature, etc.)
+ * @returns Async generator yielding text chunks
+ */
+ async *streamText(prompt, options) {
+ const model = resolveModel(options?.model ?? "haiku");
+ let inputTokens = 0;
+ let outputTokens = 0;
+ let cachedInputTokens = 0;
+ let cacheCreationInputTokens = 0;
+ const messages = [
+ ...options?.systemPrompt ? [{ role: "system", content: options.systemPrompt }] : [],
+ { role: "user", content: prompt }
+ ];
+ try {
+ for await (const chunk of this.client.stream({
+ model,
+ messages,
+ temperature: options?.temperature,
+ maxTokens: options?.maxTokens
+ })) {
+ if (chunk.text) {
+ yield chunk.text;
+ }
+ if (chunk.usage) {
+ inputTokens = chunk.usage.inputTokens;
+ outputTokens = chunk.usage.outputTokens;
+ cachedInputTokens = chunk.usage.cachedInputTokens ?? 0;
+ cacheCreationInputTokens = chunk.usage.cacheCreationInputTokens ?? 0;
+ }
+ }
+ } finally {
+ this.reportCostFromUsage(model, inputTokens, outputTokens, cachedInputTokens, cacheCreationInputTokens);
+ }
+ }
+ /**
+ * Low-level stream access with automatic cost reporting.
+ *
+ * Returns a wrapped stream that reports costs when iteration completes.
+ *
+ * @param options - Full LLM generation options
+ * @returns Wrapped LLM stream that auto-reports costs
+ */
+ stream(options) {
+ return this.createCostReportingStream(options);
+ }
+ /**
+ * Creates a wrapped stream that tracks usage and reports costs on completion.
+ */
+ createCostReportingStream(options) {
+ const innerStream = this.client.stream(options);
+ const reportCostFromUsage = this.reportCostFromUsage.bind(this);
+ const model = options.model;
+ async function* costReportingWrapper() {
+ let inputTokens = 0;
+ let outputTokens = 0;
+ let cachedInputTokens = 0;
+ let cacheCreationInputTokens = 0;
+ try {
+ for await (const chunk of innerStream) {
+ if (chunk.usage) {
+ inputTokens = chunk.usage.inputTokens;
+ outputTokens = chunk.usage.outputTokens;
+ cachedInputTokens = chunk.usage.cachedInputTokens ?? 0;
+ cacheCreationInputTokens = chunk.usage.cacheCreationInputTokens ?? 0;
+ }
+ yield chunk;
+ }
+ } finally {
+ if (inputTokens > 0 || outputTokens > 0) {
+ reportCostFromUsage(model, inputTokens, outputTokens, cachedInputTokens, cacheCreationInputTokens);
+ }
+ }
+ }
+ return costReportingWrapper();
+ }
+ /**
+ * Calculates and reports cost from token usage.
+ */
+ reportCostFromUsage(model, inputTokens, outputTokens, cachedInputTokens = 0, cacheCreationInputTokens = 0) {
+ if (inputTokens === 0 && outputTokens === 0) return;
+ const modelName = model.includes(":") ? model.split(":")[1] : model;
+ const estimate = this.client.modelRegistry.estimateCost(
+ modelName,
+ inputTokens,
+ outputTokens,
+ cachedInputTokens,
+ cacheCreationInputTokens
+ );
+ if (estimate && estimate.totalCost > 0) {
+ this.reportCost(estimate.totalCost);
+ }
+ }
+ };
+ }
+ });
+
  // src/gadgets/error-formatter.ts
  var GadgetErrorFormatter;
  var init_error_formatter = __esm({
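The wrapper added above is what a gadget sees as `ctx.llmist` during execution (see the executor changes below): sub-LLM calls made through it stream internally, and their estimated cost is funneled into the same `reportCost` callback the executor uses to aggregate per-gadget cost. A hedged sketch of an `execute` body using both paths; the interface name, the gadget logic, and the reported price are illustrative:

```typescript
// Sketch of an execute() body using the 2.0.0 execution context.
// The type name is assumed; the shape mirrors what the executor builds
// below: { reportCost, llmist?, signal }.
interface SketchExecutionContext {
  reportCost: (amount: number) => void;
  llmist?: {
    complete(prompt: string, options?: { model?: string; systemPrompt?: string }): Promise<string>;
  };
  signal: AbortSignal;
}

async function execute(params: { text: string }, ctx?: SketchExecutionContext): Promise<string> {
  // Sub-LLM call: CostReportingLLMistWrapper tracks token usage while streaming
  // and reports the estimated cost automatically once the call completes.
  const summary = ctx?.llmist
    ? await ctx.llmist.complete(`Summarize in one sentence:\n${params.text}`, { model: "haiku" })
    : params.text.slice(0, 120);

  // Non-LLM spend (e.g., a paid external API) can be reported directly;
  // the executor ignores amounts <= 0 and sums the rest into the gadget's cost.
  ctx?.reportCost(0.002); // illustrative amount

  return summary;
}
```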
@@ -2446,38 +2677,6 @@ var init_error_formatter = __esm({
  }
  });
 
- // src/gadgets/exceptions.ts
- var BreakLoopException, HumanInputException, TimeoutException;
- var init_exceptions = __esm({
- "src/gadgets/exceptions.ts"() {
- "use strict";
- BreakLoopException = class extends Error {
- constructor(message) {
- super(message ?? "Agent loop terminated by gadget");
- this.name = "BreakLoopException";
- }
- };
- HumanInputException = class extends Error {
- question;
- constructor(question) {
- super(`Human input required: ${question}`);
- this.name = "HumanInputException";
- this.question = question;
- }
- };
- TimeoutException = class extends Error {
- timeoutMs;
- gadgetName;
- constructor(gadgetName, timeoutMs) {
- super(`Gadget '${gadgetName}' execution exceeded timeout of ${timeoutMs}ms`);
- this.name = "TimeoutException";
- this.gadgetName = gadgetName;
- this.timeoutMs = timeoutMs;
- }
- };
- }
- });
-
  // src/gadgets/parser.ts
  function stripMarkdownFences(content) {
  let cleaned = content.trim();
@@ -2663,14 +2862,16 @@ var init_executor = __esm({
  init_constants();
  init_logger();
  init_block_params();
+ init_cost_reporting_client();
  init_error_formatter();
  init_exceptions();
  init_parser();
  GadgetExecutor = class {
- constructor(registry, onHumanInputRequired, logger, defaultGadgetTimeoutMs, errorFormatterOptions) {
+ constructor(registry, onHumanInputRequired, logger, defaultGadgetTimeoutMs, errorFormatterOptions, client) {
  this.registry = registry;
  this.onHumanInputRequired = onHumanInputRequired;
  this.defaultGadgetTimeoutMs = defaultGadgetTimeoutMs;
+ this.client = client;
  this.logger = logger ?? createLogger({ name: "llmist:executor" });
  this.errorFormatter = new GadgetErrorFormatter(errorFormatterOptions);
  this.argPrefix = errorFormatterOptions?.argPrefix ?? GADGET_ARG_PREFIX;
@@ -2680,14 +2881,27 @@ var init_executor = __esm({
  argPrefix;
  /**
  * Creates a promise that rejects with a TimeoutException after the specified timeout.
+ * Aborts the provided AbortController before rejecting, allowing gadgets to clean up.
  */
- createTimeoutPromise(gadgetName, timeoutMs) {
+ createTimeoutPromise(gadgetName, timeoutMs, abortController) {
  return new Promise((_, reject) => {
  setTimeout(() => {
- reject(new TimeoutException(gadgetName, timeoutMs));
+ const timeoutError = new TimeoutException(gadgetName, timeoutMs);
+ abortController.abort(timeoutError.message);
+ reject(timeoutError);
  }, timeoutMs);
  });
  }
+ /**
+ * Normalizes gadget execute result to consistent format.
+ * Handles both string returns (backwards compat) and object returns with cost.
+ */
+ normalizeExecuteResult(raw) {
+ if (typeof raw === "string") {
+ return { result: raw, cost: 0 };
+ }
+ return { result: raw.result, cost: raw.cost ?? 0 };
+ }
  // Execute a gadget call asynchronously
  async execute(call) {
  const startTime = Date.now();
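`normalizeExecuteResult` above means a gadget's `execute()` can keep returning a plain string or return an object carrying an explicit cost, which the executor adds to whatever was reported via the callback. A small self-contained TypeScript sketch of the same normalization (types and sample values are illustrative):

```typescript
// A gadget may return either shape; the executor normalizes both.
type GadgetExecuteResult = string | { result: string; cost?: number };

function normalizeExecuteResult(raw: GadgetExecuteResult): { result: string; cost: number } {
  if (typeof raw === "string") {
    return { result: raw, cost: 0 }; // pre-2.0 behavior: no explicit cost
  }
  return { result: raw.result, cost: raw.cost ?? 0 };
}

console.log(normalizeExecuteResult("3 files indexed"));
// → { result: "3 files indexed", cost: 0 }
console.log(normalizeExecuteResult({ result: "3 files indexed", cost: 0.0042 }));
// → { result: "3 files indexed", cost: 0.0042 }
```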
@@ -2782,30 +2996,53 @@ var init_executor = __esm({
  validatedParameters = schemaAwareParameters;
  }
  const timeoutMs = gadget.timeoutMs ?? this.defaultGadgetTimeoutMs;
- let result;
+ const abortController = new AbortController();
+ let callbackCost = 0;
+ const reportCost = (amount) => {
+ if (amount > 0) {
+ callbackCost += amount;
+ this.logger.debug("Gadget reported cost via callback", {
+ gadgetName: call.gadgetName,
+ amount,
+ totalCallbackCost: callbackCost
+ });
+ }
+ };
+ const ctx = {
+ reportCost,
+ llmist: this.client ? new CostReportingLLMistWrapper(this.client, reportCost) : void 0,
+ signal: abortController.signal
+ };
+ let rawResult;
  if (timeoutMs && timeoutMs > 0) {
  this.logger.debug("Executing gadget with timeout", {
  gadgetName: call.gadgetName,
  timeoutMs
  });
- result = await Promise.race([
- Promise.resolve(gadget.execute(validatedParameters)),
- this.createTimeoutPromise(call.gadgetName, timeoutMs)
+ rawResult = await Promise.race([
+ Promise.resolve(gadget.execute(validatedParameters, ctx)),
+ this.createTimeoutPromise(call.gadgetName, timeoutMs, abortController)
  ]);
  } else {
- result = await Promise.resolve(gadget.execute(validatedParameters));
+ rawResult = await Promise.resolve(gadget.execute(validatedParameters, ctx));
  }
+ const { result, cost: returnCost } = this.normalizeExecuteResult(rawResult);
+ const totalCost = callbackCost + returnCost;
  const executionTimeMs = Date.now() - startTime;
  this.logger.info("Gadget executed successfully", {
  gadgetName: call.gadgetName,
  invocationId: call.invocationId,
- executionTimeMs
+ executionTimeMs,
+ cost: totalCost > 0 ? totalCost : void 0,
+ callbackCost: callbackCost > 0 ? callbackCost : void 0,
+ returnCost: returnCost > 0 ? returnCost : void 0
  });
  this.logger.debug("Gadget result", {
  gadgetName: call.gadgetName,
  invocationId: call.invocationId,
  parameters: validatedParameters,
  result,
+ cost: totalCost,
  executionTimeMs
  });
  return {
@@ -2813,7 +3050,8 @@ var init_executor = __esm({
  invocationId: call.invocationId,
  parameters: validatedParameters,
  result,
- executionTimeMs
+ executionTimeMs,
+ cost: totalCost
  };
  } catch (error) {
  if (error instanceof BreakLoopException) {
@@ -2844,6 +3082,19 @@ var init_executor = __esm({
  executionTimeMs: Date.now() - startTime
  };
  }
+ if (error instanceof AbortError) {
+ this.logger.info("Gadget execution was aborted", {
+ gadgetName: call.gadgetName,
+ executionTimeMs: Date.now() - startTime
+ });
+ return {
+ gadgetName: call.gadgetName,
+ invocationId: call.invocationId,
+ parameters: validatedParameters,
+ error: error.message,
+ executionTimeMs: Date.now() - startTime
+ };
+ }
  if (error instanceof HumanInputException) {
  this.logger.info("Gadget requested human input", {
  gadgetName: call.gadgetName,
@@ -2970,7 +3221,8 @@ var init_stream_processor = __esm({
  options.onHumanInputRequired,
  this.logger.getSubLogger({ name: "executor" }),
  options.defaultGadgetTimeoutMs,
- { argPrefix: options.gadgetArgPrefix }
+ { argPrefix: options.gadgetArgPrefix },
+ options.client
  );
  }
  /**
@@ -3237,6 +3489,7 @@ var init_stream_processor = __esm({
  error: result.error,
  executionTimeMs: result.executionTimeMs,
  breaksLoop: result.breaksLoop,
+ cost: result.cost,
  logger: this.logger
  };
  await this.hooks.observers.onGadgetExecutionComplete(context);
@@ -3613,7 +3866,8 @@ var init_agent = __esm({
  onHumanInputRequired: this.onHumanInputRequired,
  stopOnGadgetError: this.stopOnGadgetError,
  shouldContinueAfterError: this.shouldContinueAfterError,
- defaultGadgetTimeoutMs: this.defaultGadgetTimeoutMs
+ defaultGadgetTimeoutMs: this.defaultGadgetTimeoutMs,
+ client: this.client
  });
  const result = await processor.process(stream2);
  for (const output of result.outputs) {
@@ -3917,6 +4171,7 @@ var init_builder = __esm({
  gadgetOutputLimitPercent;
  compactionConfig;
  signal;
+ trailingMessage;
  constructor(client) {
  this.client = client;
  }
@@ -4392,6 +4647,31 @@ var init_builder = __esm({
  this.signal = signal;
  return this;
  }
+ /**
+ * Add an ephemeral trailing message that appears at the end of each LLM request.
+ *
+ * The message is NOT persisted to conversation history - it only appears in the
+ * current LLM call. This is useful for injecting context-specific instructions
+ * or reminders without polluting the conversation history.
+ *
+ * @param message - Static string or function that generates the message
+ * @returns This builder for chaining
+ *
+ * @example
+ * ```typescript
+ * // Static message
+ * .withTrailingMessage("Always respond in JSON format.")
+ *
+ * // Dynamic message based on iteration
+ * .withTrailingMessage((ctx) =>
+ * `[Iteration ${ctx.iteration}/${ctx.maxIterations}] Stay focused on the task.`
+ * )
+ * ```
+ */
+ withTrailingMessage(message) {
+ this.trailingMessage = message;
+ return this;
+ }
  /**
  * Add a synthetic gadget call to the conversation history.
  *
@@ -4433,6 +4713,36 @@ ${endPrefix}`
  });
  return this;
  }
+ /**
+ * Compose the final hooks, including trailing message if configured.
+ */
+ composeHooks() {
+ if (!this.trailingMessage) {
+ return this.hooks;
+ }
+ const trailingMsg = this.trailingMessage;
+ const existingBeforeLLMCall = this.hooks?.controllers?.beforeLLMCall;
+ const trailingMessageController = async (ctx) => {
+ const result = existingBeforeLLMCall ? await existingBeforeLLMCall(ctx) : { action: "proceed" };
+ if (result.action === "skip") {
+ return result;
+ }
+ const messages = [...result.modifiedOptions?.messages || ctx.options.messages];
+ const content = typeof trailingMsg === "function" ? trailingMsg({ iteration: ctx.iteration, maxIterations: ctx.maxIterations }) : trailingMsg;
+ messages.push({ role: "user", content });
+ return {
+ action: "proceed",
+ modifiedOptions: { ...result.modifiedOptions, messages }
+ };
+ };
+ return {
+ ...this.hooks,
+ controllers: {
+ ...this.hooks?.controllers,
+ beforeLLMCall: trailingMessageController
+ }
+ };
+ }
  /**
  * Format parameters as block format with JSON Pointer paths.
  */
@@ -4494,7 +4804,7 @@ ${endPrefix}`
  maxIterations: this.maxIterations,
  temperature: this.temperature,
  logger: this.logger,
- hooks: this.hooks,
+ hooks: this.composeHooks(),
  promptConfig: this.promptConfig,
  initialMessages: this.initialMessages,
  onHumanInputRequired: this.onHumanInputRequired,
@@ -4598,7 +4908,7 @@ ${endPrefix}`
  maxIterations: this.maxIterations,
  temperature: this.temperature,
  logger: this.logger,
- hooks: this.hooks,
+ hooks: this.composeHooks(),
  promptConfig: this.promptConfig,
  initialMessages: this.initialMessages,
  onHumanInputRequired: this.onHumanInputRequired,
@@ -6644,10 +6954,18 @@ async function testGadget(gadget, params, options) {
  validatedParams = validationResult.data;
  }
  try {
- const result = await Promise.resolve(gadget.execute(validatedParams));
+ const rawResult = await Promise.resolve(gadget.execute(validatedParams));
+ if (typeof rawResult === "string") {
+ return {
+ result: rawResult,
+ validatedParams,
+ cost: 0
+ };
+ }
  return {
- result,
- validatedParams
+ result: rawResult.result,
+ validatedParams,
+ cost: rawResult.cost ?? 0
  };
  } catch (error) {
  return {
@@ -7956,6 +8274,10 @@ export {
  init_prompt_config,
  LLMMessageBuilder,
  init_messages,
+ BreakLoopException,
+ HumanInputException,
+ AbortError,
+ init_exceptions,
  createLogger,
  defaultLogger,
  init_logger,
@@ -7985,9 +8307,6 @@ export {
  collectEvents,
  collectText,
  init_event_handlers,
- BreakLoopException,
- HumanInputException,
- init_exceptions,
  StreamParser,
  init_parser,
  GadgetExecutor,
@@ -8060,4 +8379,4 @@ export {
  MockPromptRecorder,
  waitFor
  };
- //# sourceMappingURL=chunk-T3DIKQWU.js.map
+ //# sourceMappingURL=chunk-LBHWVCZ2.js.map