llmist 0.3.1 → 0.4.1

package/dist/cli.js CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env node
 import {
   createGadget
-} from "./chunk-I55AV3WV.js";
+} from "./chunk-QVDGTUQN.js";
 import {
   AgentBuilder,
   BaseGadget,
@@ -22,7 +22,7 @@ import {
   init_model_shortcuts,
   init_registry,
   resolveModel
-} from "./chunk-VYBRYR2S.js";
+} from "./chunk-LQE7TKKW.js";
 
 // src/cli/constants.ts
 var CLI_NAME = "llmist";
@@ -45,7 +45,8 @@ var OPTION_FLAGS = {
   parameterFormat: "--parameter-format <format>",
   logLevel: "--log-level <level>",
   logFile: "--log-file <path>",
-  noBuiltins: "--no-builtins"
+  noBuiltins: "--no-builtins",
+  noBuiltinInteraction: "--no-builtin-interaction"
 };
 var OPTION_DESCRIPTIONS = {
   model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-sonnet-4-5.",
@@ -57,7 +58,8 @@ var OPTION_DESCRIPTIONS = {
   parameterFormat: "Format for gadget parameter schemas: 'json', 'yaml', or 'auto'.",
   logLevel: "Log level: silly, trace, debug, info, warn, error, fatal.",
   logFile: "Path to log file. When set, logs are written to file instead of stderr.",
-  noBuiltins: "Disable built-in gadgets (AskUser, TellUser)."
+  noBuiltins: "Disable built-in gadgets (AskUser, TellUser).",
+  noBuiltinInteraction: "Disable interactive gadgets (AskUser) while keeping TellUser."
 };
 var SUMMARY_PREFIX = "[llmist]";
 
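The new flag complements --no-builtins rather than replacing it. Illustrative invocations (the subcommand name is inferred from registerAgentCommand further down in this diff; the prompt is made up):

    # keep TellUser output but disable the interactive AskUser gadget
    llmist agent "Summarize README.md" --no-builtin-interaction

    # disable both built-in gadgets
    llmist agent "Summarize README.md" --no-builtins
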
@@ -67,7 +69,7 @@ import { Command, InvalidArgumentError as InvalidArgumentError3 } from "commande
 // package.json
 var package_default = {
   name: "llmist",
-  version: "0.3.0",
+  version: "0.4.0",
   description: "Universal TypeScript LLM client with streaming-first agent framework. Works with any model - no structured outputs or native tool calling required. Implements its own flexible grammar for function calling.",
   type: "module",
   main: "dist/index.cjs",
@@ -179,7 +181,6 @@ init_builder();
 init_registry();
 init_constants();
 import { createInterface } from "node:readline/promises";
-import chalk2 from "chalk";
 import { InvalidArgumentError as InvalidArgumentError2 } from "commander";
 
 // src/cli/builtin-gadgets.ts
@@ -317,8 +318,66 @@ async function loadGadgets(specifiers, cwd, importer = (specifier) => import(spe
 
 // src/cli/utils.ts
 init_constants();
-import chalk from "chalk";
+import chalk2 from "chalk";
 import { InvalidArgumentError } from "commander";
+
+// src/cli/ui/formatters.ts
+import chalk from "chalk";
+function formatTokens(tokens) {
+  return tokens >= 1e3 ? `${(tokens / 1e3).toFixed(1)}k` : `${tokens}`;
+}
+function formatCost(cost) {
+  if (cost < 1e-3) {
+    return cost.toFixed(5);
+  }
+  if (cost < 0.01) {
+    return cost.toFixed(4);
+  }
+  if (cost < 1) {
+    return cost.toFixed(3);
+  }
+  return cost.toFixed(2);
+}
+function renderSummary(metadata) {
+  const parts = [];
+  if (metadata.iterations !== void 0) {
+    parts.push(chalk.cyan(`#${metadata.iterations}`));
+  }
+  if (metadata.usage) {
+    const { inputTokens, outputTokens } = metadata.usage;
+    parts.push(chalk.dim("\u2191") + chalk.yellow(` ${formatTokens(inputTokens)}`));
+    parts.push(chalk.dim("\u2193") + chalk.green(` ${formatTokens(outputTokens)}`));
+  }
+  if (metadata.elapsedSeconds !== void 0 && metadata.elapsedSeconds > 0) {
+    parts.push(chalk.dim(`${metadata.elapsedSeconds}s`));
+  }
+  if (metadata.cost !== void 0 && metadata.cost > 0) {
+    parts.push(chalk.cyan(`$${formatCost(metadata.cost)}`));
+  }
+  if (metadata.finishReason) {
+    parts.push(chalk.dim(metadata.finishReason));
+  }
+  if (parts.length === 0) {
+    return null;
+  }
+  return parts.join(chalk.dim(" | "));
+}
+function formatGadgetSummary(result) {
+  const gadgetLabel = chalk.magenta.bold(result.gadgetName);
+  const timeLabel = chalk.dim(`${Math.round(result.executionTimeMs)}ms`);
+  if (result.error) {
+    return `${chalk.red("\u2717")} ${gadgetLabel} ${chalk.red("error:")} ${result.error} ${timeLabel}`;
+  }
+  if (result.breaksLoop) {
+    return `${chalk.yellow("\u23F9")} ${gadgetLabel} ${chalk.yellow("finished:")} ${result.result} ${timeLabel}`;
+  }
+  const maxLen = 80;
+  const shouldTruncate = result.gadgetName !== "TellUser";
+  const resultText = result.result ? shouldTruncate && result.result.length > maxLen ? `${result.result.slice(0, maxLen)}...` : result.result : "";
+  return `${chalk.green("\u2713")} ${gadgetLabel} ${chalk.dim("\u2192")} ${resultText} ${timeLabel}`;
+}
+
+// src/cli/utils.ts
 function createNumericParser({
   label,
   integer = false,
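
For reference, sample outputs of the newly extracted formatters, worked from the code above rather than taken from the package's tests:

    formatTokens(847);     // "847"
    formatTokens(3625);    // "3.6k"
    formatCost(0.0001234); // "0.00012"
    formatCost(0.0042);    // "0.0042"
    formatCost(0.123);     // "0.123"
    formatCost(1.234);     // "1.23"
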
@@ -401,15 +460,20 @@ var StreamProgress = class {
   totalTokens = 0;
   totalCost = 0;
   iterations = 0;
+  currentIteration = 0;
   /**
    * Starts a new LLM call. Switches to streaming mode.
    * @param model - Model name being used
-   * @param estimatedInputTokens - Estimated input tokens based on prompt length
+   * @param estimatedInputTokens - Initial input token count. Should come from
+   * client.countTokens() for accuracy (provider-specific counting), not
+   * character-based estimation. Will be updated with provider-returned counts
+   * via setInputTokens() during streaming if available.
    */
   startCall(model, estimatedInputTokens) {
     this.mode = "streaming";
     this.model = model;
     this.callStartTime = Date.now();
+    this.currentIteration++;
     this.callInputTokens = estimatedInputTokens ?? 0;
     this.callInputTokensEstimated = true;
     this.callOutputTokens = 0;
@@ -446,8 +510,10 @@ var StreamProgress = class {
   }
   /**
    * Sets the input token count for current call (from stream metadata).
-   * @param tokens - Token count
-   * @param estimated - If true, shown with ~ prefix until actual count arrives
+   * @param tokens - Token count from provider or client.countTokens()
+   * @param estimated - If true, this is a fallback estimate (character-based).
+   * If false, this is an accurate count from the provider API or client.countTokens().
+   * Display shows ~ prefix only when estimated=true.
    */
   setInputTokens(tokens, estimated = false) {
     if (estimated && !this.callInputTokensEstimated) {
@@ -458,8 +524,10 @@ var StreamProgress = class {
   }
   /**
    * Sets the output token count for current call (from stream metadata).
-   * @param tokens - Token count
-   * @param estimated - If true, shown with ~ prefix until actual count arrives
+   * @param tokens - Token count from provider streaming response
+   * @param estimated - If true, this is a fallback estimate (character-based).
+   * If false, this is an accurate count from the provider's streaming metadata.
+   * Display shows ~ prefix only when estimated=true.
    */
   setOutputTokens(tokens, estimated = false) {
     if (estimated && !this.callOutputTokensEstimated) {
@@ -468,6 +536,14 @@ var StreamProgress = class {
     this.callOutputTokens = tokens;
     this.callOutputTokensEstimated = estimated;
   }
+  /**
+   * Get total elapsed time in seconds since the first call started.
+   * @returns Elapsed time in seconds with 1 decimal place
+   */
+  getTotalElapsedSeconds() {
+    if (this.totalStartTime === 0) return 0;
+    return Number(((Date.now() - this.totalStartTime) / 1e3).toFixed(1));
+  }
   /**
    * Starts the progress indicator animation after a brief delay.
    */
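
The toFixed(1)/Number round-trip returns a plain number with at most one decimal place, and 0 before any call has started. Worked example:

    // totalStartTime was 4321 ms ago:
    // (4321 / 1e3).toFixed(1) === "4.3" -> Number("4.3") === 4.3
    progress.getTotalElapsedSeconds(); // 4.3
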
@@ -502,40 +578,38 @@ var StreamProgress = class {
     const elapsed = ((Date.now() - this.callStartTime) / 1e3).toFixed(1);
     const outTokens = this.callOutputTokensEstimated ? Math.round(this.callOutputChars / FALLBACK_CHARS_PER_TOKEN) : this.callOutputTokens;
     const parts = [];
-    if (this.model) {
-      parts.push(chalk.cyan(this.model));
-    }
+    parts.push(chalk2.cyan(`#${this.currentIteration}`));
     if (this.callInputTokens > 0) {
       const prefix = this.callInputTokensEstimated ? "~" : "";
-      parts.push(chalk.dim("out:") + chalk.yellow(` ${prefix}${this.callInputTokens}`));
+      parts.push(chalk2.dim("\u2191") + chalk2.yellow(` ${prefix}${formatTokens(this.callInputTokens)}`));
     }
     if (this.isStreaming || outTokens > 0) {
       const prefix = this.callOutputTokensEstimated ? "~" : "";
-      parts.push(chalk.dim("in:") + chalk.green(` ${prefix}${outTokens}`));
+      parts.push(chalk2.dim("\u2193") + chalk2.green(` ${prefix}${formatTokens(outTokens)}`));
     }
+    parts.push(chalk2.dim(`${elapsed}s`));
     if (this.totalCost > 0) {
-      parts.push(chalk.dim("cost:") + chalk.cyan(` $${this.formatCost(this.totalCost)}`));
+      parts.push(chalk2.cyan(`$${formatCost(this.totalCost)}`));
     }
-    parts.push(chalk.dim(`${elapsed}s`));
-    this.target.write(`\r${chalk.cyan(spinner)} ${parts.join(chalk.dim(" | "))}`);
+    this.target.write(`\r${chalk2.cyan(spinner)} ${parts.join(chalk2.dim(" | "))}`);
   }
   renderCumulativeMode(spinner) {
     const elapsed = ((Date.now() - this.totalStartTime) / 1e3).toFixed(1);
     const parts = [];
     if (this.model) {
-      parts.push(chalk.cyan(this.model));
+      parts.push(chalk2.cyan(this.model));
     }
     if (this.totalTokens > 0) {
-      parts.push(chalk.dim("total:") + chalk.magenta(` ${this.totalTokens}`));
+      parts.push(chalk2.dim("total:") + chalk2.magenta(` ${this.totalTokens}`));
     }
     if (this.iterations > 0) {
-      parts.push(chalk.dim("iter:") + chalk.blue(` ${this.iterations}`));
+      parts.push(chalk2.dim("iter:") + chalk2.blue(` ${this.iterations}`));
     }
     if (this.totalCost > 0) {
-      parts.push(chalk.dim("cost:") + chalk.cyan(` $${this.formatCost(this.totalCost)}`));
+      parts.push(chalk2.dim("cost:") + chalk2.cyan(` $${formatCost(this.totalCost)}`));
    }
-    parts.push(chalk.dim(`${elapsed}s`));
-    this.target.write(`\r${chalk.cyan(spinner)} ${parts.join(chalk.dim(" | "))}`);
+    parts.push(chalk2.dim(`${elapsed}s`));
+    this.target.write(`\r${chalk2.cyan(spinner)} ${parts.join(chalk2.dim(" | "))}`);
   }
   /**
    * Pauses the progress indicator and clears the line.
@@ -583,49 +657,28 @@ var StreamProgress = class {
       if (this.callInputTokens > 0) {
        const prefix = this.callInputTokensEstimated ? "~" : "";
        parts.push(
-          chalk.dim("out:") + chalk.yellow(` ${prefix}${this.formatTokens(this.callInputTokens)}`)
+          chalk2.dim("\u2191") + chalk2.yellow(` ${prefix}${formatTokens(this.callInputTokens)}`)
        );
      }
      if (outTokens > 0) {
        const prefix = outEstimated ? "~" : "";
-        parts.push(chalk.dim("in:") + chalk.green(` ${prefix}${this.formatTokens(outTokens)}`));
+        parts.push(chalk2.dim("\u2193") + chalk2.green(` ${prefix}${formatTokens(outTokens)}`));
      }
-      parts.push(chalk.dim(`${elapsed}s`));
+      parts.push(chalk2.dim(`${elapsed}s`));
    } else {
      const elapsed = Math.round((Date.now() - this.totalStartTime) / 1e3);
      if (this.totalTokens > 0) {
-        parts.push(chalk.magenta(this.formatTokens(this.totalTokens)));
+        parts.push(chalk2.magenta(formatTokens(this.totalTokens)));
      }
      if (this.iterations > 0) {
-        parts.push(chalk.blue(`i${this.iterations}`));
+        parts.push(chalk2.blue(`i${this.iterations}`));
      }
      if (this.totalCost > 0) {
-        parts.push(chalk.cyan(`$${this.formatCost(this.totalCost)}`));
+        parts.push(chalk2.cyan(`$${formatCost(this.totalCost)}`));
      }
-      parts.push(chalk.dim(`${elapsed}s`));
-    }
-    return `${parts.join(chalk.dim(" \u2502 "))} ${chalk.green(">")} `;
-  }
-  /**
-   * Formats token count compactly (3625 -> "3.6k").
-   */
-  formatTokens(tokens) {
-    return tokens >= 1e3 ? `${(tokens / 1e3).toFixed(1)}k` : `${tokens}`;
-  }
-  /**
-   * Formats cost compactly (0.0001234 -> "0.00012", 0.1234 -> "0.12", 1.234 -> "1.23").
-   */
-  formatCost(cost) {
-    if (cost < 1e-3) {
-      return cost.toFixed(5);
-    }
-    if (cost < 0.01) {
-      return cost.toFixed(4);
-    }
-    if (cost < 1) {
-      return cost.toFixed(3);
+      parts.push(chalk2.dim(`${elapsed}s`));
    }
-    return cost.toFixed(2);
+    return `${parts.join(chalk2.dim(" | "))} ${chalk2.green(">")} `;
  }
 };
 async function readStream(stream) {
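
Net effect on the streaming status line: the model name gives way to an iteration counter, the old out:/in: labels (which were inverted relative to the values they prefixed) become ↑/↓ arrows, token counts go through formatTokens, and elapsed time moves ahead of cost. An illustrative frame with made-up values:

    0.3.x:  ⠋ claude-sonnet-4-5 | out: ~3625 | in: 412 | cost: $0.012 | 4.2s
    0.4.x:  ⠋ #2 | ↑ ~3.6k | ↓ 412 | 4.2s | $0.012
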
@@ -655,44 +708,12 @@ async function resolvePrompt(promptArg, env) {
   }
   return pipedInput;
 }
-function renderSummary(metadata) {
-  const parts = [];
-  if (metadata.iterations !== void 0) {
-    parts.push(chalk.dim(`iterations: ${metadata.iterations}`));
-  }
-  if (metadata.finishReason) {
-    parts.push(chalk.dim(`finish: ${metadata.finishReason}`));
-  }
-  if (metadata.usage) {
-    const { inputTokens, outputTokens, totalTokens } = metadata.usage;
-    parts.push(
-      chalk.dim(`tokens: `) + chalk.cyan(`${totalTokens}`) + chalk.dim(` (in: ${inputTokens}, out: ${outputTokens})`)
-    );
-  }
-  if (metadata.cost !== void 0 && metadata.cost > 0) {
-    let formattedCost;
-    if (metadata.cost < 1e-3) {
-      formattedCost = metadata.cost.toFixed(5);
-    } else if (metadata.cost < 0.01) {
-      formattedCost = metadata.cost.toFixed(4);
-    } else if (metadata.cost < 1) {
-      formattedCost = metadata.cost.toFixed(3);
-    } else {
-      formattedCost = metadata.cost.toFixed(2);
-    }
-    parts.push(chalk.dim(`cost: `) + chalk.cyan(`$${formattedCost}`));
-  }
-  if (parts.length === 0) {
-    return null;
-  }
-  return parts.join(chalk.dim(" \u2502 "));
-}
 async function executeAction(action, env) {
   try {
     await action();
   } catch (error) {
     const message = error instanceof Error ? error.message : String(error);
-    env.stderr.write(`${chalk.red.bold("Error:")} ${message}
+    env.stderr.write(`${chalk2.red.bold("Error:")} ${message}
 `);
     env.setExitCode(1);
   }
@@ -735,26 +756,15 @@ ${statsPrompt}` : statsPrompt;
     }
   };
 }
-function formatGadgetSummary(result) {
-  const gadgetLabel = chalk2.magenta.bold(result.gadgetName);
-  const timeLabel = chalk2.dim(`${Math.round(result.executionTimeMs)}ms`);
-  if (result.error) {
-    return `${chalk2.red("\u2717")} ${gadgetLabel} ${chalk2.red("error:")} ${result.error} ${timeLabel}`;
-  }
-  if (result.breaksLoop) {
-    return `${chalk2.yellow("\u23F9")} ${gadgetLabel} ${chalk2.yellow("finished:")} ${result.result} ${timeLabel}`;
-  }
-  const maxLen = 80;
-  const shouldTruncate = result.gadgetName !== "TellUser";
-  const resultText = result.result ? shouldTruncate && result.result.length > maxLen ? `${result.result.slice(0, maxLen)}...` : result.result : "";
-  return `${chalk2.green("\u2713")} ${gadgetLabel} ${chalk2.dim("\u2192")} ${resultText} ${timeLabel}`;
-}
 async function handleAgentCommand(promptArg, options, env) {
   const prompt = await resolvePrompt(promptArg, env);
   const client = env.createClient();
   const registry = new GadgetRegistry();
   if (options.builtins !== false) {
     for (const gadget of builtinGadgets) {
+      if (options.builtinInteraction === false && gadget.name === "AskUser") {
+        continue;
+      }
       registry.registerByClass(gadget);
     }
   }
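
Commander's negatable flags set options.builtinInteraction to false when --no-builtin-interaction is passed, so the loop above yields three possible registry states (built-in gadget names per the option descriptions earlier):

    // default                  -> AskUser + TellUser registered
    // --no-builtin-interaction -> TellUser only
    // --no-builtins            -> neither (options.builtins === false skips the loop entirely)
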
@@ -771,16 +781,28 @@ async function handleAgentCommand(promptArg, options, env) {
   let finishReason;
   let usage;
   let iterations = 0;
-  const estimateMessagesTokens = (messages) => {
-    const totalChars = messages.reduce((sum, m) => sum + (m.content?.length ?? 0), 0);
-    return Math.round(totalChars / FALLBACK_CHARS_PER_TOKEN);
+  const countMessagesTokens = async (model, messages) => {
+    try {
+      return await client.countTokens(model, messages);
+    } catch {
+      const totalChars = messages.reduce((sum, m) => sum + (m.content?.length ?? 0), 0);
+      return Math.round(totalChars / FALLBACK_CHARS_PER_TOKEN);
+    }
   };
   const builder = new AgentBuilder(client).withModel(options.model).withLogger(env.createLogger("llmist:cli:agent")).withHooks({
     observers: {
+      // onLLMCallStart: Start progress indicator for each LLM call
+      // This showcases how to react to agent lifecycle events
       onLLMCallStart: async (context) => {
-        const estimate = estimateMessagesTokens(context.options.messages);
-        progress.startCall(context.options.model, estimate);
+        const inputTokens = await countMessagesTokens(
+          context.options.model,
+          context.options.messages
+        );
+        progress.startCall(context.options.model, inputTokens);
+        progress.setInputTokens(inputTokens, false);
       },
+      // onStreamChunk: Real-time updates as LLM generates tokens
+      // This enables responsive UIs that show progress during generation
       onStreamChunk: async (context) => {
         progress.update(context.accumulatedText.length);
         if (context.usage) {
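
countMessagesTokens now prefers the client's provider-aware counter and keeps the old character heuristic only as a catch-all fallback. A sketch of the fallback arithmetic, assuming the conventional 4-characters-per-token constant (FALLBACK_CHARS_PER_TOKEN is defined elsewhere in the bundle, so its actual value is not visible in this diff):

    // hypothetical fallback path: countTokens threw, messages total 1000 chars
    // Math.round(1000 / FALLBACK_CHARS_PER_TOKEN) === 250 when the constant is 4
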
@@ -792,10 +814,20 @@ async function handleAgentCommand(promptArg, options, env) {
           }
         }
       },
+      // onLLMCallComplete: Finalize metrics after each LLM call
+      // This is where you'd typically log metrics or update dashboards
       onLLMCallComplete: async (context) => {
         finishReason = context.finishReason;
         usage = context.usage;
         iterations = Math.max(iterations, context.iteration + 1);
+        if (context.usage) {
+          if (context.usage.inputTokens) {
+            progress.setInputTokens(context.usage.inputTokens, false);
+          }
+          if (context.usage.outputTokens) {
+            progress.setOutputTokens(context.usage.outputTokens, false);
+          }
+        }
         progress.endCall(context.usage);
       }
     }
@@ -837,7 +869,8 @@ async function handleAgentCommand(promptArg, options, env) {
     finishReason,
     usage,
     iterations,
-    cost: progress.getTotalCost()
+    cost: progress.getTotalCost(),
+    elapsedSeconds: progress.getTotalElapsedSeconds()
   });
   if (summary) {
     env.stderr.write(`${summary}
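
These fields feed the new renderSummary shown near the top of this diff, so the final stderr summary now carries wall-clock time. An illustrative line with made-up values (finishReason appears last, and only when present):

    #3 | ↑ 12.4k | ↓ 2.1k | 8.3s | $0.042 | stop
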
@@ -864,7 +897,7 @@ function registerAgentCommand(program, env) {
     OPTION_DESCRIPTIONS.parameterFormat,
     parseParameterFormat,
     DEFAULT_PARAMETER_FORMAT
-  ).option(OPTION_FLAGS.noBuiltins, OPTION_DESCRIPTIONS.noBuiltins).action(
+  ).option(OPTION_FLAGS.noBuiltins, OPTION_DESCRIPTIONS.noBuiltins).option(OPTION_FLAGS.noBuiltinInteraction, OPTION_DESCRIPTIONS.noBuiltinInteraction).action(
     (prompt, options) => executeAction(() => handleAgentCommand(prompt, options, env), env)
   );
 }
@@ -1002,7 +1035,7 @@ function renderCompactTable(models, stream) {
   );
   stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
   for (const model of models) {
-    const contextFormatted = formatTokens(model.contextWindow);
+    const contextFormatted = formatTokens2(model.contextWindow);
     const inputPrice = `$${model.pricing.input.toFixed(2)}`;
     const outputPrice = `$${model.pricing.output.toFixed(2)}`;
     stream.write(
@@ -1021,9 +1054,9 @@ function renderVerboseTable(models, stream) {
     stream.write(chalk3.dim(" " + "\u2500".repeat(60)) + "\n");
     stream.write(` ${chalk3.dim("Name:")} ${chalk3.white(model.displayName)}
 `);
-    stream.write(` ${chalk3.dim("Context:")} ${chalk3.yellow(formatTokens(model.contextWindow))}
+    stream.write(` ${chalk3.dim("Context:")} ${chalk3.yellow(formatTokens2(model.contextWindow))}
 `);
-    stream.write(` ${chalk3.dim("Max Output:")} ${chalk3.yellow(formatTokens(model.maxOutputTokens))}
+    stream.write(` ${chalk3.dim("Max Output:")} ${chalk3.yellow(formatTokens2(model.maxOutputTokens))}
 `);
     stream.write(` ${chalk3.dim("Pricing:")} ${chalk3.cyan(`$${model.pricing.input.toFixed(2)} input`)} ${chalk3.dim("/")} ${chalk3.cyan(`$${model.pricing.output.toFixed(2)} output`)} ${chalk3.dim("(per 1M tokens)")}
 `);
@@ -1086,7 +1119,7 @@ function renderJSON(models, stream) {
   };
   stream.write(JSON.stringify(output, null, 2) + "\n");
 }
-function formatTokens(count) {
+function formatTokens2(count) {
   if (count >= 1e6) {
     return `${(count / 1e6).toFixed(1)}M tokens`;
   } else if (count >= 1e3) {