llmist 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env node
 import {
   createGadget
-} from "./chunk-MO5ONHPZ.js";
+} from "./chunk-I55AV3WV.js";
 import {
   AgentBuilder,
   BaseGadget,
@@ -11,6 +11,7 @@ import {
   HumanInputException,
   LLMMessageBuilder,
   LLMist,
+  MODEL_ALIASES,
   createLogger,
   init_builder,
   init_client,
@@ -21,14 +22,15 @@ import {
   init_model_shortcuts,
   init_registry,
   resolveModel
-} from "./chunk-J3NCIWMY.js";
+} from "./chunk-VYBRYR2S.js";
 
 // src/cli/constants.ts
 var CLI_NAME = "llmist";
 var CLI_DESCRIPTION = "Command line utilities for llmist agents and direct LLM access.";
 var COMMANDS = {
   complete: "complete",
-  agent: "agent"
+  agent: "agent",
+  models: "models"
 };
 var LOG_LEVELS = ["silly", "trace", "debug", "info", "warn", "error", "fatal"];
 var DEFAULT_MODEL = "openai:gpt-5-nano";
@@ -46,7 +48,7 @@ var OPTION_FLAGS = {
   noBuiltins: "--no-builtins"
 };
 var OPTION_DESCRIPTIONS = {
-  model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-3-5-sonnet-latest.",
+  model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-sonnet-4-5.",
   systemPrompt: "Optional system prompt prepended to the conversation.",
   temperature: "Sampling temperature between 0 and 2.",
   maxTokens: "Maximum number of output tokens requested from the model.",
@@ -65,7 +67,7 @@ import { Command, InvalidArgumentError as InvalidArgumentError3 } from "commander";
 // package.json
 var package_default = {
   name: "llmist",
-  version: "0.2.0",
+  version: "0.2.1",
   description: "Universal TypeScript LLM client with streaming-first agent framework. Works with any model - no structured outputs or native tool calling required. Implements its own flexible grammar for function calling.",
   type: "module",
   main: "dist/index.cjs",
@@ -683,8 +685,7 @@ function renderSummary(metadata) {
   if (parts.length === 0) {
     return null;
   }
-  return `${chalk.dim("\u2500".repeat(40))}
-${parts.join(chalk.dim(" \u2502 "))}`;
+  return parts.join(chalk.dim(" \u2502 "));
 }
 async function executeAction(action, env) {
   try {
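Note on the renderSummary change: 0.2.0 printed a 40-column dim rule above the summary line; 0.2.1 returns only the joined parts. A minimal TypeScript sketch of the before/after return values (the parts values here are illustrative, not from the package):

    import chalk from "chalk";

    // Hypothetical summary fragments, e.g. token count and cost.
    const parts: string[] = ["312 tokens", "$0.0004"];

    // 0.2.0: dim rule on its own line, then the joined parts.
    const before = `${chalk.dim("\u2500".repeat(40))}\n${parts.join(chalk.dim(" \u2502 "))}`;

    // 0.2.1: just the parts, joined with a dim vertical bar.
    const after = parts.join(chalk.dim(" \u2502 "));

    console.log(before);
    console.log(after);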
@@ -943,11 +944,171 @@ function registerCompleteCommand(program, env) {
   );
 }
 
+// src/cli/models-command.ts
+import chalk3 from "chalk";
+init_model_shortcuts();
+async function handleModelsCommand(options, env) {
+  const client = env.createClient();
+  const models = client.modelRegistry.listModels(options.provider);
+  if (options.format === "json") {
+    renderJSON(models, env.stdout);
+  } else {
+    renderTable(models, options.verbose || false, env.stdout);
+  }
+}
+function renderTable(models, verbose, stream) {
+  const grouped = /* @__PURE__ */ new Map();
+  for (const model of models) {
+    const provider = model.provider;
+    if (!grouped.has(provider)) {
+      grouped.set(provider, []);
+    }
+    grouped.get(provider).push(model);
+  }
+  stream.write(chalk3.bold.cyan("\nAvailable Models\n"));
+  stream.write(chalk3.cyan("=".repeat(80)) + "\n\n");
+  const providers = Array.from(grouped.keys()).sort();
+  for (const provider of providers) {
+    const providerModels = grouped.get(provider);
+    const providerName = provider.charAt(0).toUpperCase() + provider.slice(1);
+    stream.write(chalk3.bold.yellow(`${providerName} Models
+`));
+    if (verbose) {
+      renderVerboseTable(providerModels, stream);
+    } else {
+      renderCompactTable(providerModels, stream);
+    }
+    stream.write("\n");
+  }
+  stream.write(chalk3.bold.magenta("Model Shortcuts\n"));
+  stream.write(chalk3.dim("\u2500".repeat(80)) + "\n");
+  const shortcuts = Object.entries(MODEL_ALIASES).sort((a, b) => a[0].localeCompare(b[0]));
+  for (const [shortcut, fullName] of shortcuts) {
+    stream.write(chalk3.cyan(` ${shortcut.padEnd(15)}`) + chalk3.dim(" \u2192 ") + chalk3.white(fullName) + "\n");
+  }
+  stream.write("\n");
+}
+function renderCompactTable(models, stream) {
+  const idWidth = 25;
+  const nameWidth = 22;
+  const contextWidth = 13;
+  const inputWidth = 10;
+  const outputWidth = 10;
+  stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+  stream.write(
+    chalk3.bold(
+      "Model ID".padEnd(idWidth) + " " + "Display Name".padEnd(nameWidth) + " " + "Context".padEnd(contextWidth) + " " + "Input".padEnd(inputWidth) + " " + "Output".padEnd(outputWidth)
+    ) + "\n"
+  );
+  stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+  for (const model of models) {
+    const contextFormatted = formatTokens(model.contextWindow);
+    const inputPrice = `$${model.pricing.input.toFixed(2)}`;
+    const outputPrice = `$${model.pricing.output.toFixed(2)}`;
+    stream.write(
+      chalk3.green(model.modelId.padEnd(idWidth)) + " " + chalk3.white(model.displayName.padEnd(nameWidth)) + " " + chalk3.yellow(contextFormatted.padEnd(contextWidth)) + " " + chalk3.cyan(inputPrice.padEnd(inputWidth)) + " " + chalk3.cyan(outputPrice.padEnd(outputWidth)) + "\n"
+    );
+  }
+  stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+  stream.write(chalk3.dim(` * Prices are per 1M tokens
+`));
+}
+function renderVerboseTable(models, stream) {
+  for (const model of models) {
+    stream.write(chalk3.bold.green(`
+${model.modelId}
+`));
+    stream.write(chalk3.dim(" " + "\u2500".repeat(60)) + "\n");
+    stream.write(` ${chalk3.dim("Name:")} ${chalk3.white(model.displayName)}
+`);
+    stream.write(` ${chalk3.dim("Context:")} ${chalk3.yellow(formatTokens(model.contextWindow))}
+`);
+    stream.write(` ${chalk3.dim("Max Output:")} ${chalk3.yellow(formatTokens(model.maxOutputTokens))}
+`);
+    stream.write(` ${chalk3.dim("Pricing:")} ${chalk3.cyan(`$${model.pricing.input.toFixed(2)} input`)} ${chalk3.dim("/")} ${chalk3.cyan(`$${model.pricing.output.toFixed(2)} output`)} ${chalk3.dim("(per 1M tokens)")}
+`);
+    if (model.pricing.cachedInput !== void 0) {
+      stream.write(` ${chalk3.dim("Cached Input:")} ${chalk3.cyan(`$${model.pricing.cachedInput.toFixed(2)} per 1M tokens`)}
+`);
+    }
+    if (model.knowledgeCutoff) {
+      stream.write(` ${chalk3.dim("Knowledge:")} ${model.knowledgeCutoff}
+`);
+    }
+    const features = [];
+    if (model.features.streaming) features.push("streaming");
+    if (model.features.functionCalling) features.push("function-calling");
+    if (model.features.vision) features.push("vision");
+    if (model.features.reasoning) features.push("reasoning");
+    if (model.features.structuredOutputs) features.push("structured-outputs");
+    if (model.features.fineTuning) features.push("fine-tuning");
+    if (features.length > 0) {
+      stream.write(` ${chalk3.dim("Features:")} ${chalk3.blue(features.join(", "))}
+`);
+    }
+    if (model.metadata) {
+      if (model.metadata.family) {
+        stream.write(` ${chalk3.dim("Family:")} ${model.metadata.family}
+`);
+      }
+      if (model.metadata.releaseDate) {
+        stream.write(` ${chalk3.dim("Released:")} ${model.metadata.releaseDate}
+`);
+      }
+      if (model.metadata.notes) {
+        stream.write(` ${chalk3.dim("Notes:")} ${chalk3.italic(model.metadata.notes)}
+`);
+      }
+    }
+  }
+  stream.write("\n");
+}
+function renderJSON(models, stream) {
+  const output = {
+    models: models.map((model) => ({
+      provider: model.provider,
+      modelId: model.modelId,
+      displayName: model.displayName,
+      contextWindow: model.contextWindow,
+      maxOutputTokens: model.maxOutputTokens,
+      pricing: {
+        input: model.pricing.input,
+        output: model.pricing.output,
+        cachedInput: model.pricing.cachedInput,
+        currency: "USD",
+        per: "1M tokens"
+      },
+      knowledgeCutoff: model.knowledgeCutoff,
+      features: model.features,
+      metadata: model.metadata
+    })),
+    shortcuts: MODEL_ALIASES
+  };
+  stream.write(JSON.stringify(output, null, 2) + "\n");
+}
+function formatTokens(count) {
+  if (count >= 1e6) {
+    return `${(count / 1e6).toFixed(1)}M tokens`;
+  } else if (count >= 1e3) {
+    return `${(count / 1e3).toFixed(0)}K tokens`;
+  } else {
+    return `${count} tokens`;
+  }
+}
+function registerModelsCommand(program, env) {
+  program.command(COMMANDS.models).description("List all available LLM models with pricing and capabilities.").option("--provider <name>", "Filter by provider (openai, anthropic, gemini)").option("--format <format>", "Output format: table or json", "table").option("--verbose", "Show detailed model information", false).action(
+    (options) => executeAction(
+      () => handleModelsCommand(options, env),
+      env
+    )
+  );
+}
+
 // src/cli/environment.ts
 init_client();
 init_logger();
 import readline from "node:readline";
-import chalk3 from "chalk";
+import chalk4 from "chalk";
 var LOG_LEVEL_MAP = {
   silly: 0,
   trace: 1,
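The formatTokens helper added above buckets counts at 1e3 and 1e6. A few worked examples (the input values are illustrative):

    formatTokens(512);     // "512 tokens"
    formatTokens(8192);    // "8K tokens"   (8192 / 1e3 = 8.192, toFixed(0) → "8")
    formatTokens(200000);  // "200K tokens"
    formatTokens(1048576); // "1.0M tokens" (1048576 / 1e6 = 1.048576, toFixed(1) → "1.0")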
@@ -991,14 +1152,14 @@ function createPromptFunction(stdin, stdout) {
     output: stdout
   });
   stdout.write("\n");
-  stdout.write(`${chalk3.cyan("\u2500".repeat(60))}
+  stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
 `);
-  stdout.write(chalk3.cyan.bold("\u{1F916} Agent asks:\n"));
+  stdout.write(chalk4.cyan.bold("\u{1F916} Agent asks:\n"));
   stdout.write(`${question}
 `);
-  stdout.write(`${chalk3.cyan("\u2500".repeat(60))}
+  stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
 `);
-  rl.question(chalk3.green.bold("You: "), (answer) => {
+  rl.question(chalk4.green.bold("You: "), (answer) => {
    rl.close();
    resolve(answer);
  });
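The chalk3 → chalk4 renames in environment.ts appear to be a bundler artifact rather than a behavioral change: the new models-command module above now occupies the chalk3 binding, so the later chalk import in the bundle is renumbered to chalk4 and every call site is updated consistently.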
@@ -1041,6 +1202,7 @@ function createProgram(env) {
   });
   registerCompleteCommand(program, env);
   registerAgentCommand(program, env);
+  registerModelsCommand(program, env);
   return program;
 }
 async function runCLI(overrides = {}) {
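Taken together, the new subcommand registered above should be invocable roughly as follows (flag names and the "table" default come from the registerModelsCommand options; output descriptions follow renderTable and renderJSON):

    llmist models                        # compact per-provider tables plus the shortcut list
    llmist models --provider anthropic   # only one provider's models
    llmist models --verbose              # detailed view: pricing, features, metadata
    llmist models --format json          # JSON with a "models" array and a "shortcuts" map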