llmist 0.2.0 → 0.2.1

package/dist/cli.js CHANGED
@@ -1,7 +1,7 @@
  #!/usr/bin/env node
  import {
    createGadget
- } from "./chunk-MO5ONHPZ.js";
+ } from "./chunk-I55AV3WV.js";
  import {
    AgentBuilder,
    BaseGadget,
@@ -11,6 +11,7 @@ import {
    HumanInputException,
    LLMMessageBuilder,
    LLMist,
+   MODEL_ALIASES,
    createLogger,
    init_builder,
    init_client,
@@ -21,14 +22,15 @@ import {
    init_model_shortcuts,
    init_registry,
    resolveModel
- } from "./chunk-J3NCIWMY.js";
+ } from "./chunk-VYBRYR2S.js";

  // src/cli/constants.ts
  var CLI_NAME = "llmist";
  var CLI_DESCRIPTION = "Command line utilities for llmist agents and direct LLM access.";
  var COMMANDS = {
    complete: "complete",
-   agent: "agent"
+   agent: "agent",
+   models: "models"
  };
  var LOG_LEVELS = ["silly", "trace", "debug", "info", "warn", "error", "fatal"];
  var DEFAULT_MODEL = "openai:gpt-5-nano";
@@ -46,7 +48,7 @@ var OPTION_FLAGS = {
    noBuiltins: "--no-builtins"
  };
  var OPTION_DESCRIPTIONS = {
-   model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-3-5-sonnet-latest.",
+   model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-sonnet-4-5.",
    systemPrompt: "Optional system prompt prepended to the conversation.",
    temperature: "Sampling temperature between 0 and 2.",
    maxTokens: "Maximum number of output tokens requested from the model.",
@@ -943,11 +945,171 @@ function registerCompleteCommand(program, env) {
    );
  }

+ // src/cli/models-command.ts
+ import chalk3 from "chalk";
+ init_model_shortcuts();
+ async function handleModelsCommand(options, env) {
+   const client = env.createClient();
+   const models = client.modelRegistry.listModels(options.provider);
+   if (options.format === "json") {
+     renderJSON(models, env.stdout);
+   } else {
+     renderTable(models, options.verbose || false, env.stdout);
+   }
+ }
+ function renderTable(models, verbose, stream) {
+   const grouped = /* @__PURE__ */ new Map();
+   for (const model of models) {
+     const provider = model.provider;
+     if (!grouped.has(provider)) {
+       grouped.set(provider, []);
+     }
+     grouped.get(provider).push(model);
+   }
+   stream.write(chalk3.bold.cyan("\nAvailable Models\n"));
+   stream.write(chalk3.cyan("=".repeat(80)) + "\n\n");
+   const providers = Array.from(grouped.keys()).sort();
+   for (const provider of providers) {
+     const providerModels = grouped.get(provider);
+     const providerName = provider.charAt(0).toUpperCase() + provider.slice(1);
+     stream.write(chalk3.bold.yellow(`${providerName} Models
+ `));
+     if (verbose) {
+       renderVerboseTable(providerModels, stream);
+     } else {
+       renderCompactTable(providerModels, stream);
+     }
+     stream.write("\n");
+   }
+   stream.write(chalk3.bold.magenta("Model Shortcuts\n"));
+   stream.write(chalk3.dim("\u2500".repeat(80)) + "\n");
+   const shortcuts = Object.entries(MODEL_ALIASES).sort((a, b) => a[0].localeCompare(b[0]));
+   for (const [shortcut, fullName] of shortcuts) {
+     stream.write(chalk3.cyan(` ${shortcut.padEnd(15)}`) + chalk3.dim(" \u2192 ") + chalk3.white(fullName) + "\n");
+   }
+   stream.write("\n");
+ }
+ function renderCompactTable(models, stream) {
+   const idWidth = 25;
+   const nameWidth = 22;
+   const contextWidth = 13;
+   const inputWidth = 10;
+   const outputWidth = 10;
+   stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+   stream.write(
+     chalk3.bold(
+       "Model ID".padEnd(idWidth) + " " + "Display Name".padEnd(nameWidth) + " " + "Context".padEnd(contextWidth) + " " + "Input".padEnd(inputWidth) + " " + "Output".padEnd(outputWidth)
+     ) + "\n"
+   );
+   stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+   for (const model of models) {
+     const contextFormatted = formatTokens(model.contextWindow);
+     const inputPrice = `$${model.pricing.input.toFixed(2)}`;
+     const outputPrice = `$${model.pricing.output.toFixed(2)}`;
+     stream.write(
+       chalk3.green(model.modelId.padEnd(idWidth)) + " " + chalk3.white(model.displayName.padEnd(nameWidth)) + " " + chalk3.yellow(contextFormatted.padEnd(contextWidth)) + " " + chalk3.cyan(inputPrice.padEnd(inputWidth)) + " " + chalk3.cyan(outputPrice.padEnd(outputWidth)) + "\n"
+     );
+   }
+   stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+   stream.write(chalk3.dim(` * Prices are per 1M tokens
+ `));
+ }
+ function renderVerboseTable(models, stream) {
+   for (const model of models) {
+     stream.write(chalk3.bold.green(`
+ ${model.modelId}
+ `));
+     stream.write(chalk3.dim(" " + "\u2500".repeat(60)) + "\n");
+     stream.write(` ${chalk3.dim("Name:")} ${chalk3.white(model.displayName)}
+ `);
+     stream.write(` ${chalk3.dim("Context:")} ${chalk3.yellow(formatTokens(model.contextWindow))}
+ `);
+     stream.write(` ${chalk3.dim("Max Output:")} ${chalk3.yellow(formatTokens(model.maxOutputTokens))}
+ `);
+     stream.write(` ${chalk3.dim("Pricing:")} ${chalk3.cyan(`$${model.pricing.input.toFixed(2)} input`)} ${chalk3.dim("/")} ${chalk3.cyan(`$${model.pricing.output.toFixed(2)} output`)} ${chalk3.dim("(per 1M tokens)")}
+ `);
+     if (model.pricing.cachedInput !== void 0) {
+       stream.write(` ${chalk3.dim("Cached Input:")} ${chalk3.cyan(`$${model.pricing.cachedInput.toFixed(2)} per 1M tokens`)}
+ `);
+     }
+     if (model.knowledgeCutoff) {
+       stream.write(` ${chalk3.dim("Knowledge:")} ${model.knowledgeCutoff}
+ `);
+     }
+     const features = [];
+     if (model.features.streaming) features.push("streaming");
+     if (model.features.functionCalling) features.push("function-calling");
+     if (model.features.vision) features.push("vision");
+     if (model.features.reasoning) features.push("reasoning");
+     if (model.features.structuredOutputs) features.push("structured-outputs");
+     if (model.features.fineTuning) features.push("fine-tuning");
+     if (features.length > 0) {
+       stream.write(` ${chalk3.dim("Features:")} ${chalk3.blue(features.join(", "))}
+ `);
+     }
+     if (model.metadata) {
+       if (model.metadata.family) {
+         stream.write(` ${chalk3.dim("Family:")} ${model.metadata.family}
+ `);
+       }
+       if (model.metadata.releaseDate) {
+         stream.write(` ${chalk3.dim("Released:")} ${model.metadata.releaseDate}
+ `);
+       }
+       if (model.metadata.notes) {
+         stream.write(` ${chalk3.dim("Notes:")} ${chalk3.italic(model.metadata.notes)}
+ `);
+       }
+     }
+   }
+   stream.write("\n");
+ }
+ function renderJSON(models, stream) {
+   const output = {
+     models: models.map((model) => ({
+       provider: model.provider,
+       modelId: model.modelId,
+       displayName: model.displayName,
+       contextWindow: model.contextWindow,
+       maxOutputTokens: model.maxOutputTokens,
+       pricing: {
+         input: model.pricing.input,
+         output: model.pricing.output,
+         cachedInput: model.pricing.cachedInput,
+         currency: "USD",
+         per: "1M tokens"
+       },
+       knowledgeCutoff: model.knowledgeCutoff,
+       features: model.features,
+       metadata: model.metadata
+     })),
+     shortcuts: MODEL_ALIASES
+   };
+   stream.write(JSON.stringify(output, null, 2) + "\n");
+ }
+ function formatTokens(count) {
+   if (count >= 1e6) {
+     return `${(count / 1e6).toFixed(1)}M tokens`;
+   } else if (count >= 1e3) {
+     return `${(count / 1e3).toFixed(0)}K tokens`;
+   } else {
+     return `${count} tokens`;
+   }
+ }
+ function registerModelsCommand(program, env) {
+   program.command(COMMANDS.models).description("List all available LLM models with pricing and capabilities.").option("--provider <name>", "Filter by provider (openai, anthropic, gemini)").option("--format <format>", "Output format: table or json", "table").option("--verbose", "Show detailed model information", false).action(
+     (options) => executeAction(
+       () => handleModelsCommand(options, env),
+       env
+     )
+   );
+ }
+
  // src/cli/environment.ts
  init_client();
  init_logger();
  import readline from "node:readline";
- import chalk3 from "chalk";
+ import chalk4 from "chalk";
  var LOG_LEVEL_MAP = {
    silly: 0,
    trace: 1,
@@ -991,14 +1153,14 @@ function createPromptFunction(stdin, stdout) {
    output: stdout
  });
  stdout.write("\n");
- stdout.write(`${chalk3.cyan("\u2500".repeat(60))}
+ stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
  `);
- stdout.write(chalk3.cyan.bold("\u{1F916} Agent asks:\n"));
+ stdout.write(chalk4.cyan.bold("\u{1F916} Agent asks:\n"));
  stdout.write(`${question}
  `);
- stdout.write(`${chalk3.cyan("\u2500".repeat(60))}
+ stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
  `);
- rl.question(chalk3.green.bold("You: "), (answer) => {
+ rl.question(chalk4.green.bold("You: "), (answer) => {
  rl.close();
  resolve(answer);
  });
@@ -1041,6 +1203,7 @@ function createProgram(env) {
    });
    registerCompleteCommand(program, env);
    registerAgentCommand(program, env);
+   registerModelsCommand(program, env);
    return program;
  }
  async function runCLI(overrides = {}) {
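
Going by the flags registered in registerModelsCommand above, the new subcommand would presumably be invoked along these lines (an illustrative sketch, not taken from the package docs; the binary name follows CLI_NAME):

    # default: grouped table of all registered models plus shortcuts
    llmist models

    # filter to one provider and show the per-model detail view
    llmist models --provider anthropic --verbose

    # machine-readable output, including the MODEL_ALIASES shortcut map
    llmist models --format json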