llmist 0.1.6 → 0.2.1

This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
package/dist/cli.js CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env node
 import {
   createGadget
-} from "./chunk-MO5ONHPZ.js";
+} from "./chunk-I55AV3WV.js";
 import {
   AgentBuilder,
   BaseGadget,
@@ -11,6 +11,7 @@ import {
   HumanInputException,
   LLMMessageBuilder,
   LLMist,
+  MODEL_ALIASES,
   createLogger,
   init_builder,
   init_client,
@@ -21,14 +22,15 @@ import {
   init_model_shortcuts,
   init_registry,
   resolveModel
-} from "./chunk-J3NCIWMY.js";
+} from "./chunk-VYBRYR2S.js";
 
 // src/cli/constants.ts
 var CLI_NAME = "llmist";
 var CLI_DESCRIPTION = "Command line utilities for llmist agents and direct LLM access.";
 var COMMANDS = {
   complete: "complete",
-  agent: "agent"
+  agent: "agent",
+  models: "models"
 };
 var LOG_LEVELS = ["silly", "trace", "debug", "info", "warn", "error", "fatal"];
 var DEFAULT_MODEL = "openai:gpt-5-nano";
@@ -46,7 +48,7 @@ var OPTION_FLAGS = {
   noBuiltins: "--no-builtins"
 };
 var OPTION_DESCRIPTIONS = {
-  model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-3-5-sonnet-latest.",
+  model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-sonnet-4-5.",
   systemPrompt: "Optional system prompt prepended to the conversation.",
   temperature: "Sampling temperature between 0 and 2.",
   maxTokens: "Maximum number of output tokens requested from the model.",
@@ -65,7 +67,7 @@ import { Command, InvalidArgumentError as InvalidArgumentError3 } from "commander";
 // package.json
 var package_default = {
   name: "llmist",
-  version: "0.1.6",
+  version: "0.2.0",
   description: "Universal TypeScript LLM client with streaming-first agent framework. Works with any model - no structured outputs or native tool calling required. Implements its own flexible grammar for function calling.",
   type: "module",
   main: "dist/index.cjs",
@@ -107,7 +109,8 @@ var package_default = {
     "test:e2e:watch": "bun test src/e2e --watch --timeout 60000",
     "test:all": "bun run test && bun run test:e2e",
     clean: "rimraf dist",
-    prepare: "node scripts/install-hooks.js || true"
+    prepare: "node scripts/install-hooks.js || true",
+    "release:dry": "bunx semantic-release --dry-run"
   },
   bin: {
     llmist: "dist/cli.js"
@@ -156,11 +159,16 @@ var package_default = {
   },
   devDependencies: {
     "@biomejs/biome": "^2.3.2",
+    "@commitlint/cli": "^20.1.0",
+    "@commitlint/config-conventional": "^20.0.0",
+    "@semantic-release/changelog": "^6.0.3",
+    "@semantic-release/git": "^10.0.1",
     "@types/js-yaml": "^4.0.9",
     "@types/node": "^20.12.7",
     "bun-types": "^1.3.2",
     dotenv: "^17.2.3",
     rimraf: "^5.0.5",
+    "semantic-release": "^25.0.2",
     tsup: "^8.3.5",
     typescript: "^5.4.5"
   }
@@ -937,11 +945,171 @@ function registerCompleteCommand(program, env) {
   );
 }
 
+// src/cli/models-command.ts
+import chalk3 from "chalk";
+init_model_shortcuts();
+async function handleModelsCommand(options, env) {
+  const client = env.createClient();
+  const models = client.modelRegistry.listModels(options.provider);
+  if (options.format === "json") {
+    renderJSON(models, env.stdout);
+  } else {
+    renderTable(models, options.verbose || false, env.stdout);
+  }
+}
+function renderTable(models, verbose, stream) {
+  const grouped = /* @__PURE__ */ new Map();
+  for (const model of models) {
+    const provider = model.provider;
+    if (!grouped.has(provider)) {
+      grouped.set(provider, []);
+    }
+    grouped.get(provider).push(model);
+  }
+  stream.write(chalk3.bold.cyan("\nAvailable Models\n"));
+  stream.write(chalk3.cyan("=".repeat(80)) + "\n\n");
+  const providers = Array.from(grouped.keys()).sort();
+  for (const provider of providers) {
+    const providerModels = grouped.get(provider);
+    const providerName = provider.charAt(0).toUpperCase() + provider.slice(1);
+    stream.write(chalk3.bold.yellow(`${providerName} Models
+`));
+    if (verbose) {
+      renderVerboseTable(providerModels, stream);
+    } else {
+      renderCompactTable(providerModels, stream);
+    }
+    stream.write("\n");
+  }
+  stream.write(chalk3.bold.magenta("Model Shortcuts\n"));
+  stream.write(chalk3.dim("\u2500".repeat(80)) + "\n");
+  const shortcuts = Object.entries(MODEL_ALIASES).sort((a, b) => a[0].localeCompare(b[0]));
+  for (const [shortcut, fullName] of shortcuts) {
+    stream.write(chalk3.cyan(` ${shortcut.padEnd(15)}`) + chalk3.dim(" \u2192 ") + chalk3.white(fullName) + "\n");
+  }
+  stream.write("\n");
+}
+function renderCompactTable(models, stream) {
+  const idWidth = 25;
+  const nameWidth = 22;
+  const contextWidth = 13;
+  const inputWidth = 10;
+  const outputWidth = 10;
+  stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+  stream.write(
+    chalk3.bold(
+      "Model ID".padEnd(idWidth) + " " + "Display Name".padEnd(nameWidth) + " " + "Context".padEnd(contextWidth) + " " + "Input".padEnd(inputWidth) + " " + "Output".padEnd(outputWidth)
+    ) + "\n"
+  );
+  stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+  for (const model of models) {
+    const contextFormatted = formatTokens(model.contextWindow);
+    const inputPrice = `$${model.pricing.input.toFixed(2)}`;
+    const outputPrice = `$${model.pricing.output.toFixed(2)}`;
+    stream.write(
+      chalk3.green(model.modelId.padEnd(idWidth)) + " " + chalk3.white(model.displayName.padEnd(nameWidth)) + " " + chalk3.yellow(contextFormatted.padEnd(contextWidth)) + " " + chalk3.cyan(inputPrice.padEnd(inputWidth)) + " " + chalk3.cyan(outputPrice.padEnd(outputWidth)) + "\n"
+    );
+  }
+  stream.write(chalk3.dim("\u2500".repeat(idWidth + nameWidth + contextWidth + inputWidth + outputWidth + 8)) + "\n");
+  stream.write(chalk3.dim(` * Prices are per 1M tokens
+`));
+}
+function renderVerboseTable(models, stream) {
+  for (const model of models) {
+    stream.write(chalk3.bold.green(`
+${model.modelId}
+`));
+    stream.write(chalk3.dim(" " + "\u2500".repeat(60)) + "\n");
+    stream.write(` ${chalk3.dim("Name:")} ${chalk3.white(model.displayName)}
+`);
+    stream.write(` ${chalk3.dim("Context:")} ${chalk3.yellow(formatTokens(model.contextWindow))}
+`);
+    stream.write(` ${chalk3.dim("Max Output:")} ${chalk3.yellow(formatTokens(model.maxOutputTokens))}
+`);
+    stream.write(` ${chalk3.dim("Pricing:")} ${chalk3.cyan(`$${model.pricing.input.toFixed(2)} input`)} ${chalk3.dim("/")} ${chalk3.cyan(`$${model.pricing.output.toFixed(2)} output`)} ${chalk3.dim("(per 1M tokens)")}
+`);
+    if (model.pricing.cachedInput !== void 0) {
+      stream.write(` ${chalk3.dim("Cached Input:")} ${chalk3.cyan(`$${model.pricing.cachedInput.toFixed(2)} per 1M tokens`)}
+`);
+    }
+    if (model.knowledgeCutoff) {
+      stream.write(` ${chalk3.dim("Knowledge:")} ${model.knowledgeCutoff}
+`);
+    }
+    const features = [];
+    if (model.features.streaming) features.push("streaming");
+    if (model.features.functionCalling) features.push("function-calling");
+    if (model.features.vision) features.push("vision");
+    if (model.features.reasoning) features.push("reasoning");
+    if (model.features.structuredOutputs) features.push("structured-outputs");
+    if (model.features.fineTuning) features.push("fine-tuning");
+    if (features.length > 0) {
+      stream.write(` ${chalk3.dim("Features:")} ${chalk3.blue(features.join(", "))}
+`);
+    }
+    if (model.metadata) {
+      if (model.metadata.family) {
+        stream.write(` ${chalk3.dim("Family:")} ${model.metadata.family}
+`);
+      }
+      if (model.metadata.releaseDate) {
+        stream.write(` ${chalk3.dim("Released:")} ${model.metadata.releaseDate}
+`);
+      }
+      if (model.metadata.notes) {
+        stream.write(` ${chalk3.dim("Notes:")} ${chalk3.italic(model.metadata.notes)}
+`);
+      }
+    }
+  }
+  stream.write("\n");
+}
+function renderJSON(models, stream) {
+  const output = {
+    models: models.map((model) => ({
+      provider: model.provider,
+      modelId: model.modelId,
+      displayName: model.displayName,
+      contextWindow: model.contextWindow,
+      maxOutputTokens: model.maxOutputTokens,
+      pricing: {
+        input: model.pricing.input,
+        output: model.pricing.output,
+        cachedInput: model.pricing.cachedInput,
+        currency: "USD",
+        per: "1M tokens"
+      },
+      knowledgeCutoff: model.knowledgeCutoff,
+      features: model.features,
+      metadata: model.metadata
+    })),
+    shortcuts: MODEL_ALIASES
+  };
+  stream.write(JSON.stringify(output, null, 2) + "\n");
+}
+function formatTokens(count) {
+  if (count >= 1e6) {
+    return `${(count / 1e6).toFixed(1)}M tokens`;
+  } else if (count >= 1e3) {
+    return `${(count / 1e3).toFixed(0)}K tokens`;
+  } else {
+    return `${count} tokens`;
+  }
+}
+function registerModelsCommand(program, env) {
+  program.command(COMMANDS.models).description("List all available LLM models with pricing and capabilities.").option("--provider <name>", "Filter by provider (openai, anthropic, gemini)").option("--format <format>", "Output format: table or json", "table").option("--verbose", "Show detailed model information", false).action(
+    (options) => executeAction(
+      () => handleModelsCommand(options, env),
+      env
+    )
+  );
+}
+
 // src/cli/environment.ts
 init_client();
 init_logger();
 import readline from "node:readline";
-import chalk3 from "chalk";
+import chalk4 from "chalk";
 var LOG_LEVEL_MAP = {
   silly: 0,
   trace: 1,
@@ -985,14 +1153,14 @@ function createPromptFunction(stdin, stdout) {
     output: stdout
   });
   stdout.write("\n");
-  stdout.write(`${chalk3.cyan("\u2500".repeat(60))}
+  stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
 `);
-  stdout.write(chalk3.cyan.bold("\u{1F916} Agent asks:\n"));
+  stdout.write(chalk4.cyan.bold("\u{1F916} Agent asks:\n"));
   stdout.write(`${question}
 `);
-  stdout.write(`${chalk3.cyan("\u2500".repeat(60))}
+  stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
 `);
-  rl.question(chalk3.green.bold("You: "), (answer) => {
+  rl.question(chalk4.green.bold("You: "), (answer) => {
     rl.close();
     resolve(answer);
   });
@@ -1035,6 +1203,7 @@ function createProgram(env) {
   });
   registerCompleteCommand(program, env);
   registerAgentCommand(program, env);
+  registerModelsCommand(program, env);
   return program;
 }
 async function runCLI(overrides = {}) {
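
For scripting against the new `models` subcommand, the `--format json` output produced by renderJSON above is the machine-readable surface. The following is a minimal TypeScript sketch of a consumer, not part of the package: it assumes an llmist 0.2.x binary on PATH, and the ModelEntry/ModelsOutput interfaces are illustrative shapes copied from renderJSON, not types exported by llmist.

// consume-models.ts — illustrative sketch, not shipped with llmist.
import { execFileSync } from "node:child_process";

// Shapes mirrored from renderJSON above; llmist does not necessarily export these.
interface ModelEntry {
  provider: string;
  modelId: string;
  displayName: string;
  contextWindow: number;
  maxOutputTokens: number;
  pricing: {
    input: number;        // USD per 1M tokens
    output: number;       // USD per 1M tokens
    cachedInput?: number; // key dropped by JSON.stringify when undefined
    currency: string;
    per: string;
  };
  knowledgeCutoff?: string;
  features: Record<string, boolean>;
  metadata?: { family?: string; releaseDate?: string; notes?: string };
}

interface ModelsOutput {
  models: ModelEntry[];
  shortcuts: Record<string, string>; // alias -> full model name
}

// Invoke the CLI and parse its JSON output.
const raw = execFileSync("llmist", ["models", "--format", "json"], { encoding: "utf8" });
const { models, shortcuts } = JSON.parse(raw) as ModelsOutput;

// Example query: the cheapest input price per provider.
const cheapest = new Map<string, ModelEntry>();
for (const m of models) {
  const best = cheapest.get(m.provider);
  if (!best || m.pricing.input < best.pricing.input) cheapest.set(m.provider, m);
}
for (const [provider, m] of cheapest) {
  console.log(`${provider}: ${m.modelId} at $${m.pricing.input.toFixed(2)}/1M input tokens`);
}
console.log(`${Object.keys(shortcuts).length} model shortcuts defined`);

Per the registerModelsCommand options above, the human-readable table stays the default (`llmist models`, optionally with `--provider <name>` or `--verbose`), so JSON output is strictly opt-in.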