llmist 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js ADDED
@@ -0,0 +1,987 @@
+ #!/usr/bin/env node
+ import {
+   createGadget
+ } from "./chunk-JEBGLCDW.js";
+ import {
+   AgentBuilder,
+   BaseGadget,
+   BreakLoopException,
+   FALLBACK_CHARS_PER_TOKEN,
+   GadgetRegistry,
+   HumanInputException,
+   LLMMessageBuilder,
+   LLMist,
+   createLogger,
+   init_builder,
+   init_client,
+   init_constants2 as init_constants,
+   init_exceptions,
+   init_logger,
+   init_messages,
+   init_registry
+ } from "./chunk-TP7HE3MN.js";
+
+ // src/cli/constants.ts
+ var CLI_NAME = "llmist";
+ var CLI_DESCRIPTION = "Command line utilities for llmist agents and direct LLM access.";
+ var COMMANDS = {
+   complete: "complete",
+   agent: "agent"
+ };
+ var LOG_LEVELS = ["silly", "trace", "debug", "info", "warn", "error", "fatal"];
+ var DEFAULT_MODEL = "openai:gpt-5-nano";
+ var DEFAULT_PARAMETER_FORMAT = "json";
+ var OPTION_FLAGS = {
+   model: "-m, --model <identifier>",
+   systemPrompt: "-s, --system <prompt>",
+   temperature: "-t, --temperature <value>",
+   maxTokens: "--max-tokens <count>",
+   maxIterations: "-i, --max-iterations <count>",
+   gadgetModule: "-g, --gadget <module>",
+   parameterFormat: "--parameter-format <format>",
+   logLevel: "--log-level <level>",
+   logFile: "--log-file <path>",
+   noBuiltins: "--no-builtins"
+ };
+ var OPTION_DESCRIPTIONS = {
+   model: "Model identifier, e.g. openai:gpt-5-nano or anthropic:claude-3-5-sonnet-latest.",
+   systemPrompt: "Optional system prompt prepended to the conversation.",
+   temperature: "Sampling temperature between 0 and 2.",
+   maxTokens: "Maximum number of output tokens requested from the model.",
+   maxIterations: "Maximum number of agent loop iterations before exiting.",
+   gadgetModule: "Path or module specifier for a gadget export. Repeat to register multiple gadgets.",
+   parameterFormat: "Format for gadget parameter schemas: 'json', 'yaml', or 'auto'.",
+   logLevel: "Log level: silly, trace, debug, info, warn, error, fatal.",
+   logFile: "Path to log file. When set, logs are written to file instead of stderr.",
+   noBuiltins: "Disable built-in gadgets (AskUser, TellUser)."
+ };
+ var SUMMARY_PREFIX = "[llmist]";
+
+ // src/cli/program.ts
+ import { Command, InvalidArgumentError as InvalidArgumentError3 } from "commander";
+
+ // package.json
+ var package_default = {
+   name: "llmist",
+   version: "0.1.1",
+   description: "Universal TypeScript LLM client with streaming-first agent framework. Works with any model - no structured outputs or native tool calling required. Implements its own flexible grammar for function calling.",
+   type: "module",
+   main: "dist/index.cjs",
+   module: "dist/index.js",
+   types: "dist/index.d.ts",
+   exports: {
+     ".": {
+       import: {
+         types: "./dist/index.d.ts",
+         default: "./dist/index.js"
+       },
+       require: {
+         types: "./dist/index.d.cts",
+         default: "./dist/index.cjs"
+       }
+     },
+     "./testing": {
+       import: {
+         types: "./dist/testing/index.d.ts",
+         default: "./dist/testing/index.js"
+       },
+       require: {
+         types: "./dist/testing/index.d.cts",
+         default: "./dist/testing/index.cjs"
+       }
+     }
+   },
+   scripts: {
+     cli: "bun run scripts/cli-runner.ts",
+     build: "tsup",
+     typecheck: "tsc --noEmit",
+     lint: "biome lint .",
+     format: "biome format --write .",
+     check: "biome check --write .",
+     test: "bun test",
+     "test:unit": "bun test src/agent src/core src/gadgets src/providers src/testing",
+     "test:watch": "bun test --watch",
+     "test:e2e": "bun test src/e2e --timeout 60000 --bail 1",
+     "test:e2e:watch": "bun test src/e2e --watch --timeout 60000",
+     "test:all": "bun run test && bun run test:e2e",
+     clean: "rimraf dist",
+     postinstall: "node scripts/install-hooks.js"
+   },
+   bin: {
+     llmist: "dist/cli.cjs"
+   },
+   repository: {
+     type: "git",
+     url: "https://github.com/zbigniewsobiecki/llmist.git"
+   },
+   publishConfig: {
+     access: "public"
+   },
+   files: [
+     "dist"
+   ],
+   keywords: [
+     "llm",
+     "ai",
+     "agent",
+     "agents",
+     "openai",
+     "anthropic",
+     "claude",
+     "gemini",
+     "gpt",
+     "streaming",
+     "function-calling",
+     "tool-calling",
+     "typescript",
+     "universal-client",
+     "multi-provider",
+     "hooks",
+     "gadgets"
+   ],
+   author: "",
+   license: "MIT",
+   dependencies: {
+     "@anthropic-ai/sdk": "^0.69.0",
+     "@google/genai": "^1.27.0",
+     chalk: "^5.6.2",
+     commander: "^12.1.0",
+     "js-yaml": "^4.1.0",
+     openai: "^4.0.0",
+     tiktoken: "^1.0.22",
+     tslog: "^4.10.2",
+     zod: "^4.1.12"
+   },
+   devDependencies: {
+     "@biomejs/biome": "^2.3.2",
+     "@types/js-yaml": "^4.0.9",
+     "@types/node": "^20.12.7",
+     "bun-types": "^1.3.2",
+     dotenv: "^17.2.3",
+     rimraf: "^5.0.5",
+     tsup: "^8.3.5",
+     typescript: "^5.4.5"
+   }
+ };
+
+ // src/cli/agent-command.ts
+ init_builder();
+ init_registry();
+ init_constants();
+ import { createInterface } from "node:readline/promises";
+ import chalk3 from "chalk";
+ import { InvalidArgumentError as InvalidArgumentError2 } from "commander";
+
+ // src/cli/builtin-gadgets.ts
+ import chalk from "chalk";
+ import { z } from "zod";
+ init_exceptions();
+ var askUser = createGadget({
+   name: "AskUser",
+   description: "Ask the user a question when you need more information or clarification. The user's response will be provided back to you.",
+   schema: z.object({
+     question: z.string().describe("The question to ask the user")
+   }),
+   execute: ({ question }) => {
+     throw new HumanInputException(question);
+   }
+ });
+ var tellUser = createGadget({
+   name: "TellUser",
+   description: "Tell the user something important. Set done=true when your work is complete and you want to end the conversation.",
+   schema: z.object({
+     message: z.string().describe("The message to display to the user"),
+     done: z.boolean().describe("Set to true to end the conversation, false to continue"),
+     type: z.enum(["info", "success", "warning", "error"]).default("info").describe("Message type: info, success, warning, or error")
+   }),
+   execute: ({ message, done, type }) => {
+     const formatters = {
+       info: (msg) => chalk.blue(`\u2139\uFE0F ${msg}`),
+       success: (msg) => chalk.green(`\u2705 ${msg}`),
+       warning: (msg) => chalk.yellow(`\u26A0\uFE0F ${msg}`),
+       error: (msg) => chalk.red(`\u274C ${msg}`)
+     };
+     const formatted = formatters[type](message);
+     if (done) {
+       throw new BreakLoopException(formatted);
+     }
+     return formatted;
+   }
+ });
+ var builtinGadgets = [askUser, tellUser];
+
+ // src/cli/gadgets.ts
+ import fs from "node:fs";
+ import path from "node:path";
+ import { pathToFileURL } from "node:url";
+ var PATH_PREFIXES = [".", "/", "~"];
+ function isGadgetConstructor(value) {
+   if (typeof value !== "function") {
+     return false;
+   }
+   const prototype = value.prototype;
+   return Boolean(prototype) && prototype instanceof BaseGadget;
+ }
+ function expandHomePath(input) {
+   if (!input.startsWith("~")) {
+     return input;
+   }
+   const home = process.env.HOME;
+   if (!home) {
+     return input;
+   }
+   return path.join(home, input.slice(1));
+ }
+ function isFileLikeSpecifier(specifier) {
+   return PATH_PREFIXES.some((prefix) => specifier.startsWith(prefix)) || specifier.includes(path.sep);
+ }
+ function resolveGadgetSpecifier(specifier, cwd) {
+   if (!isFileLikeSpecifier(specifier)) {
+     return specifier;
+   }
+   const expanded = expandHomePath(specifier);
+   const resolvedPath = path.resolve(cwd, expanded);
+   if (!fs.existsSync(resolvedPath)) {
+     throw new Error(`Gadget module not found at ${resolvedPath}`);
+   }
+   return pathToFileURL(resolvedPath).href;
+ }
+ function extractGadgetsFromModule(moduleExports) {
+   const results = [];
+   const visited = /* @__PURE__ */ new Set();
+   const visit = (value) => {
+     if (value === void 0 || value === null) {
+       return;
+     }
+     if (visited.has(value)) {
+       return;
+     }
+     visited.add(value);
+     if (value instanceof BaseGadget) {
+       results.push(value);
+       return;
+     }
+     if (isGadgetConstructor(value)) {
+       results.push(new value());
+       return;
+     }
+     if (Array.isArray(value)) {
+       for (const entry of value) {
+         visit(entry);
+       }
+       return;
+     }
+     if (typeof value === "object") {
+       for (const entry of Object.values(value)) {
+         visit(entry);
+       }
+     }
+   };
+   visit(moduleExports);
+   return results;
+ }
+ async function loadGadgets(specifiers, cwd, importer = (specifier) => import(specifier)) {
+   const gadgets = [];
+   for (const specifier of specifiers) {
+     const resolved = resolveGadgetSpecifier(specifier, cwd);
+     let exports;
+     try {
+       exports = await importer(resolved);
+     } catch (error) {
+       const message = error instanceof Error ? error.message : String(error);
+       throw new Error(`Failed to load gadget module '${specifier}': ${message}`);
+     }
+     let extracted;
+     try {
+       extracted = extractGadgetsFromModule(exports);
+     } catch (error) {
+       const message = error instanceof Error ? error.message : String(error);
+       throw new Error(`Failed to initialize gadgets from module '${specifier}': ${message}`);
+     }
+     if (extracted.length === 0) {
+       throw new Error(`Module '${specifier}' does not export any Gadget instances.`);
+     }
+     gadgets.push(...extracted);
+   }
+   return gadgets;
+ }
+
+ // src/cli/utils.ts
+ init_constants();
+ import chalk2 from "chalk";
+ import { InvalidArgumentError } from "commander";
+ function createNumericParser({
+   label,
+   integer = false,
+   min,
+   max
+ }) {
+   return (value) => {
+     const parsed = Number(value);
+     if (Number.isNaN(parsed)) {
+       throw new InvalidArgumentError(`${label} must be a number.`);
+     }
+     if (integer && !Number.isInteger(parsed)) {
+       throw new InvalidArgumentError(`${label} must be an integer.`);
+     }
+     if (min !== void 0 && parsed < min) {
+       throw new InvalidArgumentError(`${label} must be greater than or equal to ${min}.`);
+     }
+     if (max !== void 0 && parsed > max) {
+       throw new InvalidArgumentError(`${label} must be less than or equal to ${max}.`);
+     }
+     return parsed;
+   };
+ }
+ var StreamPrinter = class {
+   constructor(target) {
+     this.target = target;
+   }
+   endedWithNewline = true;
+   /**
+    * Writes text to the target stream and tracks newline state.
+    *
+    * @param text - Text to write
+    */
+   write(text) {
+     if (!text) {
+       return;
+     }
+     this.target.write(text);
+     this.endedWithNewline = text.endsWith("\n");
+   }
+   /**
+    * Ensures output ends with a newline by writing one if needed.
+    */
+   ensureNewline() {
+     if (!this.endedWithNewline) {
+       this.target.write("\n");
+       this.endedWithNewline = true;
+     }
+   }
+ };
+ function isInteractive(stream) {
+   return Boolean(stream.isTTY);
+ }
+ var SPINNER_FRAMES = ["\u280B", "\u2819", "\u2839", "\u2838", "\u283C", "\u2834", "\u2826", "\u2827", "\u2807", "\u280F"];
+ var SPINNER_DELAY_MS = 500;
+ var StreamProgress = class {
+   constructor(target, isTTY) {
+     this.target = target;
+     this.isTTY = isTTY;
+   }
+   // Animation state
+   frameIndex = 0;
+   interval = null;
+   delayTimeout = null;
+   isRunning = false;
+   hasRendered = false;
+   // Current call stats (streaming mode)
+   mode = "cumulative";
+   model = "";
+   callStartTime = Date.now();
+   callInputTokens = 0;
+   callInputTokensEstimated = true;
+   callOutputTokens = 0;
+   callOutputTokensEstimated = true;
+   callOutputChars = 0;
+   isStreaming = false;
+   // Cumulative stats (cumulative mode)
+   totalStartTime = Date.now();
+   totalTokens = 0;
+   iterations = 0;
+   /**
+    * Starts a new LLM call. Switches to streaming mode.
+    * @param model - Model name being used
+    * @param estimatedInputTokens - Estimated input tokens based on prompt length
+    */
+   startCall(model, estimatedInputTokens) {
+     this.mode = "streaming";
+     this.model = model;
+     this.callStartTime = Date.now();
+     this.callInputTokens = estimatedInputTokens ?? 0;
+     this.callInputTokensEstimated = true;
+     this.callOutputTokens = 0;
+     this.callOutputTokensEstimated = true;
+     this.callOutputChars = 0;
+     this.isStreaming = false;
+     this.start();
+   }
+   /**
+    * Ends the current LLM call. Updates cumulative stats and switches to cumulative mode.
+    * @param usage - Final token usage from the call
+    */
+   endCall(usage) {
+     this.iterations++;
+     if (usage) {
+       this.totalTokens += usage.totalTokens;
+     }
+     this.pause();
+     this.mode = "cumulative";
+   }
+   /**
+    * Sets the input token count for current call (from stream metadata).
+    * @param tokens - Token count
+    * @param estimated - If true, shown with ~ prefix until actual count arrives
+    */
+   setInputTokens(tokens, estimated = false) {
+     if (estimated && !this.callInputTokensEstimated) {
+       return;
+     }
+     this.callInputTokens = tokens;
+     this.callInputTokensEstimated = estimated;
+   }
+   /**
+    * Sets the output token count for current call (from stream metadata).
+    * @param tokens - Token count
+    * @param estimated - If true, shown with ~ prefix until actual count arrives
+    */
+   setOutputTokens(tokens, estimated = false) {
+     if (estimated && !this.callOutputTokensEstimated) {
+       return;
+     }
+     this.callOutputTokens = tokens;
+     this.callOutputTokensEstimated = estimated;
+   }
+   /**
+    * Starts the progress indicator animation after a brief delay.
+    */
+   start() {
+     if (!this.isTTY || this.isRunning) return;
+     this.isRunning = true;
+     this.delayTimeout = setTimeout(() => {
+       if (this.isRunning) {
+         this.interval = setInterval(() => this.render(), 80);
+         this.render();
+       }
+     }, SPINNER_DELAY_MS);
+   }
+   /**
+    * Updates output character count for current call and marks streaming as active.
+    * @param totalChars - Total accumulated character count
+    */
+   update(totalChars) {
+     this.callOutputChars = totalChars;
+     this.isStreaming = true;
+   }
+   render() {
+     const spinner = SPINNER_FRAMES[this.frameIndex++ % SPINNER_FRAMES.length];
+     if (this.mode === "streaming") {
+       this.renderStreamingMode(spinner);
+     } else {
+       this.renderCumulativeMode(spinner);
+     }
+     this.hasRendered = true;
+   }
+   renderStreamingMode(spinner) {
+     const elapsed = ((Date.now() - this.callStartTime) / 1e3).toFixed(1);
+     const outTokens = this.callOutputTokensEstimated ? Math.round(this.callOutputChars / FALLBACK_CHARS_PER_TOKEN) : this.callOutputTokens;
+     const parts = [];
+     if (this.model) {
+       parts.push(chalk2.cyan(this.model));
+     }
+     if (this.callInputTokens > 0) {
+       const prefix = this.callInputTokensEstimated ? "~" : "";
+       parts.push(chalk2.dim("out:") + chalk2.yellow(` ${prefix}${this.callInputTokens}`));
+     }
+     if (this.isStreaming || outTokens > 0) {
+       const prefix = this.callOutputTokensEstimated ? "~" : "";
+       parts.push(chalk2.dim("in:") + chalk2.green(` ${prefix}${outTokens}`));
+     }
+     parts.push(chalk2.dim(`${elapsed}s`));
+     this.target.write(`\r${chalk2.cyan(spinner)} ${parts.join(chalk2.dim(" | "))}`);
+   }
+   renderCumulativeMode(spinner) {
+     const elapsed = ((Date.now() - this.totalStartTime) / 1e3).toFixed(1);
+     const parts = [];
+     if (this.model) {
+       parts.push(chalk2.cyan(this.model));
+     }
+     if (this.totalTokens > 0) {
+       parts.push(chalk2.dim("total:") + chalk2.magenta(` ${this.totalTokens}`));
+     }
+     if (this.iterations > 0) {
+       parts.push(chalk2.dim("iter:") + chalk2.blue(` ${this.iterations}`));
+     }
+     parts.push(chalk2.dim(`${elapsed}s`));
+     this.target.write(`\r${chalk2.cyan(spinner)} ${parts.join(chalk2.dim(" | "))}`);
+   }
+   /**
+    * Pauses the progress indicator and clears the line.
+    * Can be resumed with start().
+    */
+   pause() {
+     if (!this.isTTY || !this.isRunning) return;
+     if (this.delayTimeout) {
+       clearTimeout(this.delayTimeout);
+       this.delayTimeout = null;
+     }
+     if (this.interval) {
+       clearInterval(this.interval);
+       this.interval = null;
+     }
+     this.isRunning = false;
+     if (this.hasRendered) {
+       this.target.write("\r\x1B[K");
+       this.hasRendered = false;
+     }
+   }
+   /**
+    * Completes the progress indicator and clears the line.
+    */
+   complete() {
+     this.pause();
+   }
+   /**
+    * Returns a formatted prompt string with stats (like bash PS1).
+    * Shows current call stats during streaming, cumulative stats otherwise.
+    * Format: "out: 1.2k │ in: ~300 │ 5s > " or "3.6k │ i2 │ 34s > "
+    */
+   formatPrompt() {
+     const parts = [];
+     if (this.mode === "streaming") {
+       const elapsed = Math.round((Date.now() - this.callStartTime) / 1e3);
+       const outTokens = this.callOutputTokensEstimated ? Math.round(this.callOutputChars / FALLBACK_CHARS_PER_TOKEN) : this.callOutputTokens;
+       const outEstimated = this.callOutputTokensEstimated;
+       if (this.callInputTokens > 0) {
+         const prefix = this.callInputTokensEstimated ? "~" : "";
+         parts.push(
+           chalk2.dim("out:") + chalk2.yellow(` ${prefix}${this.formatTokens(this.callInputTokens)}`)
+         );
+       }
+       if (outTokens > 0) {
+         const prefix = outEstimated ? "~" : "";
+         parts.push(chalk2.dim("in:") + chalk2.green(` ${prefix}${this.formatTokens(outTokens)}`));
+       }
+       parts.push(chalk2.dim(`${elapsed}s`));
+     } else {
+       const elapsed = Math.round((Date.now() - this.totalStartTime) / 1e3);
+       if (this.totalTokens > 0) {
+         parts.push(chalk2.magenta(this.formatTokens(this.totalTokens)));
+       }
+       if (this.iterations > 0) {
+         parts.push(chalk2.blue(`i${this.iterations}`));
+       }
+       parts.push(chalk2.dim(`${elapsed}s`));
+     }
+     return `${parts.join(chalk2.dim(" \u2502 "))} ${chalk2.green(">")} `;
+   }
+   /**
+    * Formats token count compactly (3625 -> "3.6k").
+    */
+   formatTokens(tokens) {
+     return tokens >= 1e3 ? `${(tokens / 1e3).toFixed(1)}k` : `${tokens}`;
+   }
+ };
+ async function readStream(stream) {
+   const chunks = [];
+   for await (const chunk of stream) {
+     if (typeof chunk === "string") {
+       chunks.push(chunk);
+     } else {
+       chunks.push(chunk.toString("utf8"));
+     }
+   }
+   return chunks.join("");
+ }
+ function normalizePrompt(value) {
+   return value.trim();
+ }
+ async function resolvePrompt(promptArg, env) {
+   if (promptArg?.trim()) {
+     return normalizePrompt(promptArg);
+   }
+   if (isInteractive(env.stdin)) {
+     throw new Error("Prompt is required. Provide an argument or pipe content via stdin.");
+   }
+   const pipedInput = normalizePrompt(await readStream(env.stdin));
+   if (!pipedInput) {
+     throw new Error("Received empty stdin payload. Provide a prompt to continue.");
+   }
+   return pipedInput;
+ }
+ function renderSummary(metadata) {
+   const parts = [];
+   if (metadata.iterations !== void 0) {
+     parts.push(chalk2.dim(`iterations: ${metadata.iterations}`));
+   }
+   if (metadata.finishReason) {
+     parts.push(chalk2.dim(`finish: ${metadata.finishReason}`));
+   }
+   if (metadata.usage) {
+     const { inputTokens, outputTokens, totalTokens } = metadata.usage;
+     parts.push(
+       chalk2.dim(`tokens: `) + chalk2.cyan(`${totalTokens}`) + chalk2.dim(` (in: ${inputTokens}, out: ${outputTokens})`)
+     );
+   }
+   if (parts.length === 0) {
+     return null;
+   }
+   return `${chalk2.dim("\u2500".repeat(40))}
+ ${parts.join(chalk2.dim(" \u2502 "))}`;
+ }
+ async function executeAction(action, env) {
+   try {
+     await action();
+   } catch (error) {
+     const message = error instanceof Error ? error.message : String(error);
+     env.stderr.write(`${chalk2.red.bold("Error:")} ${message}
+ `);
+     env.setExitCode(1);
+   }
+ }
+
+ // src/cli/agent-command.ts
+ var PARAMETER_FORMAT_VALUES = ["json", "yaml", "auto"];
+ function parseParameterFormat(value) {
+   const normalized = value.toLowerCase();
+   if (!PARAMETER_FORMAT_VALUES.includes(normalized)) {
+     throw new InvalidArgumentError2("Parameter format must be one of 'json', 'yaml', or 'auto'.");
+   }
+   return normalized;
+ }
+ function createHumanInputHandler(env, progress) {
+   const stdout = env.stdout;
+   if (!isInteractive(env.stdin) || typeof stdout.isTTY !== "boolean" || !stdout.isTTY) {
+     return void 0;
+   }
+   return async (question) => {
+     progress.pause();
+     const rl = createInterface({ input: env.stdin, output: env.stdout });
+     try {
+       const questionLine = question.trim() ? `
+ ${question.trim()}` : "";
+       let isFirst = true;
+       while (true) {
+         const statsPrompt = progress.formatPrompt();
+         const prompt = isFirst ? `${questionLine}
+ ${statsPrompt}` : statsPrompt;
+         isFirst = false;
+         const answer = await rl.question(prompt);
+         const trimmed = answer.trim();
+         if (trimmed) {
+           return trimmed;
+         }
+       }
+     } finally {
+       rl.close();
+     }
+   };
+ }
+ function formatGadgetSummary(result) {
+   const gadgetLabel = chalk3.magenta.bold(result.gadgetName);
+   const timeLabel = chalk3.dim(`${Math.round(result.executionTimeMs)}ms`);
+   if (result.error) {
+     return `${chalk3.red("\u2717")} ${gadgetLabel} ${chalk3.red("error:")} ${result.error} ${timeLabel}`;
+   }
+   if (result.breaksLoop) {
+     return `${chalk3.yellow("\u23F9")} ${gadgetLabel} ${chalk3.yellow("finished:")} ${result.result} ${timeLabel}`;
+   }
+   const maxLen = 80;
+   const resultText = result.result ? result.result.length > maxLen ? `${result.result.slice(0, maxLen)}...` : result.result : "";
+   return `${chalk3.green("\u2713")} ${gadgetLabel} ${chalk3.dim("\u2192")} ${resultText} ${timeLabel}`;
+ }
+ async function handleAgentCommand(promptArg, options, env) {
+   const prompt = await resolvePrompt(promptArg, env);
+   const client = env.createClient();
+   const registry = new GadgetRegistry();
+   if (options.builtins !== false) {
+     for (const gadget of builtinGadgets) {
+       registry.registerByClass(gadget);
+     }
+   }
+   const gadgetSpecifiers = options.gadget ?? [];
+   if (gadgetSpecifiers.length > 0) {
+     const gadgets2 = await loadGadgets(gadgetSpecifiers, process.cwd());
+     for (const gadget of gadgets2) {
+       registry.registerByClass(gadget);
+     }
+   }
+   const printer = new StreamPrinter(env.stdout);
+   const stderrTTY = env.stderr.isTTY === true;
+   const progress = new StreamProgress(env.stderr, stderrTTY);
+   let finishReason;
+   let usage;
+   let iterations = 0;
+   const estimateMessagesTokens = (messages) => {
+     const totalChars = messages.reduce((sum, m) => sum + (m.content?.length ?? 0), 0);
+     return Math.round(totalChars / FALLBACK_CHARS_PER_TOKEN);
+   };
+   const builder = new AgentBuilder(client).withModel(options.model).withLogger(env.createLogger("llmist:cli:agent")).withHooks({
+     observers: {
+       onLLMCallStart: async (context) => {
+         const estimate = estimateMessagesTokens(context.options.messages);
+         progress.startCall(context.options.model, estimate);
+       },
+       onStreamChunk: async (context) => {
+         progress.update(context.accumulatedText.length);
+         if (context.usage) {
+           if (context.usage.inputTokens) {
+             progress.setInputTokens(context.usage.inputTokens, false);
+           }
+           if (context.usage.outputTokens) {
+             progress.setOutputTokens(context.usage.outputTokens, false);
+           }
+         }
+       },
+       onLLMCallComplete: async (context) => {
+         finishReason = context.finishReason;
+         usage = context.usage;
+         iterations = Math.max(iterations, context.iteration + 1);
+         progress.endCall(context.usage);
+       }
+     }
+   });
+   if (options.system) {
+     builder.withSystem(options.system);
+   }
+   if (options.maxIterations !== void 0) {
+     builder.withMaxIterations(options.maxIterations);
+   }
+   if (options.temperature !== void 0) {
+     builder.withTemperature(options.temperature);
+   }
+   const humanInputHandler = createHumanInputHandler(env, progress);
+   if (humanInputHandler) {
+     builder.onHumanInput(humanInputHandler);
+   }
+   const gadgets = registry.getAll();
+   if (gadgets.length > 0) {
+     builder.withGadgets(...gadgets);
+   }
+   const agent = builder.ask(prompt);
+   for await (const event of agent.run()) {
+     if (event.type === "text") {
+       progress.pause();
+       printer.write(event.content);
+     } else if (event.type === "gadget_result") {
+       progress.pause();
+       env.stderr.write(`${formatGadgetSummary(event.result)}
+ `);
+     }
+   }
+   progress.complete();
+   printer.ensureNewline();
+   const summary = renderSummary({ finishReason, usage, iterations });
+   if (summary) {
+     env.stderr.write(`${summary}
+ `);
+   }
+ }
+ function registerAgentCommand(program, env) {
+   program.command(COMMANDS.agent).description("Run the llmist agent loop with optional gadgets.").argument("[prompt]", "Prompt for the agent loop. Falls back to stdin when available.").option(OPTION_FLAGS.model, OPTION_DESCRIPTIONS.model, DEFAULT_MODEL).option(OPTION_FLAGS.systemPrompt, OPTION_DESCRIPTIONS.systemPrompt).option(
+     OPTION_FLAGS.temperature,
+     OPTION_DESCRIPTIONS.temperature,
+     createNumericParser({ label: "Temperature", min: 0, max: 2 })
+   ).option(
+     OPTION_FLAGS.maxIterations,
+     OPTION_DESCRIPTIONS.maxIterations,
+     createNumericParser({ label: "Max iterations", integer: true, min: 1 })
+   ).option(
+     OPTION_FLAGS.gadgetModule,
+     OPTION_DESCRIPTIONS.gadgetModule,
+     (value, previous = []) => [...previous, value],
+     []
+   ).option(
+     OPTION_FLAGS.parameterFormat,
+     OPTION_DESCRIPTIONS.parameterFormat,
+     parseParameterFormat,
+     DEFAULT_PARAMETER_FORMAT
+   ).option(OPTION_FLAGS.noBuiltins, OPTION_DESCRIPTIONS.noBuiltins).action(
+     (prompt, options) => executeAction(() => handleAgentCommand(prompt, options, env), env)
+   );
+ }
+
+ // src/cli/complete-command.ts
+ init_messages();
+ init_constants();
+ async function handleCompleteCommand(promptArg, options, env) {
+   const prompt = await resolvePrompt(promptArg, env);
+   const client = env.createClient();
+   const builder = new LLMMessageBuilder();
+   if (options.system) {
+     builder.addSystem(options.system);
+   }
+   builder.addUser(prompt);
+   const stream = client.stream({
+     model: options.model,
+     messages: builder.build(),
+     temperature: options.temperature,
+     maxTokens: options.maxTokens
+   });
+   const printer = new StreamPrinter(env.stdout);
+   const stderrTTY = env.stderr.isTTY === true;
+   const progress = new StreamProgress(env.stderr, stderrTTY);
+   const estimatedInputTokens = Math.round(prompt.length / FALLBACK_CHARS_PER_TOKEN);
+   progress.startCall(options.model, estimatedInputTokens);
+   let finishReason;
+   let usage;
+   let totalChars = 0;
+   for await (const chunk of stream) {
+     if (chunk.usage) {
+       usage = chunk.usage;
+       if (chunk.usage.inputTokens) {
+         progress.setInputTokens(chunk.usage.inputTokens, false);
+       }
+       if (chunk.usage.outputTokens) {
+         progress.setOutputTokens(chunk.usage.outputTokens, false);
+       }
+     }
+     if (chunk.text) {
+       progress.pause();
+       totalChars += chunk.text.length;
+       progress.update(totalChars);
+       printer.write(chunk.text);
+     }
+     if (chunk.finishReason !== void 0) {
+       finishReason = chunk.finishReason;
+     }
+   }
+   progress.complete();
+   printer.ensureNewline();
+   const summary = renderSummary({ finishReason, usage });
+   if (summary) {
+     env.stderr.write(`${summary}
+ `);
+   }
+ }
+ function registerCompleteCommand(program, env) {
+   program.command(COMMANDS.complete).description("Stream a single completion from a specified model.").argument("[prompt]", "Prompt to send to the LLM. If omitted, stdin is used when available.").option(OPTION_FLAGS.model, OPTION_DESCRIPTIONS.model, DEFAULT_MODEL).option(OPTION_FLAGS.systemPrompt, OPTION_DESCRIPTIONS.systemPrompt).option(
+     OPTION_FLAGS.temperature,
+     OPTION_DESCRIPTIONS.temperature,
+     createNumericParser({ label: "Temperature", min: 0, max: 2 })
+   ).option(
+     OPTION_FLAGS.maxTokens,
+     OPTION_DESCRIPTIONS.maxTokens,
+     createNumericParser({ label: "Max tokens", integer: true, min: 1 })
+   ).action(
+     (prompt, options) => executeAction(
+       () => handleCompleteCommand(prompt, options, env),
+       env
+     )
+   );
+ }
+
+ // src/cli/environment.ts
+ init_client();
+ init_logger();
+ import readline from "node:readline";
+ import chalk4 from "chalk";
+ var LOG_LEVEL_MAP = {
+   silly: 0,
+   trace: 1,
+   debug: 2,
+   info: 3,
+   warn: 4,
+   error: 5,
+   fatal: 6
+ };
+ function createLoggerFactory(config) {
+   return (name) => {
+     const options = { name };
+     if (config?.logLevel) {
+       const level = config.logLevel.toLowerCase();
+       if (level in LOG_LEVEL_MAP) {
+         options.minLevel = LOG_LEVEL_MAP[level];
+       }
+     }
+     if (config?.logFile) {
+       const originalLogFile = process.env.LLMIST_LOG_FILE;
+       process.env.LLMIST_LOG_FILE = config.logFile;
+       const logger = createLogger(options);
+       if (originalLogFile === void 0) {
+         delete process.env.LLMIST_LOG_FILE;
+       } else {
+         process.env.LLMIST_LOG_FILE = originalLogFile;
+       }
+       return logger;
+     }
+     if (!process.env.LLMIST_LOG_FILE) {
+       options.type = "pretty";
+     }
+     return createLogger(options);
+   };
+ }
+ function createPromptFunction(stdin, stdout) {
+   return (question) => {
+     return new Promise((resolve) => {
+       const rl = readline.createInterface({
+         input: stdin,
+         output: stdout
+       });
+       stdout.write("\n");
+       stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
+ `);
+       stdout.write(chalk4.cyan.bold("\u{1F916} Agent asks:\n"));
+       stdout.write(`${question}
+ `);
+       stdout.write(`${chalk4.cyan("\u2500".repeat(60))}
+ `);
+       rl.question(chalk4.green.bold("You: "), (answer) => {
+         rl.close();
+         resolve(answer);
+       });
+     });
+   };
+ }
+ function createDefaultEnvironment(loggerConfig) {
+   const isTTY = Boolean(process.stdin.isTTY);
+   return {
+     argv: process.argv,
+     stdin: process.stdin,
+     stdout: process.stdout,
+     stderr: process.stderr,
+     createClient: () => new LLMist(),
+     setExitCode: (code) => {
+       process.exitCode = code;
+     },
+     loggerConfig,
+     createLogger: createLoggerFactory(loggerConfig),
+     isTTY,
+     prompt: isTTY ? createPromptFunction(process.stdin, process.stdout) : async () => {
+       throw new Error("Cannot prompt for input: stdin is not a TTY");
+     }
+   };
+ }
+
+ // src/cli/program.ts
+ function parseLogLevel(value) {
+   const normalized = value.toLowerCase();
+   if (!LOG_LEVELS.includes(normalized)) {
+     throw new InvalidArgumentError3(`Log level must be one of: ${LOG_LEVELS.join(", ")}`);
+   }
+   return normalized;
+ }
+ function createProgram(env) {
+   const program = new Command();
+   program.name(CLI_NAME).description(CLI_DESCRIPTION).version(package_default.version).option(OPTION_FLAGS.logLevel, OPTION_DESCRIPTIONS.logLevel, parseLogLevel).option(OPTION_FLAGS.logFile, OPTION_DESCRIPTIONS.logFile).configureOutput({
+     writeOut: (str) => env.stdout.write(str),
+     writeErr: (str) => env.stderr.write(str)
+   });
+   registerCompleteCommand(program, env);
+   registerAgentCommand(program, env);
+   return program;
+ }
+ async function runCLI(overrides = {}) {
+   const preParser = new Command();
+   preParser.option(OPTION_FLAGS.logLevel, OPTION_DESCRIPTIONS.logLevel, parseLogLevel).option(OPTION_FLAGS.logFile, OPTION_DESCRIPTIONS.logFile).allowUnknownOption().allowExcessArguments().helpOption(false);
+   preParser.parse(process.argv);
+   const globalOpts = preParser.opts();
+   const loggerConfig = {
+     logLevel: globalOpts.logLevel,
+     logFile: globalOpts.logFile
+   };
+   const defaultEnv = createDefaultEnvironment(loggerConfig);
+   const env = { ...defaultEnv, ...overrides };
+   const program = createProgram(env);
+   await program.parseAsync(env.argv);
+ }
+
+ // src/cli.ts
+ runCLI().catch((error) => {
+   const message = error instanceof Error ? error.message : String(error);
+   process.stderr.write(`${SUMMARY_PREFIX} Error: ${message}
+ `);
+   process.exitCode = 1;
+ });
+ //# sourceMappingURL=cli.js.map
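For context on the `-g, --gadget` option above: `loadGadgets` dynamically imports each specifier (relative, absolute, and `~`-prefixed paths are resolved to file URLs; anything else is treated as a bare module name), and `extractGadgetsFromModule` walks every export, collecting `BaseGadget` instances and subclasses, including those nested in arrays or objects. Below is a minimal sketch of a module that flag could load. It assumes the package's public root export exposes `createGadget` (the bundle imports it from an internal chunk, so the exact public import path is an assumption), and the file name and `CurrentTime` gadget are hypothetical; the shape mirrors the built-in AskUser/TellUser gadgets in the diff.

// my-gadget.mjs — hypothetical gadget module for `llmist agent -g ./my-gadget.mjs`
import { createGadget } from "llmist"; // assumed public re-export of createGadget
import { z } from "zod";

// A named export is enough: extractGadgetsFromModule() also accepts default
// exports, arrays, or nested objects of gadget instances/classes.
export const currentTime = createGadget({
  name: "CurrentTime",
  description: "Return the current date and time in a given timezone.",
  schema: z.object({
    timezone: z.string().describe("IANA timezone name, e.g. Europe/Warsaw")
  }),
  // The returned string becomes the gadget result fed back to the model;
  // throwing BreakLoopException or HumanInputException (as the builtins do)
  // would instead end the agent loop or prompt the user for input.
  execute: ({ timezone }) => new Date().toLocaleString("en-US", { timeZone: timezone })
});

With that file on disk, an invocation might look like `llmist agent -g ./my-gadget.mjs "What time is it in Warsaw?"`; the leading `./` makes `resolveGadgetSpecifier` resolve the file against the current working directory before importing it.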