llmist 0.8.0 → 1.0.0

This diff shows the contents of these publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/cli.js CHANGED
@@ -22,7 +22,7 @@ import {
   init_model_shortcuts,
   init_registry,
   resolveModel
-} from "./chunk-62M4TDAK.js";
+} from "./chunk-T24KLXY4.js";
 
 // src/cli/constants.ts
 var CLI_NAME = "llmist";
@@ -34,7 +34,6 @@ var COMMANDS = {
 };
 var LOG_LEVELS = ["silly", "trace", "debug", "info", "warn", "error", "fatal"];
 var DEFAULT_MODEL = "openai:gpt-5-nano";
-var DEFAULT_PARAMETER_FORMAT = "toml";
 var OPTION_FLAGS = {
   model: "-m, --model <identifier>",
   systemPrompt: "-s, --system <prompt>",
@@ -42,10 +41,11 @@ var OPTION_FLAGS = {
   maxTokens: "--max-tokens <count>",
   maxIterations: "-i, --max-iterations <count>",
   gadgetModule: "-g, --gadget <module>",
-  parameterFormat: "--parameter-format <format>",
   logLevel: "--log-level <level>",
   logFile: "--log-file <path>",
   logReset: "--log-reset",
+  logLlmRequests: "--log-llm-requests [dir]",
+  logLlmResponses: "--log-llm-responses [dir]",
   noBuiltins: "--no-builtins",
   noBuiltinInteraction: "--no-builtin-interaction",
   quiet: "-q, --quiet"
@@ -57,10 +57,11 @@ var OPTION_DESCRIPTIONS = {
   maxTokens: "Maximum number of output tokens requested from the model.",
   maxIterations: "Maximum number of agent loop iterations before exiting.",
   gadgetModule: "Path or module specifier for a gadget export. Repeat to register multiple gadgets.",
-  parameterFormat: "Format for gadget parameter schemas: 'json', 'yaml', 'toml', or 'auto'.",
   logLevel: "Log level: silly, trace, debug, info, warn, error, fatal.",
   logFile: "Path to log file. When set, logs are written to file instead of stderr.",
   logReset: "Reset (truncate) the log file at session start instead of appending.",
+  logLlmRequests: "Save raw LLM requests as plain text. Optional dir, defaults to ~/.llmist/logs/requests/",
+  logLlmResponses: "Save raw LLM responses as plain text. Optional dir, defaults to ~/.llmist/logs/responses/",
   noBuiltins: "Disable built-in gadgets (AskUser, TellUser).",
   noBuiltinInteraction: "Disable interactive gadgets (AskUser) while keeping TellUser.",
   quiet: "Suppress all output except content (text and TellUser messages)."
@@ -68,12 +69,12 @@ var OPTION_DESCRIPTIONS = {
 var SUMMARY_PREFIX = "[llmist]";
 
 // src/cli/program.ts
-import { Command, InvalidArgumentError as InvalidArgumentError3 } from "commander";
+import { Command, InvalidArgumentError as InvalidArgumentError2 } from "commander";
 
 // package.json
 var package_default = {
   name: "llmist",
-  version: "0.7.0",
+  version: "0.8.0",
   description: "Universal TypeScript LLM client with streaming-first agent framework. Works with any model - no structured outputs or native tool calling required. Implements its own flexible grammar for function calling.",
   type: "module",
   main: "dist/index.cjs",
@@ -375,8 +376,33 @@ async function loadGadgets(specifiers, cwd, importer = (specifier) => import(spe
   return gadgets;
 }
 
-// src/cli/option-helpers.ts
-import { InvalidArgumentError as InvalidArgumentError2 } from "commander";
+// src/cli/llm-logging.ts
+import { mkdir, writeFile } from "node:fs/promises";
+import { homedir } from "node:os";
+import { join } from "node:path";
+var DEFAULT_LLM_LOG_DIR = join(homedir(), ".llmist", "logs");
+function resolveLogDir(option, subdir) {
+  if (option === true) {
+    return join(DEFAULT_LLM_LOG_DIR, subdir);
+  }
+  if (typeof option === "string") {
+    return option;
+  }
+  return void 0;
+}
+function formatLlmRequest(messages) {
+  const lines = [];
+  for (const msg of messages) {
+    lines.push(`=== ${msg.role.toUpperCase()} ===`);
+    lines.push(msg.content ?? "");
+    lines.push("");
+  }
+  return lines.join("\n");
+}
+async function writeLogFile(dir, filename, content) {
+  await mkdir(dir, { recursive: true });
+  await writeFile(join(dir, filename), content, "utf-8");
+}
 
 // src/cli/utils.ts
 init_constants();
@@ -420,9 +446,29 @@ function ensureMarkedConfigured() {
 }
 function renderMarkdown(text) {
   ensureMarkedConfigured();
-  const rendered = marked.parse(text);
+  let rendered = marked.parse(text);
+  rendered = rendered.replace(/\*\*(.+?)\*\*/g, (_, content) => chalk.bold(content)).replace(/(?<!\*)\*(\S[^*]*)\*(?!\*)/g, (_, content) => chalk.italic(content));
   return rendered.trimEnd();
 }
+function createRainbowSeparator() {
+  const colors = [chalk.red, chalk.yellow, chalk.green, chalk.cyan, chalk.blue, chalk.magenta];
+  const char = "\u2500";
+  const width = process.stdout.columns || 80;
+  let result = "";
+  for (let i = 0; i < width; i++) {
+    result += colors[i % colors.length](char);
+  }
+  return result;
+}
+function renderMarkdownWithSeparators(text) {
+  const rendered = renderMarkdown(text);
+  const separator = createRainbowSeparator();
+  return `
+${separator}
+${rendered}
+${separator}
+`;
+}
 function formatTokens(tokens) {
   return tokens >= 1e3 ? `${(tokens / 1e3).toFixed(1)}k` : `${tokens}`;
 }
@@ -542,7 +588,7 @@ function formatGadgetSummary(result) {
   const summaryLine = `${icon} ${gadgetLabel}${paramsLabel} ${chalk.dim("\u2192")} ${outputLabel} ${timeLabel}`;
   if (result.gadgetName === "TellUser" && result.parameters?.message) {
     const message = String(result.parameters.message);
-    const rendered = renderMarkdown(message);
+    const rendered = renderMarkdownWithSeparators(message);
     return `${summaryLine}
 ${rendered}`;
   }
@@ -849,7 +895,7 @@ var StreamProgress = class {
     }
     this.isRunning = false;
    if (this.hasRendered) {
-      this.target.write("\r\x1B[K");
+      this.target.write("\r\x1B[K\x1B[0G");
      this.hasRendered = false;
    }
  }
@@ -942,16 +988,6 @@ async function executeAction(action, env) {
 }
 
 // src/cli/option-helpers.ts
-var PARAMETER_FORMAT_VALUES = ["json", "yaml", "toml", "auto"];
-function parseParameterFormat(value) {
-  const normalized = value.toLowerCase();
-  if (!PARAMETER_FORMAT_VALUES.includes(normalized)) {
-    throw new InvalidArgumentError2(
-      `Parameter format must be one of: ${PARAMETER_FORMAT_VALUES.join(", ")}`
-    );
-  }
-  return normalized;
-}
 function addCompleteOptions(cmd, defaults) {
   return cmd.option(OPTION_FLAGS.model, OPTION_DESCRIPTIONS.model, defaults?.model ?? DEFAULT_MODEL).option(OPTION_FLAGS.systemPrompt, OPTION_DESCRIPTIONS.systemPrompt, defaults?.system).option(
     OPTION_FLAGS.temperature,
@@ -963,7 +999,7 @@ function addCompleteOptions(cmd, defaults) {
     OPTION_DESCRIPTIONS.maxTokens,
     createNumericParser({ label: "Max tokens", integer: true, min: 1 }),
     defaults?.["max-tokens"]
-  ).option(OPTION_FLAGS.quiet, OPTION_DESCRIPTIONS.quiet, defaults?.quiet);
+  ).option(OPTION_FLAGS.quiet, OPTION_DESCRIPTIONS.quiet, defaults?.quiet).option(OPTION_FLAGS.logLlmRequests, OPTION_DESCRIPTIONS.logLlmRequests, defaults?.["log-llm-requests"]).option(OPTION_FLAGS.logLlmResponses, OPTION_DESCRIPTIONS.logLlmResponses, defaults?.["log-llm-responses"]);
 }
 function addAgentOptions(cmd, defaults) {
   const gadgetAccumulator = (value, previous = []) => [
@@ -983,16 +1019,11 @@ function addAgentOptions(cmd, defaults) {
     defaults?.["max-iterations"]
   ).option(OPTION_FLAGS.gadgetModule, OPTION_DESCRIPTIONS.gadgetModule, gadgetAccumulator, [
     ...defaultGadgets
-  ]).option(
-    OPTION_FLAGS.parameterFormat,
-    OPTION_DESCRIPTIONS.parameterFormat,
-    parseParameterFormat,
-    defaults?.["parameter-format"] ?? DEFAULT_PARAMETER_FORMAT
-  ).option(OPTION_FLAGS.noBuiltins, OPTION_DESCRIPTIONS.noBuiltins, defaults?.builtins !== false).option(
+  ]).option(OPTION_FLAGS.noBuiltins, OPTION_DESCRIPTIONS.noBuiltins, defaults?.builtins !== false).option(
     OPTION_FLAGS.noBuiltinInteraction,
     OPTION_DESCRIPTIONS.noBuiltinInteraction,
     defaults?.["builtin-interaction"] !== false
-  ).option(OPTION_FLAGS.quiet, OPTION_DESCRIPTIONS.quiet, defaults?.quiet);
+  ).option(OPTION_FLAGS.quiet, OPTION_DESCRIPTIONS.quiet, defaults?.quiet).option(OPTION_FLAGS.logLlmRequests, OPTION_DESCRIPTIONS.logLlmRequests, defaults?.["log-llm-requests"]).option(OPTION_FLAGS.logLlmResponses, OPTION_DESCRIPTIONS.logLlmResponses, defaults?.["log-llm-responses"]);
 }
 function configToCompleteOptions(config) {
   const result = {};
@@ -1001,6 +1032,8 @@ function configToCompleteOptions(config) {
   if (config.temperature !== void 0) result.temperature = config.temperature;
   if (config["max-tokens"] !== void 0) result.maxTokens = config["max-tokens"];
   if (config.quiet !== void 0) result.quiet = config.quiet;
+  if (config["log-llm-requests"] !== void 0) result.logLlmRequests = config["log-llm-requests"];
+  if (config["log-llm-responses"] !== void 0) result.logLlmResponses = config["log-llm-responses"];
   return result;
 }
 function configToAgentOptions(config) {
@@ -1010,7 +1043,6 @@ function configToAgentOptions(config) {
   if (config.temperature !== void 0) result.temperature = config.temperature;
   if (config["max-iterations"] !== void 0) result.maxIterations = config["max-iterations"];
   if (config.gadget !== void 0) result.gadget = config.gadget;
-  if (config["parameter-format"] !== void 0) result.parameterFormat = config["parameter-format"];
   if (config.builtins !== void 0) result.builtins = config.builtins;
   if (config["builtin-interaction"] !== void 0)
     result.builtinInteraction = config["builtin-interaction"];
@@ -1018,7 +1050,11 @@ function configToAgentOptions(config) {
     result.gadgetStartPrefix = config["gadget-start-prefix"];
   if (config["gadget-end-prefix"] !== void 0)
     result.gadgetEndPrefix = config["gadget-end-prefix"];
+  if (config["gadget-arg-prefix"] !== void 0)
+    result.gadgetArgPrefix = config["gadget-arg-prefix"];
   if (config.quiet !== void 0) result.quiet = config.quiet;
+  if (config["log-llm-requests"] !== void 0) result.logLlmRequests = config["log-llm-requests"];
+  if (config["log-llm-responses"] !== void 0) result.logLlmResponses = config["log-llm-responses"];
   return result;
 }
 
@@ -1042,7 +1078,7 @@ function createHumanInputHandler(env, progress) {
   const rl = createInterface({ input: env.stdin, output: env.stdout });
   try {
     const questionLine = question.trim() ? `
-${renderMarkdown(question.trim())}` : "";
+${renderMarkdownWithSeparators(question.trim())}` : "";
     let isFirst = true;
     while (true) {
       const statsPrompt = progress.formatPrompt();
@@ -1085,6 +1121,9 @@ async function executeAgent(promptArg, options, env) {
   const progress = new StreamProgress(env.stderr, stderrTTY, client.modelRegistry);
   let usage;
   let iterations = 0;
+  const llmRequestsDir = resolveLogDir(options.logLlmRequests, "requests");
+  const llmResponsesDir = resolveLogDir(options.logLlmResponses, "responses");
+  let llmCallCounter = 0;
   const countMessagesTokens = async (model, messages) => {
     try {
       return await client.countTokens(model, messages);
@@ -1107,12 +1146,18 @@ async function executeAgent(promptArg, options, env) {
     // onLLMCallStart: Start progress indicator for each LLM call
     // This showcases how to react to agent lifecycle events
     onLLMCallStart: async (context) => {
+      llmCallCounter++;
       const inputTokens = await countMessagesTokens(
         context.options.model,
         context.options.messages
       );
       progress.startCall(context.options.model, inputTokens);
       progress.setInputTokens(inputTokens, false);
+      if (llmRequestsDir) {
+        const filename = `${Date.now()}_call_${llmCallCounter}.request.txt`;
+        const content = formatLlmRequest(context.options.messages);
+        await writeLogFile(llmRequestsDir, filename, content);
+      }
     },
     // onStreamChunk: Real-time updates as LLM generates tokens
     // This enables responsive UIs that show progress during generation
@@ -1175,6 +1220,10 @@ async function executeAgent(promptArg, options, env) {
 `);
         }
       }
+      if (llmResponsesDir) {
+        const filename = `${Date.now()}_call_${llmCallCounter}.response.txt`;
+        await writeLogFile(llmResponsesDir, filename, context.rawResponse);
+      }
     }
   },
   // SHOWCASE: Controller-based approval gating for dangerous gadgets
@@ -1239,13 +1288,15 @@ Command rejected by user with message: "${response}"`
   if (gadgets.length > 0) {
     builder.withGadgets(...gadgets);
   }
-  builder.withParameterFormat(options.parameterFormat);
   if (options.gadgetStartPrefix) {
     builder.withGadgetStartPrefix(options.gadgetStartPrefix);
   }
   if (options.gadgetEndPrefix) {
     builder.withGadgetEndPrefix(options.gadgetEndPrefix);
   }
+  if (options.gadgetArgPrefix) {
+    builder.withGadgetArgPrefix(options.gadgetArgPrefix);
+  }
   builder.withSyntheticGadgetCall(
     "TellUser",
     {
@@ -1262,17 +1313,25 @@ Command rejected by user with message: "${response}"`
     resultMapping: (text) => `\u2139\uFE0F ${text}`
   });
   const agent = builder.ask(prompt);
+  let textBuffer = "";
+  const flushTextBuffer = () => {
+    if (textBuffer) {
+      const output = options.quiet ? textBuffer : renderMarkdownWithSeparators(textBuffer);
+      printer.write(output);
+      textBuffer = "";
+    }
+  };
   for await (const event of agent.run()) {
     if (event.type === "text") {
       progress.pause();
-      printer.write(event.content);
+      textBuffer += event.content;
     } else if (event.type === "gadget_result") {
+      flushTextBuffer();
       progress.pause();
       if (options.quiet) {
         if (event.result.gadgetName === "TellUser" && event.result.parameters?.message) {
           const message = String(event.result.parameters.message);
-          const rendered = renderMarkdown(message);
-          env.stdout.write(`${rendered}
+          env.stdout.write(`${message}
 `);
         }
       } else {
@@ -1282,6 +1341,7 @@ Command rejected by user with message: "${response}"`
       }
     }
   }
+  flushTextBuffer();
   progress.complete();
   printer.ensureNewline();
   if (!options.quiet && iterations > 1) {
@@ -1320,9 +1380,18 @@ async function executeComplete(promptArg, options, env) {
     builder.addSystem(options.system);
   }
   builder.addUser(prompt);
+  const messages = builder.build();
+  const llmRequestsDir = resolveLogDir(options.logLlmRequests, "requests");
+  const llmResponsesDir = resolveLogDir(options.logLlmResponses, "responses");
+  const timestamp = Date.now();
+  if (llmRequestsDir) {
+    const filename = `${timestamp}_complete.request.txt`;
+    const content = formatLlmRequest(messages);
+    await writeLogFile(llmRequestsDir, filename, content);
+  }
   const stream = client.stream({
     model,
-    messages: builder.build(),
+    messages,
     temperature: options.temperature,
     maxTokens: options.maxTokens
   });
@@ -1333,7 +1402,7 @@ async function executeComplete(promptArg, options, env) {
   progress.startCall(model, estimatedInputTokens);
   let finishReason;
   let usage;
-  let totalChars = 0;
+  let accumulatedResponse = "";
   for await (const chunk of stream) {
     if (chunk.usage) {
       usage = chunk.usage;
@@ -1346,8 +1415,8 @@ async function executeComplete(promptArg, options, env) {
     }
     if (chunk.text) {
       progress.pause();
-      totalChars += chunk.text.length;
-      progress.update(totalChars);
+      accumulatedResponse += chunk.text;
+      progress.update(accumulatedResponse.length);
       printer.write(chunk.text);
     }
     if (chunk.finishReason !== void 0) {
@@ -1357,6 +1426,10 @@ async function executeComplete(promptArg, options, env) {
   progress.endCall(usage);
   progress.complete();
   printer.ensureNewline();
+  if (llmResponsesDir) {
+    const filename = `${timestamp}_complete.response.txt`;
+    await writeLogFile(llmResponsesDir, filename, accumulatedResponse);
+  }
   if (stderrTTY && !options.quiet) {
     const summary = renderSummary({ finishReason, usage, cost: progress.getTotalCost() });
     if (summary) {
@@ -1375,8 +1448,8 @@ function registerCompleteCommand(program, env, config) {
 
 // src/cli/config.ts
 import { existsSync, readFileSync } from "node:fs";
-import { homedir } from "node:os";
-import { join } from "node:path";
+import { homedir as homedir2 } from "node:os";
+import { join as join2 } from "node:path";
 import { load as parseToml } from "js-toml";
 
 // src/cli/templates.ts
@@ -1471,6 +1544,8 @@ var COMPLETE_CONFIG_KEYS = /* @__PURE__ */ new Set([
   "log-level",
   "log-file",
   "log-reset",
+  "log-llm-requests",
+  "log-llm-responses",
   "type"
   // Allowed for inheritance compatibility, ignored for built-in commands
 ]);
@@ -1480,16 +1555,18 @@ var AGENT_CONFIG_KEYS = /* @__PURE__ */ new Set([
   "temperature",
   "max-iterations",
   "gadget",
-  "parameter-format",
   "builtins",
   "builtin-interaction",
   "gadget-start-prefix",
   "gadget-end-prefix",
+  "gadget-arg-prefix",
   "quiet",
   "inherits",
   "log-level",
   "log-file",
   "log-reset",
+  "log-llm-requests",
+  "log-llm-responses",
   "type"
   // Allowed for inheritance compatibility, ignored for built-in commands
 ]);
@@ -1499,9 +1576,8 @@ var CUSTOM_CONFIG_KEYS = /* @__PURE__ */ new Set([
   "type",
   "description"
 ]);
-var VALID_PARAMETER_FORMATS = ["json", "yaml", "toml", "auto"];
 function getConfigPath() {
-  return join(homedir(), ".llmist", "cli.toml");
+  return join2(homedir2(), ".llmist", "cli.toml");
 }
 var ConfigError = class extends Error {
   constructor(message, path2) {
@@ -1635,6 +1711,20 @@ function validateCompleteConfig(raw, section) {
   if ("quiet" in rawObj) {
     result.quiet = validateBoolean(rawObj.quiet, "quiet", section);
   }
+  if ("log-llm-requests" in rawObj) {
+    result["log-llm-requests"] = validateStringOrBoolean(
+      rawObj["log-llm-requests"],
+      "log-llm-requests",
+      section
+    );
+  }
+  if ("log-llm-responses" in rawObj) {
+    result["log-llm-responses"] = validateStringOrBoolean(
+      rawObj["log-llm-responses"],
+      "log-llm-responses",
+      section
+    );
+  }
   return result;
 }
 function validateAgentConfig(raw, section) {
@@ -1660,15 +1750,6 @@ function validateAgentConfig(raw, section) {
   if ("gadget" in rawObj) {
     result.gadget = validateStringArray(rawObj.gadget, "gadget", section);
   }
-  if ("parameter-format" in rawObj) {
-    const format = validateString(rawObj["parameter-format"], "parameter-format", section);
-    if (!VALID_PARAMETER_FORMATS.includes(format)) {
-      throw new ConfigError(
-        `[${section}].parameter-format must be one of: ${VALID_PARAMETER_FORMATS.join(", ")}`
-      );
-    }
-    result["parameter-format"] = format;
-  }
   if ("builtins" in rawObj) {
     result.builtins = validateBoolean(rawObj.builtins, "builtins", section);
   }
@@ -1693,11 +1774,38 @@ function validateAgentConfig(raw, section) {
       section
     );
   }
+  if ("gadget-arg-prefix" in rawObj) {
+    result["gadget-arg-prefix"] = validateString(
+      rawObj["gadget-arg-prefix"],
+      "gadget-arg-prefix",
+      section
+    );
+  }
   if ("quiet" in rawObj) {
     result.quiet = validateBoolean(rawObj.quiet, "quiet", section);
   }
+  if ("log-llm-requests" in rawObj) {
+    result["log-llm-requests"] = validateStringOrBoolean(
+      rawObj["log-llm-requests"],
+      "log-llm-requests",
+      section
+    );
+  }
+  if ("log-llm-responses" in rawObj) {
+    result["log-llm-responses"] = validateStringOrBoolean(
+      rawObj["log-llm-responses"],
+      "log-llm-responses",
+      section
+    );
+  }
   return result;
 }
+function validateStringOrBoolean(value, field, section) {
+  if (typeof value === "string" || typeof value === "boolean") {
+    return value;
+  }
+  throw new ConfigError(`[${section}].${field} must be a string or boolean`);
+}
 function validateCustomConfig(raw, section) {
   if (typeof raw !== "object" || raw === null) {
     throw new ConfigError(`[${section}] must be a table`);
1732
1840
  if ("gadget" in rawObj) {
1733
1841
  result.gadget = validateStringArray(rawObj.gadget, "gadget", section);
1734
1842
  }
1735
- if ("parameter-format" in rawObj) {
1736
- const format = validateString(rawObj["parameter-format"], "parameter-format", section);
1737
- if (!VALID_PARAMETER_FORMATS.includes(format)) {
1738
- throw new ConfigError(
1739
- `[${section}].parameter-format must be one of: ${VALID_PARAMETER_FORMATS.join(", ")}`
1740
- );
1741
- }
1742
- result["parameter-format"] = format;
1743
- }
1744
1843
  if ("builtins" in rawObj) {
1745
1844
  result.builtins = validateBoolean(rawObj.builtins, "builtins", section);
1746
1845
  }
@@ -1765,6 +1864,13 @@ function validateCustomConfig(raw, section) {
1765
1864
  section
1766
1865
  );
1767
1866
  }
1867
+ if ("gadget-arg-prefix" in rawObj) {
1868
+ result["gadget-arg-prefix"] = validateString(
1869
+ rawObj["gadget-arg-prefix"],
1870
+ "gadget-arg-prefix",
1871
+ section
1872
+ );
1873
+ }
1768
1874
  if ("max-tokens" in rawObj) {
1769
1875
  result["max-tokens"] = validateNumber(rawObj["max-tokens"], "max-tokens", section, {
1770
1876
  integer: true,
@@ -2250,7 +2356,7 @@ function registerCustomCommand(program, name, config, env) {
 function parseLogLevel(value) {
   const normalized = value.toLowerCase();
   if (!LOG_LEVELS.includes(normalized)) {
-    throw new InvalidArgumentError3(`Log level must be one of: ${LOG_LEVELS.join(", ")}`);
+    throw new InvalidArgumentError2(`Log level must be one of: ${LOG_LEVELS.join(", ")}`);
  }
   return normalized;
 }
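
For orientation, the LLM request/response logging that this release adds is small enough to sketch in isolation. The TypeScript below re-types the helpers from the new src/cli/llm-logging.ts chunk above, plus the agent path's filename scheme; it is an illustrative sketch derived from this diff, not the shipped module (which is bundled, untyped JavaScript).

// Sketch of the new LLM logging flow (mirrors src/cli/llm-logging.ts above).
import { mkdir, writeFile } from "node:fs/promises";
import { homedir } from "node:os";
import { join } from "node:path";

const DEFAULT_LLM_LOG_DIR = join(homedir(), ".llmist", "logs");

// Commander yields `true` for a bare `--log-llm-requests`, a string when a
// directory argument is supplied, and `undefined` when the flag is absent.
type LogDirOption = string | boolean | undefined;

function resolveLogDir(option: LogDirOption, subdir: string): string | undefined {
  if (option === true) return join(DEFAULT_LLM_LOG_DIR, subdir); // bare flag: default location
  if (typeof option === "string") return option; // explicit directory wins
  return undefined; // flag absent: logging disabled
}

interface Message {
  role: string;
  content?: string;
}

// Requests are serialized as plain text with one "=== ROLE ===" header per message.
function formatLlmRequest(messages: Message[]): string {
  const lines: string[] = [];
  for (const msg of messages) {
    lines.push(`=== ${msg.role.toUpperCase()} ===`);
    lines.push(msg.content ?? "");
    lines.push("");
  }
  return lines.join("\n");
}

async function writeLogFile(dir: string, filename: string, content: string): Promise<void> {
  await mkdir(dir, { recursive: true }); // log directory is created lazily on first use
  await writeFile(join(dir, filename), content, "utf-8");
}

// Example: one agent-loop call logged under ~/.llmist/logs/requests/.
const dir = resolveLogDir(true, "requests");
if (dir) {
  const filename = `${Date.now()}_call_1.request.txt`; // agent path: <timestamp>_call_<n>.request.txt
  await writeLogFile(dir, filename, formatLlmRequest([
    { role: "system", content: "You are helpful." },
    { role: "user", content: "Hello" },
  ]));
}

The complete command follows the same flow with <timestamp>_complete.request.txt and <timestamp>_complete.response.txt names; responses are written verbatim (context.rawResponse in the agent path, the accumulated stream text in the complete path).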