jinzd-ai-cli 0.4.57 → 0.4.59

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,7 +10,7 @@ import {
   SUBAGENT_DEFAULT_MAX_ROUNDS,
   SUBAGENT_MAX_ROUNDS_LIMIT,
   runTestsTool
-} from "./chunk-C2MNNHJ6.js";
+} from "./chunk-YJCJBUOG.js";
 
 // src/tools/builtin/bash.ts
 import { execSync } from "child_process";
@@ -221,7 +221,7 @@ Important rules:
   },
   timeout: {
     type: "number",
-    description: "Timeout in milliseconds, defaults to 30000",
+    description: "Timeout in milliseconds, defaults to 120000 (2 min), max 300000 (5 min). For recursive filesystem operations (e.g. Get-ChildItem -Recurse on large trees, find on deep dirs), pass a larger value explicitly (e.g. 240000) \u2014 the default may be too short.",
    required: false
   }
 },
@@ -229,8 +229,9 @@ Important rules:
 },
 async execute(args) {
   const command = String(args["command"] ?? "");
+  const DEFAULT_TIMEOUT = 12e4;
   const MAX_TIMEOUT = 3e5;
-  const timeout = Math.min(Math.max(Number(args["timeout"] ?? 3e4), 1e3), MAX_TIMEOUT);
+  const timeout = Math.min(Math.max(Number(args["timeout"] ?? DEFAULT_TIMEOUT), 1e3), MAX_TIMEOUT);
   const cwdArg = args["cwd"] ? String(args["cwd"]) : void 0;
   if (!command.trim()) {
     throw new ToolError("bash", "command is required");
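
Whatever the caller passes for timeout, the clamp above forces it into the 1 s to 5 min range. A minimal sketch of the same expression with hypothetical inputs (plain Node, not part of the package):

const DEFAULT_TIMEOUT = 12e4; // 120000 ms
const MAX_TIMEOUT = 3e5;      // 300000 ms
// Same clamp as the diff: floor 1e3 ms, ceiling MAX_TIMEOUT, default DEFAULT_TIMEOUT.
const clampTimeout = (raw) =>
  Math.min(Math.max(Number(raw ?? DEFAULT_TIMEOUT), 1e3), MAX_TIMEOUT);

clampTimeout(undefined); // 120000 -- default applies
clampTimeout(500);       // 1000   -- raised to the 1 s floor
clampTimeout(6e5);       // 300000 -- capped at MAX_TIMEOUT
clampTimeout("240000");  // 240000 -- Number() coerces string arguments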
@@ -287,18 +288,38 @@ Important rules:
     return result || "(command completed with no output)";
   } catch (err) {
     pushBashUndoEntries(beforeSnapshot, parsedTargetsBefore, effectiveCwd);
-    if (err && typeof err === "object" && "status" in err) {
+    if (err && typeof err === "object") {
       const execErr = err;
-      const stderr = IS_WINDOWS && Buffer.isBuffer(execErr.stderr) ? execErr.stderr.toString("utf-8").trim() : execErr.stderr?.toString().trim() ?? "";
-      const stdout = IS_WINDOWS && Buffer.isBuffer(execErr.stdout) ? execErr.stdout.toString("utf-8").trim() : execErr.stdout?.toString().trim() ?? "";
-      const combined = [stdout, stderr].filter(Boolean).join("\n");
-      throw new ToolError(
-        "bash",
-        `Exit code ${execErr.status}:
+      const isTimeout = execErr.code === "ETIMEDOUT" || execErr.status == null && execErr.signal === "SIGTERM" || /ETIMEDOUT/i.test(execErr.message ?? "");
+      if (isTimeout) {
+        const seconds = Math.round(timeout / 1e3);
+        throw new ToolError(
+          "bash",
+          `Command timed out after ${seconds}s.
+
+The previous command ran for longer than the timeout limit and was killed. This usually means it is scanning a large filesystem tree (recursive Get-ChildItem / find), compressing a big archive, or waiting on a network request.
+
+How to recover (pick ONE \u2014 do NOT retry the same command):
+1. Pass an explicit longer timeout, e.g. timeout: ${Math.min(timeout * 2, MAX_TIMEOUT)}
+2. Use a non-recursive / narrower alternative (e.g. 'Get-ChildItem -File' without -Recurse, or limit depth with -Depth 1)
+3. Split the work into smaller batches (process subdirectories one at a time)
+4. For size/count queries, use '(Get-ChildItem X).Count' per directory instead of one giant pipeline
+
+[Do NOT retry the identical command \u2014 it will time out again.]`
+        );
+      }
+      if ("status" in execErr && execErr.status !== void 0) {
+        const stderr = IS_WINDOWS && Buffer.isBuffer(execErr.stderr) ? execErr.stderr.toString("utf-8").trim() : execErr.stderr?.toString().trim() ?? "";
+        const stdout = IS_WINDOWS && Buffer.isBuffer(execErr.stdout) ? execErr.stdout.toString("utf-8").trim() : execErr.stdout?.toString().trim() ?? "";
+        const combined = [stdout, stderr].filter(Boolean).join("\n");
+        throw new ToolError(
+          "bash",
+          `Exit code ${execErr.status}:
 ${combined || (execErr.message ?? "Unknown error")}
 
 [Command failed. Report this error to the user. Do not retry with variant commands.]`
-      );
+        );
+      }
     }
     throw err;
   }
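
The new isTimeout check hedges across the different shapes an execSync timeout error can take in Node. A sketch of the scenario it matches (hypothetical command; exact error fields vary by platform and Node version):

import { execSync } from "child_process";

try {
  // With { timeout }, Node kills the child once the limit passes (SIGTERM by default).
  execSync("sleep 10", { timeout: 1000 });
} catch (err) {
  // The thrown error may carry err.code === "ETIMEDOUT", or err.signal === "SIGTERM"
  // with err.status == null (killed by signal, so no exit code), or just an
  // "ETIMEDOUT" substring in err.message -- the three alternatives the diff tests.
}

Note the precedence in the original expression: it parses as code === "ETIMEDOUT" || (status == null && signal === "SIGTERM") || /ETIMEDOUT/i.test(...), so a SIGTERM paired with a real exit status is not treated as a timeout and still reaches the exit-code branch.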
@@ -6,7 +6,7 @@ import { platform } from "os";
 import chalk from "chalk";
 
 // src/core/constants.ts
-var VERSION = "0.4.57";
+var VERSION = "0.4.59";
 var APP_NAME = "ai-cli";
 var CONFIG_DIR_NAME = ".aicli";
 var CONFIG_FILE_NAME = "config.json";
@@ -7,7 +7,7 @@ import {
   ProviderNotFoundError,
   RateLimitError,
   schemaToJsonSchema
-} from "./chunk-G6K64M6X.js";
+} from "./chunk-7RX7675B.js";
 import {
   APP_NAME,
   CONFIG_DIR_NAME,
@@ -20,7 +20,7 @@ import {
   MCP_TOOL_PREFIX,
   PLUGINS_DIR_NAME,
   VERSION
-} from "./chunk-C2MNNHJ6.js";
+} from "./chunk-YJCJBUOG.js";
 
 // src/config/config-manager.ts
 import { readFileSync, writeFileSync, existsSync, mkdirSync } from "fs";
@@ -8,7 +8,7 @@ import { platform } from "os";
 import chalk from "chalk";
 
 // src/core/constants.ts
-var VERSION = "0.4.57";
+var VERSION = "0.4.59";
 var APP_NAME = "ai-cli";
 var CONFIG_DIR_NAME = ".aicli";
 var CONFIG_FILE_NAME = "config.json";
@@ -385,7 +385,7 @@ ${content}`);
   }
 }
 async function runTaskMode(config, providers, configManager, topic) {
-  const { TaskOrchestrator } = await import("./task-orchestrator-FRF6LTWK.js");
+  const { TaskOrchestrator } = await import("./task-orchestrator-I5YCZ72U.js");
   const orchestrator = new TaskOrchestrator(config, providers, configManager);
   let interrupted = false;
   const onSigint = () => {
package/dist/index.js CHANGED
@@ -27,7 +27,7 @@ import {
   saveDevState,
   sessionHasMeaningfulContent,
   setupProxy
-} from "./chunk-G5REL4FK.js";
+} from "./chunk-NXSYL5OP.js";
 import {
   ToolExecutor,
   ToolRegistry,
@@ -41,7 +41,7 @@ import {
   spawnAgentContext,
   theme,
   undoStack
-} from "./chunk-G6K64M6X.js";
+} from "./chunk-7RX7675B.js";
 import {
   fileCheckpoints
 } from "./chunk-4BKXL7SM.js";
@@ -66,7 +66,7 @@ import {
   SKILLS_DIR_NAME,
   VERSION,
   buildUserIdentityPrompt
-} from "./chunk-C2MNNHJ6.js";
+} from "./chunk-YJCJBUOG.js";
 
 // src/index.ts
 import { program } from "commander";
@@ -2161,7 +2161,7 @@ ${hint}` : "")
   usage: "/test [command|filter]",
   async execute(args, ctx) {
     try {
-      const { executeTests } = await import("./run-tests-NVCAP42D.js");
+      const { executeTests } = await import("./run-tests-IW6GHAVV.js");
       const argStr = args.join(" ").trim();
       let testArgs = {};
       if (argStr) {
@@ -4322,7 +4322,29 @@ Session '${this.resumeSessionId}' not found.
     sendNotification("ai-cli", `Task completed in ${Math.round(elapsed / 1e3)}s`);
   }
 } catch (err) {
-  this.renderer.renderError(err);
+  const errMsg = err instanceof Error ? err.message : String(err);
+  const isCtxLengthError = /maximum context length|context_length_exceeded|context window|too many tokens|reduce the length of the messages/i.test(errMsg);
+  if (isCtxLengthError) {
+    process.stderr.write(
+      theme.error(`
+\u26A0 Context length exceeded \u2014 the conversation is too long for this model.
+`)
+    );
+    process.stderr.write(theme.dim(`  Details: ${errMsg.split("\n")[0]}
+`));
+    process.stderr.write(
+      theme.dim(
+        `  Recovery options:
+  1. Run /compact to summarize old messages and free context
+  2. Run /clear to start a fresh session (keeps this terminal)
+  3. Run /model to switch to a model with a larger context window
+
+`
+      )
+    );
+  } else {
+    this.renderer.renderError(err);
+  }
 }
 }
 /**
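
Providers surface context overflow as ordinary thrown errors with provider-specific wording, hence the message-sniffing regex (the same gate reappears in the web-server bundle below). A sketch against typical, hypothetical provider strings:

const isCtxLengthError = (msg) =>
  /maximum context length|context_length_exceeded|context window|too many tokens|reduce the length of the messages/i.test(msg);

isCtxLengthError("This model's maximum context length is 128000 tokens"); // true
isCtxLengthError("Error code: context_length_exceeded");                  // true
isCtxLengthError("429: rate limit reached, retry in 20s");                // false -> renderError as before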
@@ -4397,6 +4419,23 @@ Session '${this.resumeSessionId}' not found.
   }
   return total;
 }
+/**
+ * Estimate the token count of the current agentic-loop request (session messages + extraMessages + system prompt).
+ * extraMessages has a complex structure (tool_calls, tool results, etc.), so it is JSON.stringify-ed here and divided by the chars-per-token ratio.
+ * Used for the context-pressure check before each API call in the handleChatWithTools loop.
+ */
+estimateRequestTokens(systemPrompt, extraMessages) {
+  let total = this.estimateConversationTokens();
+  if (extraMessages.length > 0) {
+    try {
+      const serialized = JSON.stringify(extraMessages);
+      total += this.estimateTokens(serialized);
+    } catch {
+    }
+  }
+  void systemPrompt;
+  return total;
+}
 /**
  * Get the context window size of the current model.
  */
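
Two details in this method: void systemPrompt deliberately discards the parameter in this CLI copy (presumably estimateConversationTokens already covers the session, while the web-server copy further down counts the prompt explicitly), and the empty catch exists because JSON.stringify can throw. A sketch of that failure mode, using a hypothetical circular structure:

const extra = [{ role: "tool", content: "ok" }];
extra[0].self = extra[0]; // circular reference
try {
  JSON.stringify(extra);  // throws TypeError: Converting circular structure to JSON
} catch {
  // swallowed, as in the diff: under-counting beats crashing the pressure check
}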
@@ -4865,6 +4904,7 @@ You have a maximum of ${maxToolRounds} tool call rounds for this task. Plan effi
 let lastToolCallSignature = "";
 let repeatedToolCallCount = 0;
 let emptyResponseRetries = 0;
+let warnedCtx80 = false;
 const roundToolHistory = [];
 this.setupInterjectionListener();
 try {
@@ -4923,6 +4963,52 @@ You have a maximum of ${maxToolRounds} tool call rounds for this task. Plan effi
 `));
   extraMessages.push({ role: "user", content: msg });
 }
+const ctxWindow = this.getContextWindowSize();
+if (ctxWindow > 0) {
+  const reqTokens = this.estimateRequestTokens(systemPrompt, extraMessages);
+  const reqRatio = reqTokens / ctxWindow;
+  if (reqRatio >= 0.95) {
+    spinner.stop();
+    process.stderr.write(
+      theme.error(
+        `
+\u26A0 Context at ${Math.round(reqRatio * 100)}% of ${fmtTokens(ctxWindow)} \u2014 aborting agentic loop before API rejection.
+`
+      )
+    );
+    process.stderr.write(
+      theme.dim(
+        `  Too much tool output accumulated this turn. Your work so far is preserved.
+  Recovery: run /compact to shrink history, then ask the AI to continue.
+
+`
+      )
+    );
+    if (roundUsage.inputTokens > 0 || roundUsage.outputTokens > 0) {
+      this.addSessionUsage(roundUsage);
+      session.addTokenUsage(roundUsage);
+      if (this.shouldShowTokens()) {
+        this.renderer.renderUsage(roundUsage, this.sessionTokenUsage);
+      }
+    }
+    return;
+  } else if (reqRatio >= 0.8 && !warnedCtx80) {
+    warnedCtx80 = true;
+    spinner.stop();
+    process.stdout.write(
+      theme.warning(
+        `
+\u26A0 Context at ${Math.round(reqRatio * 100)}% of ${fmtTokens(ctxWindow)} \u2014 asking AI to wrap up.
+`
+      )
+    );
+    extraMessages.push({
+      role: "user",
+      content: `\u26A0\uFE0F Context pressure: ~${Math.round(reqRatio * 100)}% of the ${fmtTokens(ctxWindow)} context window is used. Avoid reading more files or running broad scans. Finish the current critical step, then produce a final summary. Every unnecessary tool call now risks breaking the conversation.`
+    });
+    spinner.start(`Thinking... (round ${round + 1}/${maxToolRounds})`);
+  }
+}
 let result;
 let alreadyRendered = false;
 const chatRequest = {
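
For concreteness, the two thresholds with a hypothetical 128,000-token window:

const ctxWindow = 128000;               // hypothetical model
const reqTokens = 105000;               // estimated size of the next request
const reqRatio = reqTokens / ctxWindow; // ~0.82
reqRatio >= 0.95;                       // false -- the hard abort needs >= 121600 tokens
reqRatio >= 0.8;                        // true  -- one-time wrap-up nudge
Math.round(reqRatio * 100);             // 82, as printed in the warning

The 95% branch returns out of the agentic loop entirely; the 80% branch injects a user-role message and continues, with warnedCtx80 latching so the nudge fires at most once per turn.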
@@ -5609,7 +5695,7 @@ program.command("web").description("Start Web UI server with browser-based chat
     console.error("Error: Invalid port number. Must be between 1 and 65535.");
     process.exit(1);
   }
-  const { startWebServer } = await import("./server-46J5MXHG.js");
+  const { startWebServer } = await import("./server-J7PNU32E.js");
   await startWebServer({ port, host: options.host });
 });
 program.command("user [action] [username]").description("Manage Web UI users (list | create <name> | delete <name> | reset-password <name> | migrate <name>)").action(async (action, username) => {
@@ -5842,7 +5928,7 @@ program.command("hub [topic]").description("Start multi-agent hub (discuss / bra
   }),
   config.get("customProviders")
 );
-const { startHub } = await import("./hub-P3BR4JB5.js");
+const { startHub } = await import("./hub-3BY5W4VE.js");
 await startHub(
   {
     topic: topic ?? "",
@@ -2,7 +2,7 @@
 import {
   executeTests,
   runTestsTool
-} from "./chunk-C2MNNHJ6.js";
+} from "./chunk-YJCJBUOG.js";
 export {
   executeTests,
   runTestsTool
@@ -1,7 +1,7 @@
 import {
   executeTests,
   runTestsTool
-} from "./chunk-H7MNK3YO.js";
+} from "./chunk-F5WLEWN2.js";
 export {
   executeTests,
   runTestsTool
@@ -17,7 +17,7 @@ import {
   hadPreviousWriteToolCalls,
   loadDevState,
   setupProxy
-} from "./chunk-G5REL4FK.js";
+} from "./chunk-NXSYL5OP.js";
 import {
   AuthManager
 } from "./chunk-BYNY5JPB.js";
@@ -36,7 +36,7 @@ import {
   spawnAgentContext,
   truncateOutput,
   undoStack
-} from "./chunk-G6K64M6X.js";
+} from "./chunk-7RX7675B.js";
 import "./chunk-4BKXL7SM.js";
 import {
   AGENTIC_BEHAVIOR_GUIDELINE,
@@ -56,7 +56,7 @@ import {
   SKILLS_DIR_NAME,
   VERSION,
   buildUserIdentityPrompt
-} from "./chunk-C2MNNHJ6.js";
+} from "./chunk-YJCJBUOG.js";
 
 // src/web/server.ts
 import express from "express";
@@ -642,6 +642,49 @@ var SessionHandler = class _SessionHandler {
   } catch {
   }
 }
+/** Get the context window size of the current model (0 means unknown) */
+getContextWindowSize() {
+  try {
+    const provider = this.providers.get(this.currentProvider);
+    const modelInfo = provider?.info.models.find((m) => m.id === this.currentModel);
+    return modelInfo?.contextWindow ?? 0;
+  } catch {
+    return 0;
+  }
+}
+/** Roughly estimate the token count of a text (2.5 chars/token) */
+estTokens(text) {
+  return Math.ceil(text.length / 2.5);
+}
+/**
+ * Estimate the total token count of the current agentic request (session messages + extraMessages + system prompt).
+ * Used for the pressure check before each API call in the handleChat loop.
+ */
+estimateRequestTokens(systemPrompt, extraMessages) {
+  let total = 0;
+  if (systemPrompt) total += this.estTokens(systemPrompt);
+  const session = this.sessions.current;
+  if (session) {
+    for (const msg of session.messages) {
+      if (typeof msg.content === "string") {
+        total += this.estTokens(msg.content);
+      } else if (Array.isArray(msg.content)) {
+        for (const part of msg.content) {
+          if (part.type === "text" && part.text) {
+            total += this.estTokens(part.text);
+          }
+        }
+      }
+    }
+  }
+  if (extraMessages.length > 0) {
+    try {
+      total += this.estTokens(JSON.stringify(extraMessages));
+    } catch {
+    }
+  }
+  return total;
+}
 /** Save session only if it exists and has messages (never persist empty "Untitled" sessions). */
 saveIfNeeded() {
   if (this.sessions.current && this.sessions.current.messages.length > 0) {
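
The web handler carries its own flat 2.5-chars-per-token heuristic rather than reusing the CLI estimator. Roughly what it yields, on hypothetical inputs:

const estTokens = (text) => Math.ceil(text.length / 2.5);

estTokens("hello world");      // ceil(11 / 2.5) = 5
estTokens("x".repeat(320000)); // 128000 -- enough to fill a 128k window alone
// JSON.stringify on extraMessages also counts keys, quotes, and braces, so
// tool-call-heavy turns skew the estimate high -- conservative in the right
// direction for an overflow guard.

2.5 chars/token is pessimistic for English prose (closer to 4 for common tokenizers) but reasonable for the code and JSON that dominate agentic tool output.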
@@ -801,6 +844,7 @@ You have a maximum of ${maxToolRounds} tool call rounds for this task. Plan effi
 let warnedLow = false;
 let warnedCritical = false;
 let emptyResponseRetries = 0;
+let warnedCtx80 = false;
 const ac = new AbortController();
 this.abortController = ac;
 try {
@@ -836,6 +880,38 @@ You have a maximum of ${maxToolRounds} tool call rounds for this task. Plan effi
   this.send({ type: "info", message: `\u26A1 Interjection: "${msg}"` });
   extraMessages.push({ role: "user", content: msg });
 }
+const ctxWindow = this.getContextWindowSize();
+if (ctxWindow > 0) {
+  const reqTokens = this.estimateRequestTokens(systemPrompt, extraMessages);
+  const reqRatio = reqTokens / ctxWindow;
+  if (reqRatio >= 0.95) {
+    this.send({
+      type: "response_done",
+      content: `\u26A0 Context at ${Math.round(reqRatio * 100)}% of ${ctxWindow.toLocaleString()} tokens \u2014 aborting before API rejection.
+
+Too much tool output accumulated this turn. Your work so far is preserved.
+
+**Recovery**:
+1. Run \`/compact\` to shrink history, then ask the AI to continue
+2. Run \`/clear\` to start fresh
+3. Switch to a larger-context model`,
+      usage: roundUsage
+    });
+    this.addWebSessionUsage(roundUsage);
+    session.addTokenUsage(roundUsage);
+    return;
+  } else if (reqRatio >= 0.8 && !warnedCtx80) {
+    warnedCtx80 = true;
+    this.send({
+      type: "info",
+      message: `\u26A0 Context at ${Math.round(reqRatio * 100)}% \u2014 asking AI to wrap up`
+    });
+    extraMessages.push({
+      role: "user",
+      content: `\u26A0\uFE0F Context pressure: ~${Math.round(reqRatio * 100)}% of the ${ctxWindow.toLocaleString()}-token context window is used. Avoid reading more files or running broad scans. Finish the current critical step, then produce a final summary. Every unnecessary tool call now risks breaking the conversation.`
+    });
+  }
+}
 const chatRequest = {
   messages: apiMessages,
   model: this.currentModel,
@@ -850,11 +926,34 @@ You have a maximum of ${maxToolRounds} tool call rounds for this task. Plan effi
   ...extraMessages.length > 0 ? { _extraMessages: extraMessages } : {}
 };
 let result;
-if (supportsStreamingTools) {
-  const streamGen = provider.chatWithToolsStream(chatRequest, toolDefs);
-  result = await this.consumeToolStream(streamGen, ac);
-} else {
-  result = await provider.chatWithTools(chatRequest, toolDefs);
+try {
+  if (supportsStreamingTools) {
+    const streamGen = provider.chatWithToolsStream(chatRequest, toolDefs);
+    result = await this.consumeToolStream(streamGen, ac);
+  } else {
+    result = await provider.chatWithTools(chatRequest, toolDefs);
+  }
+} catch (providerErr) {
+  const errMsg = providerErr instanceof Error ? providerErr.message : String(providerErr);
+  const isCtxLengthError = /maximum context length|context_length_exceeded|context window|too many tokens|reduce the length of the messages/i.test(errMsg);
+  if (isCtxLengthError) {
+    this.send({
+      type: "response_done",
+      content: `\u26A0 Context length exceeded \u2014 the conversation is too long for this model.
+
+Details: ${errMsg.split("\n")[0]}
+
+**Recovery options**:
+1. Run \`/compact\` to summarize old messages and free context
+2. Run \`/clear\` to start a fresh session
+3. Run \`/model\` to switch to a model with a larger context window`,
+      usage: roundUsage
+    });
+    this.addWebSessionUsage(roundUsage);
+    session.addTokenUsage(roundUsage);
+    return;
+  }
+  throw providerErr;
 }
 if (ac.signal.aborted) break;
 if (result.usage) {
@@ -1816,7 +1915,7 @@ ${undoResults.map((r) => ` \u2022 ${r}`).join("\n")}` });
 case "test": {
   this.send({ type: "info", message: "\u{1F9EA} Running tests..." });
   try {
-    const { executeTests } = await import("./run-tests-NVCAP42D.js");
+    const { executeTests } = await import("./run-tests-IW6GHAVV.js");
     const argStr = args.join(" ").trim();
     let testArgs = {};
     if (argStr) {
@@ -4,11 +4,11 @@ import {
   getDangerLevel,
   googleSearchContext,
   truncateOutput
-} from "./chunk-G6K64M6X.js";
+} from "./chunk-7RX7675B.js";
 import "./chunk-4BKXL7SM.js";
 import {
   SUBAGENT_ALLOWED_TOOLS
-} from "./chunk-C2MNNHJ6.js";
+} from "./chunk-YJCJBUOG.js";
 
 // src/hub/task-orchestrator.ts
 import { createInterface } from "readline";
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "jinzd-ai-cli",
-  "version": "0.4.57",
+  "version": "0.4.59",
   "description": "Cross-platform REPL-style AI CLI with multi-provider support",
   "type": "module",
   "main": "./dist/index.js",