cliskill 1.0.1 → 1.0.3

This diff shows the changes between publicly available package versions released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  import {
3
3
  runCli
4
- } from "../chunk-CBGJMMA3.js";
4
+ } from "../chunk-PBLJ6557.js";
5
5
  import "../chunk-AJENHWD3.js";
6
6
  export {
7
7
  runCli
@@ -388,6 +388,19 @@ var BaseAdapter = class {
388
388
  }
389
389
  return timeoutSignal;
390
390
  }
391
+ /**
392
+ * Build an AbortSignal for streaming requests with extended timeout.
393
+ * Streaming responses can take much longer than regular requests,
394
+ * so we use 3x the configured timeout to avoid premature aborts.
395
+ */
396
+ buildStreamingSignal(external) {
397
+ const streamTimeout = Math.max(this.config.timeout * 3, 6e5);
398
+ const timeoutSignal = AbortSignal.timeout(streamTimeout);
399
+ if (external) {
400
+ return AbortSignal.any([timeoutSignal, external]);
401
+ }
402
+ return timeoutSignal;
403
+ }
391
404
  /**
392
405
  * Fetch with automatic retry for rate-limit (429), server errors (5xx),
393
406
  * and network failures (TypeError: fetch failed).
@@ -461,7 +474,7 @@ var GenericCompatAdapter = class extends BaseAdapter {
461
474
  method: "POST",
462
475
  headers: this.buildHeaders(),
463
476
  body: JSON.stringify(body),
464
- signal: this.buildSignal(request.signal)
477
+ signal: this.buildStreamingSignal(request.signal)
465
478
  }));
466
479
  if (!response.ok) {
467
480
  const text = await response.text().catch(() => "unknown error");
@@ -670,7 +683,7 @@ var GLMAdapter = class extends BaseAdapter {
670
683
  method: "POST",
671
684
  headers: this.buildHeaders(),
672
685
  body: JSON.stringify(body),
673
- signal: this.buildSignal(request.signal)
686
+ signal: this.buildStreamingSignal(request.signal)
674
687
  }));
675
688
  if (!response.ok) {
676
689
  const text = await response.text().catch(() => "unknown error");
@@ -5594,40 +5607,36 @@ ${result.summary}` }]
5594
5607
  break;
5595
5608
  }
5596
5609
  if (parser.stopReason === "max_tokens") {
5597
- const truncatedCalls = extractToolCalls(parser.assistantContent);
5598
- const hasEmptyInput = truncatedCalls.some((tc) => Object.keys(tc.input).length === 0);
5599
- if (hasEmptyInput || truncatedCalls.length === 0) {
5600
- if (maxOutputTokensRecoveryCount === 0 && maxOutputTokensOverride === void 0) {
5601
- maxOutputTokensOverride = ESCALATED_MAX_TOKENS;
5602
- maxOutputTokensRecoveryCount++;
5603
- state.messages.pop();
5604
- yield { type: "assistant_text", text: " [escalating token limit...]" };
5605
- continue;
5606
- }
5607
- if (maxOutputTokensRecoveryCount < MAX_OUTPUT_TOKENS_RECOVERY_LIMIT) {
5608
- const textOnly = parser.assistantContent.filter((b) => b.type === "text");
5609
- state.messages[state.messages.length - 1] = {
5610
- role: "assistant",
5611
- content: textOnly.length > 0 ? textOnly : [{ type: "text", text: "[Response truncated \u2014 output token limit]" }]
5612
- };
5613
- state.messages.push({
5614
- role: "user",
5615
- content: [{
5616
- type: "text",
5617
- text: "Output token limit hit. Resume directly \u2014 no apology, no recap. Pick up mid-thought if that is where the cut happened. Break remaining work into smaller pieces."
5618
- }]
5619
- });
5620
- maxOutputTokensRecoveryCount++;
5621
- maxOutputTokensOverride = void 0;
5622
- yield { type: "assistant_text", text: " [truncated \u2014 requesting continuation...]" };
5623
- continue;
5624
- }
5625
- yield {
5626
- type: "error",
5627
- error: new Error("Output token limit reached after multiple recovery attempts. Consider splitting your request into smaller tasks.")
5610
+ if (maxOutputTokensRecoveryCount === 0 && maxOutputTokensOverride === void 0) {
5611
+ maxOutputTokensOverride = ESCALATED_MAX_TOKENS;
5612
+ maxOutputTokensRecoveryCount++;
5613
+ state.messages.pop();
5614
+ yield { type: "assistant_text", text: " [escalating token limit...]" };
5615
+ continue;
5616
+ }
5617
+ if (maxOutputTokensRecoveryCount < MAX_OUTPUT_TOKENS_RECOVERY_LIMIT) {
5618
+ const textOnly = parser.assistantContent.filter((b) => b.type === "text");
5619
+ state.messages[state.messages.length - 1] = {
5620
+ role: "assistant",
5621
+ content: textOnly.length > 0 ? textOnly : [{ type: "text", text: "[Response truncated \u2014 output token limit]" }]
5628
5622
  };
5629
- break;
5623
+ state.messages.push({
5624
+ role: "user",
5625
+ content: [{
5626
+ type: "text",
5627
+ text: "Output token limit hit. Resume directly \u2014 no apology, no recap. Pick up mid-thought if that is where the cut happened. IMPORTANT: Break remaining work into smaller pieces \u2014 write files in multiple smaller writes instead of one huge write."
5628
+ }]
5629
+ });
5630
+ maxOutputTokensRecoveryCount++;
5631
+ maxOutputTokensOverride = void 0;
5632
+ yield { type: "assistant_text", text: " [truncated \u2014 requesting continuation...]" };
5633
+ continue;
5630
5634
  }
5635
+ yield {
5636
+ type: "error",
5637
+ error: new Error("Output token limit reached after multiple recovery attempts. Consider splitting your request into smaller tasks.")
5638
+ };
5639
+ break;
5631
5640
  } else {
5632
5641
  maxOutputTokensRecoveryCount = 0;
5633
5642
  maxOutputTokensOverride = void 0;
@@ -10021,4 +10030,4 @@ export {
10021
10030
  MessageList,
10022
10031
  runCli
10023
10032
  };
10024
- //# sourceMappingURL=chunk-CBGJMMA3.js.map
10033
+ //# sourceMappingURL=chunk-PBLJ6557.js.map