opencodekit 0.20.1 → 0.20.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -20,7 +20,7 @@ var __require = /* @__PURE__ */ createRequire(import.meta.url);
20
20
 
21
21
  //#endregion
22
22
  //#region package.json
23
- var version = "0.20.1";
23
+ var version = "0.20.2";
24
24
 
25
25
  //#endregion
26
26
  //#region src/utils/license.ts
@@ -24,11 +24,11 @@
24
24
  },
25
25
  // Glob patterns for files that should never be auto-pruned
26
26
  // Keep tight: broad patterns reduce DCP effectiveness
27
+ // .opencode/** and .beads/** removed — memory-* and tilth_* outputs
28
+ // already survive compression via compress.protectedTools
27
29
  "protectedFilePatterns": [
28
30
  "**/.env*",
29
31
  "**/AGENTS.md",
30
- "**/.opencode/**",
31
- "**/.beads/**",
32
32
  "**/package.json",
33
33
  "**/tsconfig.json"
34
34
  ],
@@ -43,34 +43,41 @@
43
43
  // v3.1.0: active summary tokens extend effective maxContextLimit
44
44
  "summaryBuffer": true,
45
45
  // Soft upper threshold: above this, strong compression nudges fire
46
- // Accepts number or "X%" of model context window
47
- "maxContextLimit": "80%",
46
+ // Use numeric values — percentage requires modelContextLimit from runtime
47
+ // which may be unavailable for some provider/model combos (e.g. GitHub Copilot)
48
+ // Rule: must be BELOW OpenCode emergency threshold (model_max - reserved - max_output)
49
+ // For Copilot Claude (216k ctx, 64k out, 16k reserved): emergency = 136k
50
+ // So DCP must start compressing well before 136k
51
+ "maxContextLimit": 100000,
48
52
  // Per-model override for maxContextLimit (takes priority over global)
49
- // Aligned to claude-opus-4.6 (216k context, 64k output) as primary build agent
50
53
  "modelMaxLimits": {
51
- "github-copilot/claude-opus-4.6": 192000,
52
- "github-copilot/claude-opus-4.5": 192000,
53
- "github-copilot/claude-sonnet-4.6": 192000,
54
- "github-copilot/claude-sonnet-4.5": 192000,
55
- "github-copilot/claude-sonnet-4": 192000,
56
- "github-copilot/claude-haiku-4.5": 172000,
57
- "github-copilot/gpt-5.4": 192000,
58
- "github-copilot/gpt-5.3-codex": 192000,
59
- "github-copilot/gemini-3.1-pro-preview": 192000
54
+ // Claude: 216k ctx, 64k out → emergency at 136k → DCP starts at 110k
55
+ "github-copilot/claude-opus-4.6": 110000,
56
+ "github-copilot/claude-opus-4.5": 110000,
57
+ "github-copilot/claude-sonnet-4.6": 110000,
58
+ "github-copilot/claude-sonnet-4.5": 110000,
59
+ "github-copilot/claude-sonnet-4": 110000,
60
+ // Haiku: smaller model, be more conservative
61
+ "github-copilot/claude-haiku-4.5": 90000,
62
+ // GPT/Gemini: assume similar 200k+ windows
63
+ "github-copilot/gpt-5.4": 110000,
64
+ "github-copilot/gpt-5.3-codex": 110000,
65
+ "github-copilot/gemini-3.1-pro-preview": 110000
60
66
  },
61
67
  // Soft lower threshold: below this, turn/iteration reminders are off
62
- "minContextLimit": "35%",
68
+ // Use numeric values — same reason as maxContextLimit above
69
+ "minContextLimit": 50000,
63
70
  // Per-model override for minContextLimit (takes priority over global)
64
71
  "modelMinLimits": {
65
- "github-copilot/claude-opus-4.6": "30%",
66
- "github-copilot/claude-opus-4.5": "35%",
67
- "github-copilot/claude-sonnet-4.6": "35%",
68
- "github-copilot/claude-sonnet-4.5": "35%",
69
- "github-copilot/claude-sonnet-4": "35%",
70
- "github-copilot/claude-haiku-4.5": "25%",
71
- "github-copilot/gpt-5.4": "30%",
72
- "github-copilot/gpt-5.3-codex": "30%",
73
- "github-copilot/gemini-3.1-pro-preview": "30%"
72
+ "github-copilot/claude-opus-4.6": 65000,
73
+ "github-copilot/claude-opus-4.5": 65000,
74
+ "github-copilot/claude-sonnet-4.6": 65000,
75
+ "github-copilot/claude-sonnet-4.5": 65000,
76
+ "github-copilot/claude-sonnet-4": 65000,
77
+ "github-copilot/claude-haiku-4.5": 50000,
78
+ "github-copilot/gpt-5.4": 65000,
79
+ "github-copilot/gpt-5.3-codex": 65000,
80
+ "github-copilot/gemini-3.1-pro-preview": 65000
74
81
  },
75
82
  // How often context-limit nudge fires above maxContextLimit (1 = every fetch)
76
83
  "nudgeFrequency": 5,
Binary file
@@ -175,7 +175,7 @@
175
175
  "output": 32000
176
176
  },
177
177
  "options": {
178
- "thinking_budget": 10000,
178
+ "thinking_budget": 24000,
179
179
  "type": "enabled"
180
180
  },
181
181
  "reasoning": true,
@@ -229,7 +229,7 @@
229
229
  },
230
230
  "options": {
231
231
  "thinking": {
232
- "budget_tokens": 16000,
232
+ "budget_tokens": 24000,
233
233
  "type": "enabled"
234
234
  }
235
235
  },
@@ -324,7 +324,7 @@
324
324
  },
325
325
  "options": {
326
326
  "thinking": {
327
- "budget_tokens": 16000,
327
+ "budget_tokens": 24000,
328
328
  "type": "enabled"
329
329
  }
330
330
  },
@@ -1674,6 +1674,6 @@
1674
1674
  ]
1675
1675
  },
1676
1676
  "compaction": {
1677
- "reserved": 128000
1677
+ "reserved": 16000
1678
1678
  }
1679
1679
  }
@@ -12,7 +12,7 @@
12
12
  },
13
13
  "dependencies": {
14
14
  "@google/stitch-sdk": "^0.0.3",
15
- "@opencode-ai/plugin": "1.3.13"
15
+ "@opencode-ai/plugin": "1.3.17"
16
16
  },
17
17
  "devDependencies": {
18
18
  "@types/node": "^25.3.0",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "opencodekit",
3
- "version": "0.20.1",
3
+ "version": "0.20.2",
4
4
  "description": "CLI tool for bootstrapping and managing OpenCodeKit projects",
5
5
  "keywords": [
6
6
  "agents",