opencodekit 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -750,7 +750,7 @@ var cac = (name = "") => new CAC(name);
750
750
  // package.json
751
751
  var package_default = {
752
752
  name: "opencodekit",
753
- version: "0.5.0",
753
+ version: "0.6.0",
754
754
  description: "CLI tool for bootstrapping and managing OpenCodeKit projects",
755
755
  type: "module",
756
756
  repository: {
@@ -230,6 +230,14 @@ read_session("today") # Today's first session
230
230
  read_session("ses_abc123", focus="file changes") # Specific aspect
231
231
  ```
232
232
 
233
+ **`summarize_session`** - Generate AI summary of a session
234
+
235
+ ```
236
+ summarize_session("ses_abc123") # Trigger AI summarization
237
+ ```
238
+
239
+ Use this before `read_session` to get a quick overview of what happened in a past session without loading its full context.
240
+
233
241
  ### When to Start New Session
234
242
 
235
243
  - Completing distinct task from `bd ready`
@@ -0,0 +1,71 @@
1
+ ---
2
+ description: Generate AI summary of a previous session for quick context
3
+ argument-hint: "[session_reference]"
4
+ agent: build
5
+ ---
6
+
7
+ # Summarize
8
+
9
+ Generate an AI summary of a previous session to quickly understand what happened without loading full context.
10
+
11
+ ## Phase 1: Resolve Session
12
+
13
+ If no argument is provided, list recent sessions:
14
+
15
+ ```
16
+ list_sessions(limit=5, project="current")
17
+ ```
18
+
19
+ Parse the session reference (see the sketch after this list):
20
+
21
+ - Empty or "last" resolves to the most recent session
22
+ - "2 ago" or "3 ago" resolves to nth most recent
23
+ - "today" resolves to first session today
24
+ - "ses\_..." uses the session ID directly
25
+
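For illustration, a minimal TypeScript sketch of the resolution rules above. The `SessionMeta` shape and the `resolveSessionRef` helper are hypothetical (they are not part of the plugin), and the session list is assumed to be sorted newest-first, as `list_sessions` returns it.

```
// Hypothetical types and helper, for illustration only; not part of the sessions plugin.
interface SessionMeta {
  id: string;      // e.g. "ses_abc123"
  createdAt: Date; // session start time
}

// `sessions` is assumed to be sorted newest-first.
function resolveSessionRef(ref: string, sessions: SessionMeta[]): SessionMeta | undefined {
  const r = ref.trim().toLowerCase();

  // Empty or "last" -> most recent session
  if (r === "" || r === "last") return sessions[0];

  // Explicit session ID -> use it directly
  if (r.startsWith("ses_")) return sessions.find((s) => s.id.toLowerCase() === r);

  // "2 ago", "3 ago", ... -> nth most recent (1-indexed)
  const ago = r.match(/^(\d+)\s+ago$/);
  if (ago) return sessions[Number(ago[1]) - 1];

  // "today" -> first session started today (earliest of today's sessions)
  if (r === "today") {
    const today = new Date().toDateString();
    const todays = sessions.filter((s) => s.createdAt.toDateString() === today);
    return todays[todays.length - 1]; // newest-first list, so the last match is the earliest
  }

  return undefined;
}
```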
26
+ ## Phase 2: Generate Summary
27
+
28
+ Trigger AI summarization using the configured compaction model:
29
+
30
+ ```
31
+ summarize_session(session_id)
32
+ ```
33
+
34
+ The summary is generated asynchronously and stored with the session.
35
+
36
+ ## Phase 3: Display Result
37
+
38
+ Read the session with focus on the generated summary:
39
+
40
+ ```
41
+ read_session(session_id, focus="summary")
42
+ ```
43
+
44
+ Present the summary in a clear, scannable format showing what was accomplished, key decisions made, and any blockers encountered.
45
+
46
+ ## Output
47
+
48
+ Report the session metadata (ID, timestamp, message count, token usage) followed by the AI-generated summary. Highlight key actions taken during the session.
49
+
50
+ End with actionable next steps:
51
+
52
+ - How to load full context if needed
53
+ - How to resume associated work
54
+
55
+ ## Examples
56
+
57
+ ```
58
+ /summarize # Summarize most recent session
59
+ /summarize last # Same as above
60
+ /summarize ses_abc123 # Summarize specific session
61
+ /summarize 2 ago # Summarize 2nd most recent
62
+ /summarize today # First session today
63
+ ```
64
+
65
+ ## When to Use
66
+
67
+ Use this command before resuming work to get a quick overview without loading full context. It is helpful during handoffs to understand the previous session's state, for context triage when deciding whether a session is worth loading, or when searching across multiple past sessions for relevant work.
68
+
69
+ ## Integration
70
+
71
+ This command works alongside other session tools. Start with `list_sessions` to discover available sessions, use `/summarize` to quickly understand promising candidates, then load full context with `read_session` only for the most relevant session. This workflow saves context space by avoiding unnecessary full session loads.
@@ -1,477 +1,537 @@
1
1
  {
2
- "$schema": "https://opencode.ai/config.json",
3
- "agent": {
4
- "compaction": {
5
- "model": "proxypal/gemini-3-flash-preview",
6
- "prompt": "You are summarizing a coding session for context continuity.\n\n## Output Structure\n\nUse these sections:\n\n### COMPLETED\n- What was done (with file paths)\n- Bead IDs closed and why\n\n### IN PROGRESS\n- Current task and bead ID (if any)\n- Files being modified (exact paths)\n- Current todo state (preserve TodoWrite items)\n\n### NEXT\n- What needs to be done next\n- Blockers or pending decisions\n\n### CONSTRAINTS\n- User preferences that must persist\n- Rules or requirements stated by user\n- Technical decisions and rationale\n\n### PERSIST TO MEMORY\n- Gotchas discovered → suggest for project/gotchas.md\n- Commands learned → suggest for project/commands.md\n- Patterns observed → suggest for project/conventions.md\n\n## Rules\n\n- PRESERVE: Bead IDs, todo items, file paths, line numbers, user constraints\n- DROP: Failed attempts, superseded info, verbose tool outputs, exploration dead-ends\n- Be concise but complete - this summary replaces the full conversation\n- Include enough context that a new session can continue seamlessly"
7
- },
8
- "build": {
9
- "description": "Primary development agent with full codebase access",
10
- "model": "proxypal/gemini-claude-opus-4-5-thinking"
11
- },
12
- "explore": {
13
- "description": "Fast codebase search specialist",
14
- "model": "proxypal/gemini-3-flash-preview"
15
- },
16
- "general": {
17
- "disable": true
18
- },
19
- "plan": {
20
- "disable": true
21
- },
22
- "planner": {
23
- "description": "Strategic planning agent for architecture and design",
24
- "model": "proxypal/gpt-5.2"
25
- },
26
- "review": {
27
- "description": "Code review, debugging, and security audit specialist",
28
- "model": "proxypal/gemini-claude-opus-4-5-thinking"
29
- },
30
- "rush": {
31
- "description": "Fast primary agent for small, well-defined tasks",
32
- "model": "proxypal/gemini-3-pro-preview"
33
- },
34
- "scout": {
35
- "description": "External research specialist for library docs and patterns",
36
- "model": "proxypal/gemini-claude-sonnet-4-5"
37
- },
38
- "vision": {
39
- "description": "Visual content specialist for multimodal analysis and UI/UX guidance",
40
- "model": "proxypal/gemini-3-pro-preview"
41
- }
42
- },
43
- "autoupdate": false,
44
- "experimental": {
45
- "batch_tool": true,
46
- "chatMaxRetries": 2,
47
- "primary_tools": ["edit", "write", "bash", "prune"]
48
- },
49
- "formatter": {
50
- "biome": {
51
- "command": ["npx", "@biomejs/biome", "check", "--write", "$FILE"],
52
- "extensions": [".js", ".jsx", ".ts", ".tsx", ".json", ".jsonc"]
53
- },
54
- "java-formatter": {
55
- "command": ["google-java-format", "--replace", "$FILE"],
56
- "environment": {
57
- "JAVA_HOME": "{env:JAVA_HOME}"
58
- },
59
- "extensions": [".java"]
60
- },
61
- "prettier": {
62
- "command": ["npx", "prettier", "--write", "$FILE"],
63
- "extensions": [".html", ".css", ".scss", ".sass", ".md", ".yaml", ".yml"]
64
- }
65
- },
66
- "instructions": [".opencode/memory/user.md", ".opencode/memory/project/*.md"],
67
- "keybinds": {
68
- "command_list": ";",
69
- "leader": "`",
70
- "session_child_cycle": "ctrl+alt+right",
71
- "session_child_cycle_reverse": "ctrl+alt+left"
72
- },
73
- "mcp": {
74
- "Framelink MCP for Figma": {
75
- "command": [
76
- "npx",
77
- "-y",
78
- "figma-developer-mcp",
79
- "--figma-api-key={env:FIGMA_API_KEY}",
80
- "--stdio"
81
- ],
82
- "enabled": false,
83
- "type": "local"
84
- },
85
- "context7": {
86
- "command": [
87
- "npx",
88
- "-y",
89
- "@upstash/context7-mcp",
90
- "--api-key",
91
- "{env:CONTEXT7_API_KEY}"
92
- ],
93
- "enabled": true,
94
- "type": "local"
95
- },
96
- "gh_grep": {
97
- "enabled": true,
98
- "type": "remote",
99
- "url": "https://mcp.grep.app"
100
- },
101
- "gkg": {
102
- "enabled": true,
103
- "type": "remote",
104
- "url": "http://localhost:27495/mcp"
105
- },
106
- "playwright": {
107
- "command": ["npx", "@playwright/mcp@latest"],
108
- "enabled": false,
109
- "type": "local"
110
- }
111
- },
112
- "model": "proxypal/gemini-claude-opus-4-5-thinking",
113
- "permission": {
114
- "bash": {
115
- "git commit *": "ask",
116
- "git push *": "ask",
117
- "rm *": "ask",
118
- "rm -rf *": "ask"
119
- },
120
- "doom_loop": "ask",
121
- "edit": "allow",
122
- "external_directory": "allow"
123
- },
124
- "plugin": [
125
- "opencode-gemini-auth",
126
- "@tarquinen/opencode-dcp@latest",
127
- "@howaboua/pickle-thinker@0.1.3",
128
- "@franlol/opencode-md-table-formatter@0.0.3"
129
- ],
130
- "provider": {
131
- "github-copilot": {
132
- "models": {
133
- "claude-haiku-4.5": {
134
- "attachment": true,
135
- "options": {
136
- "thinking": {
137
- "budgetTokens": 16000,
138
- "type": "enabled"
139
- }
140
- },
141
- "reasoning": true,
142
- "temperature": true,
143
- "tool_call": true
144
- },
145
- "claude-opus-4.5": {
146
- "attachment": true,
147
- "options": {
148
- "thinking": {
149
- "budgetTokens": 32000,
150
- "type": "enabled"
151
- }
152
- },
153
- "reasoning": true,
154
- "temperature": true,
155
- "tool_call": true
156
- },
157
- "claude-sonnet-4.5": {
158
- "attachment": true,
159
- "options": {
160
- "thinking": {
161
- "budgetTokens": 16000,
162
- "type": "enabled"
163
- }
164
- },
165
- "reasoning": true,
166
- "temperature": true,
167
- "tool_call": true
168
- },
169
- "gpt-5.1": {
170
- "attachment": true,
171
- "options": {
172
- "reasoning": {
173
- "effort": "high"
174
- }
175
- },
176
- "reasoning": true,
177
- "temperature": true,
178
- "tool_call": true
179
- },
180
- "gpt-5.1-codex": {
181
- "attachment": true,
182
- "options": {
183
- "reasoning": {
184
- "effort": "high"
185
- }
186
- },
187
- "reasoning": true,
188
- "temperature": true,
189
- "tool_call": true
190
- }
191
- },
192
- "npm": "@ai-sdk/anthropic"
193
- },
194
- "opencode": {
195
- "models": {
196
- "big-pickle": {
197
- "options": {
198
- "reasoningEffort": "high",
199
- "temperature": 1,
200
- "top_k": 40,
201
- "top_p": 0.95
202
- },
203
- "reasoning": true
204
- }
205
- }
206
- },
207
- "proxypal": {
208
- "models": {
209
- "gemini-2.5-computer-use-preview-10-2025": {
210
- "limit": {
211
- "context": 1048576,
212
- "output": 65536
213
- },
214
- "name": "Gemini 2 5 Computer Use Preview 10 2025"
215
- },
216
- "gemini-2.5-flash": {
217
- "limit": {
218
- "context": 1048576,
219
- "output": 65536
220
- },
221
- "name": "Gemini 2 5 Flash"
222
- },
223
- "gemini-2.5-flash-lite": {
224
- "limit": {
225
- "context": 1048576,
226
- "output": 65536
227
- },
228
- "name": "Gemini 2 5 Flash Lite"
229
- },
230
- "gemini-2.5-pro": {
231
- "limit": {
232
- "context": 1048576,
233
- "output": 65536
234
- },
235
- "name": "Gemini 2 5 Pro"
236
- },
237
- "gemini-3-flash-preview": {
238
- "limit": {
239
- "context": 1048576,
240
- "output": 65536
241
- },
242
- "name": "Gemini 3 Flash Preview"
243
- },
244
- "gemini-3-pro-image-preview": {
245
- "limit": {
246
- "context": 1048576,
247
- "output": 65536
248
- },
249
- "name": "Gemini 3 Pro Image Preview"
250
- },
251
- "gemini-3-pro-preview": {
252
- "limit": {
253
- "context": 1048576,
254
- "output": 65536
255
- },
256
- "name": "Gemini 3 Pro Preview"
257
- },
258
- "gemini-claude-opus-4-5-thinking": {
259
- "limit": {
260
- "context": 200000,
261
- "output": 64000
262
- },
263
- "name": "Gemini Claude Opus 4 5 Thinking",
264
- "options": {
265
- "thinking": {
266
- "budgetTokens": 32768,
267
- "type": "enabled"
268
- }
269
- },
270
- "reasoning": true
271
- },
272
- "gemini-claude-sonnet-4-5": {
273
- "limit": {
274
- "context": 200000,
275
- "output": 64000
276
- },
277
- "name": "Gemini Claude Sonnet 4 5"
278
- },
279
- "gemini-claude-sonnet-4-5-thinking": {
280
- "limit": {
281
- "context": 200000,
282
- "output": 64000
283
- },
284
- "name": "Gemini Claude Sonnet 4 5 Thinking",
285
- "options": {
286
- "thinking": {
287
- "budgetTokens": 32768,
288
- "type": "enabled"
289
- }
290
- },
291
- "reasoning": true
292
- },
293
- "glm-4.6": {
294
- "limit": {
295
- "context": 128000,
296
- "output": 16384
297
- },
298
- "name": "Glm 4 6"
299
- },
300
- "gpt-5": {
301
- "limit": {
302
- "context": 128000,
303
- "output": 16384
304
- },
305
- "name": "Gpt 5",
306
- "options": {
307
- "reasoning": {
308
- "effort": "medium"
309
- }
310
- },
311
- "reasoning": true
312
- },
313
- "gpt-5-codex": {
314
- "limit": {
315
- "context": 128000,
316
- "output": 16384
317
- },
318
- "name": "Gpt 5 Codex",
319
- "options": {
320
- "reasoning": {
321
- "effort": "medium"
322
- }
323
- },
324
- "reasoning": true
325
- },
326
- "gpt-5-codex-mini": {
327
- "limit": {
328
- "context": 128000,
329
- "output": 16384
330
- },
331
- "name": "Gpt 5 Codex Mini",
332
- "options": {
333
- "reasoning": {
334
- "effort": "medium"
335
- }
336
- },
337
- "reasoning": true
338
- },
339
- "gpt-5.1": {
340
- "limit": {
341
- "context": 128000,
342
- "output": 16384
343
- },
344
- "name": "Gpt 5 1",
345
- "options": {
346
- "reasoning": {
347
- "effort": "medium"
348
- }
349
- },
350
- "reasoning": true
351
- },
352
- "gpt-5.1-codex": {
353
- "limit": {
354
- "context": 128000,
355
- "output": 16384
356
- },
357
- "name": "Gpt 5 1 Codex",
358
- "options": {
359
- "reasoning": {
360
- "effort": "medium"
361
- }
362
- },
363
- "reasoning": true
364
- },
365
- "gpt-5.1-codex-max": {
366
- "limit": {
367
- "context": 128000,
368
- "output": 16384
369
- },
370
- "name": "Gpt 5 1 Codex Max",
371
- "options": {
372
- "reasoning": {
373
- "effort": "medium"
374
- }
375
- },
376
- "reasoning": true
377
- },
378
- "gpt-5.1-codex-mini": {
379
- "limit": {
380
- "context": 128000,
381
- "output": 16384
382
- },
383
- "name": "Gpt 5 1 Codex Mini",
384
- "options": {
385
- "reasoning": {
386
- "effort": "medium"
387
- }
388
- },
389
- "reasoning": true
390
- },
391
- "gpt-5.2": {
392
- "limit": {
393
- "context": 128000,
394
- "output": 16384
395
- },
396
- "name": "Gpt 5 2",
397
- "options": {
398
- "reasoning": {
399
- "effort": "medium"
400
- }
401
- },
402
- "reasoning": true
403
- },
404
- "gpt-5.2-codex": {
405
- "limit": {
406
- "context": 128000,
407
- "output": 16384
408
- },
409
- "name": "Gpt 5 2 Codex",
410
- "options": {
411
- "reasoning": {
412
- "effort": "medium"
413
- }
414
- },
415
- "reasoning": true
416
- },
417
- "gpt-oss-120b-medium": {
418
- "limit": {
419
- "context": 128000,
420
- "output": 16384
421
- },
422
- "name": "Gpt Oss 120b Medium"
423
- }
424
- },
425
- "name": "ProxyPal",
426
- "npm": "@ai-sdk/anthropic",
427
- "options": {
428
- "apiKey": "proxypal-local",
429
- "baseURL": "http://127.0.0.1:8317/v1"
430
- }
431
- },
432
- "zai-coding-plan": {
433
- "models": {
434
- "glm-4.6": {
435
- "attachment": true,
436
- "options": {
437
- "reasoningEffort": "high",
438
- "temperature": 1,
439
- "thinking": {
440
- "type": "enabled"
441
- },
442
- "top_k": 40,
443
- "top_p": 0.95
444
- },
445
- "reasoning": true,
446
- "temperature": true,
447
- "tool_call": true
448
- }
449
- }
450
- }
451
- },
452
- "share": "manual",
453
- "small_model": "opencode/gpt-5-nano",
454
- "theme": "system",
455
- "tools": {
456
- "context7*": true,
457
- "gh_grep*": true,
458
- "gkg*": true
459
- },
460
- "tui": {
461
- "diff_style": "auto",
462
- "scroll_acceleration": {
463
- "enabled": true
464
- },
465
- "scroll_speed": 3
466
- },
467
- "watcher": {
468
- "ignore": [
469
- "node_modules/**",
470
- ".git/**",
471
- "dist/**",
472
- "build/**",
473
- "*.log",
474
- ".DS_Store"
475
- ]
476
- }
2
+ "$schema": "https://opencode.ai/config.json",
3
+ "agent": {
4
+ "compaction": {
5
+ "model": "proxypal/gemini-3-flash-preview",
6
+ "prompt": "You are summarizing a coding session for context continuity.\n\n## Output Structure\n\nUse these sections:\n\n### COMPLETED\n- What was done (with file paths)\n- Bead IDs closed and why\n\n### IN PROGRESS\n- Current task and bead ID (if any)\n- Files being modified (exact paths)\n- Current todo state (preserve TodoWrite items)\n\n### NEXT\n- What needs to be done next\n- Blockers or pending decisions\n\n### CONSTRAINTS\n- User preferences that must persist\n- Rules or requirements stated by user\n- Technical decisions and rationale\n\n### PERSIST TO MEMORY\n- Gotchas discovered → suggest for project/gotchas.md\n- Commands learned → suggest for project/commands.md\n- Patterns observed → suggest for project/conventions.md\n\n## Rules\n\n- PRESERVE: Bead IDs, todo items, file paths, line numbers, user constraints\n- DROP: Failed attempts, superseded info, verbose tool outputs, exploration dead-ends\n- Be concise but complete - this summary replaces the full conversation\n- Include enough context that a new session can continue seamlessly"
7
+ },
8
+ "build": {
9
+ "description": "Primary development agent with full codebase access",
10
+ "model": "proxypal/gemini-claude-opus-4-5-thinking"
11
+ },
12
+ "explore": {
13
+ "description": "Fast codebase search specialist",
14
+ "model": "opencode/grok-code"
15
+ },
16
+ "general": {
17
+ "disable": true
18
+ },
19
+ "plan": {
20
+ "disable": true
21
+ },
22
+ "planner": {
23
+ "description": "Strategic planning agent for architecture and design",
24
+ "model": "proxypal/gpt-5.1"
25
+ },
26
+ "review": {
27
+ "description": "Code review, debugging, and security audit specialist",
28
+ "model": "proxypal/gemini-claude-opus-4-5-thinking"
29
+ },
30
+ "rush": {
31
+ "description": "Fast primary agent for small, well-defined tasks",
32
+ "model": "zai-coding-plan/glm-4.7"
33
+ },
34
+ "scout": {
35
+ "description": "External research specialist for library docs and patterns",
36
+ "model": "proxypal/gemini-claude-sonnet-4-5"
37
+ },
38
+ "vision": {
39
+ "description": "Visual content specialist for multimodal analysis and UI/UX guidance",
40
+ "model": "proxypal/gemini-3-pro-preview"
41
+ }
42
+ },
43
+ "autoupdate": false,
44
+ "experimental": {
45
+ "lsp": true,
46
+ "batch_tool": true,
47
+ "chatMaxRetries": 2,
48
+ "primary_tools": [
49
+ "edit",
50
+ "write",
51
+ "bash",
52
+ "prune"
53
+ ]
54
+ },
55
+ "formatter": {
56
+ "biome": {
57
+ "command": [
58
+ "npx",
59
+ "@biomejs/biome",
60
+ "check",
61
+ "--write",
62
+ "$FILE"
63
+ ],
64
+ "extensions": [
65
+ ".js",
66
+ ".jsx",
67
+ ".ts",
68
+ ".tsx",
69
+ ".json",
70
+ ".jsonc"
71
+ ]
72
+ },
73
+ "java-formatter": {
74
+ "command": [
75
+ "google-java-format",
76
+ "--replace",
77
+ "$FILE"
78
+ ],
79
+ "environment": {
80
+ "JAVA_HOME": "{env:JAVA_HOME}"
81
+ },
82
+ "extensions": [
83
+ ".java"
84
+ ]
85
+ },
86
+ "prettier": {
87
+ "command": [
88
+ "npx",
89
+ "prettier",
90
+ "--write",
91
+ "$FILE"
92
+ ],
93
+ "extensions": [
94
+ ".html",
95
+ ".css",
96
+ ".scss",
97
+ ".sass",
98
+ ".md",
99
+ ".yaml",
100
+ ".yml"
101
+ ]
102
+ }
103
+ },
104
+ "instructions": [
105
+ ".opencode/memory/user.md",
106
+ ".opencode/memory/project/*.md"
107
+ ],
108
+ "keybinds": {
109
+ "command_list": ";",
110
+ "leader": "`",
111
+ "session_child_cycle": "ctrl+alt+right",
112
+ "session_child_cycle_reverse": "ctrl+alt+left"
113
+ },
114
+ "mcp": {
115
+ "Framelink MCP for Figma": {
116
+ "command": [
117
+ "npx",
118
+ "-y",
119
+ "figma-developer-mcp",
120
+ "--figma-api-key={env:FIGMA_API_KEY}",
121
+ "--stdio"
122
+ ],
123
+ "enabled": false,
124
+ "type": "local"
125
+ },
126
+ "context7": {
127
+ "command": [
128
+ "npx",
129
+ "-y",
130
+ "@upstash/context7-mcp",
131
+ "--api-key",
132
+ "{env:CONTEXT7_API_KEY}"
133
+ ],
134
+ "enabled": true,
135
+ "type": "local"
136
+ },
137
+ "gh_grep": {
138
+ "enabled": true,
139
+ "type": "remote",
140
+ "url": "https://mcp.grep.app"
141
+ },
142
+ "gkg": {
143
+ "enabled": true,
144
+ "type": "remote",
145
+ "url": "http://localhost:27495/mcp"
146
+ },
147
+ "playwright": {
148
+ "command": [
149
+ "npx",
150
+ "@playwright/mcp@latest"
151
+ ],
152
+ "enabled": false,
153
+ "type": "local"
154
+ }
155
+ },
156
+ "model": "proxypal/gemini-claude-opus-4-5-thinking",
157
+ "permission": {
158
+ "bash": {
159
+ "git commit *": "ask",
160
+ "git push *": "ask",
161
+ "rm *": "ask",
162
+ "rm -rf *": "ask"
163
+ },
164
+ "doom_loop": "ask",
165
+ "edit": "allow",
166
+ "external_directory": "allow"
167
+ },
168
+ "plugin": [
169
+ "opencode-gemini-auth",
170
+ "@tarquinen/opencode-dcp@latest",
171
+ "@franlol/opencode-md-table-formatter@0.0.3"
172
+ ],
173
+ "provider": {
174
+ "github-copilot": {
175
+ "models": {
176
+ "claude-haiku-4.5": {
177
+ "attachment": true,
178
+ "options": {
179
+ "thinking": {
180
+ "budgetTokens": 16000,
181
+ "type": "enabled"
182
+ }
183
+ },
184
+ "reasoning": true,
185
+ "temperature": true,
186
+ "tool_call": true
187
+ },
188
+ "claude-opus-4.5": {
189
+ "attachment": true,
190
+ "options": {
191
+ "thinking": {
192
+ "budgetTokens": 32000,
193
+ "type": "enabled"
194
+ }
195
+ },
196
+ "reasoning": true,
197
+ "temperature": true,
198
+ "tool_call": true
199
+ },
200
+ "claude-sonnet-4.5": {
201
+ "attachment": true,
202
+ "options": {
203
+ "thinking": {
204
+ "budgetTokens": 16000,
205
+ "type": "enabled"
206
+ }
207
+ },
208
+ "reasoning": true,
209
+ "temperature": true,
210
+ "tool_call": true
211
+ },
212
+ "gpt-5.1": {
213
+ "attachment": true,
214
+ "options": {
215
+ "reasoning": {
216
+ "effort": "high"
217
+ }
218
+ },
219
+ "reasoning": true,
220
+ "temperature": true,
221
+ "tool_call": true
222
+ },
223
+ "gpt-5.1-codex": {
224
+ "attachment": true,
225
+ "options": {
226
+ "reasoning": {
227
+ "effort": "high"
228
+ }
229
+ },
230
+ "reasoning": true,
231
+ "temperature": true,
232
+ "tool_call": true
233
+ }
234
+ },
235
+ "npm": "@ai-sdk/anthropic"
236
+ },
237
+ "opencode": {
238
+ "models": {
239
+ "big-pickle": {
240
+ "options": {
241
+ "reasoningEffort": "high",
242
+ "temperature": 1,
243
+ "top_k": 40,
244
+ "top_p": 0.95
245
+ },
246
+ "reasoning": true
247
+ }
248
+ }
249
+ },
250
+ "proxypal": {
251
+ "models": {
252
+ "gemini-2.5-computer-use-preview-10-2025": {
253
+ "limit": {
254
+ "context": 1048576,
255
+ "output": 65536
256
+ },
257
+ "name": "Gemini 2 5 Computer Use Preview 10 2025"
258
+ },
259
+ "gemini-2.5-flash": {
260
+ "limit": {
261
+ "context": 1048576,
262
+ "output": 65536
263
+ },
264
+ "name": "Gemini 2 5 Flash"
265
+ },
266
+ "gemini-2.5-flash-lite": {
267
+ "limit": {
268
+ "context": 1048576,
269
+ "output": 65536
270
+ },
271
+ "name": "Gemini 2 5 Flash Lite"
272
+ },
273
+ "gemini-2.5-pro": {
274
+ "limit": {
275
+ "context": 1048576,
276
+ "output": 65536
277
+ },
278
+ "name": "Gemini 2 5 Pro"
279
+ },
280
+ "gemini-3-flash-preview": {
281
+ "limit": {
282
+ "context": 1048576,
283
+ "output": 65536
284
+ },
285
+ "name": "Gemini 3 Flash Preview"
286
+ },
287
+ "gemini-3-pro-image-preview": {
288
+ "limit": {
289
+ "context": 1048576,
290
+ "output": 65536
291
+ },
292
+ "name": "Gemini 3 Pro Image Preview"
293
+ },
294
+ "gemini-3-pro-preview": {
295
+ "limit": {
296
+ "context": 1048576,
297
+ "output": 65536
298
+ },
299
+ "name": "Gemini 3 Pro Preview"
300
+ },
301
+ "gemini-claude-opus-4-5-thinking": {
302
+ "limit": {
303
+ "context": 200000,
304
+ "output": 64000
305
+ },
306
+ "name": "Gemini Claude Opus 4 5 Thinking",
307
+ "options": {
308
+ "thinking": {
309
+ "budgetTokens": 32768,
310
+ "type": "enabled"
311
+ }
312
+ },
313
+ "reasoning": true
314
+ },
315
+ "gemini-claude-sonnet-4-5": {
316
+ "limit": {
317
+ "context": 200000,
318
+ "output": 64000
319
+ },
320
+ "name": "Gemini Claude Sonnet 4 5"
321
+ },
322
+ "gemini-claude-sonnet-4-5-thinking": {
323
+ "limit": {
324
+ "context": 200000,
325
+ "output": 64000
326
+ },
327
+ "name": "Gemini Claude Sonnet 4 5 Thinking",
328
+ "options": {
329
+ "thinking": {
330
+ "budgetTokens": 32768,
331
+ "type": "enabled"
332
+ }
333
+ },
334
+ "reasoning": true
335
+ },
336
+ "glm-4.6": {
337
+ "limit": {
338
+ "context": 128000,
339
+ "output": 16384
340
+ },
341
+ "name": "Glm 4 6"
342
+ },
343
+ "gpt-5": {
344
+ "limit": {
345
+ "context": 128000,
346
+ "output": 16384
347
+ },
348
+ "name": "Gpt 5",
349
+ "options": {
350
+ "reasoning": {
351
+ "effort": "medium"
352
+ }
353
+ },
354
+ "reasoning": true
355
+ },
356
+ "gpt-5-codex": {
357
+ "limit": {
358
+ "context": 128000,
359
+ "output": 16384
360
+ },
361
+ "name": "Gpt 5 Codex",
362
+ "options": {
363
+ "reasoning": {
364
+ "effort": "medium"
365
+ }
366
+ },
367
+ "reasoning": true
368
+ },
369
+ "gpt-5-codex-mini": {
370
+ "limit": {
371
+ "context": 128000,
372
+ "output": 16384
373
+ },
374
+ "name": "Gpt 5 Codex Mini",
375
+ "options": {
376
+ "reasoning": {
377
+ "effort": "medium"
378
+ }
379
+ },
380
+ "reasoning": true
381
+ },
382
+ "gpt-5.1": {
383
+ "limit": {
384
+ "context": 128000,
385
+ "output": 16384
386
+ },
387
+ "name": "Gpt 5 1",
388
+ "options": {
389
+ "reasoning": {
390
+ "effort": "medium"
391
+ }
392
+ },
393
+ "reasoning": true
394
+ },
395
+ "gpt-5.1-codex": {
396
+ "limit": {
397
+ "context": 128000,
398
+ "output": 16384
399
+ },
400
+ "name": "Gpt 5 1 Codex",
401
+ "options": {
402
+ "reasoning": {
403
+ "effort": "medium"
404
+ }
405
+ },
406
+ "reasoning": true
407
+ },
408
+ "gpt-5.1-codex-max": {
409
+ "limit": {
410
+ "context": 128000,
411
+ "output": 16384
412
+ },
413
+ "name": "Gpt 5 1 Codex Max",
414
+ "options": {
415
+ "reasoning": {
416
+ "effort": "medium"
417
+ }
418
+ },
419
+ "reasoning": true
420
+ },
421
+ "gpt-5.1-codex-mini": {
422
+ "limit": {
423
+ "context": 128000,
424
+ "output": 16384
425
+ },
426
+ "name": "Gpt 5 1 Codex Mini",
427
+ "options": {
428
+ "reasoning": {
429
+ "effort": "medium"
430
+ }
431
+ },
432
+ "reasoning": true
433
+ },
434
+ "gpt-5.2": {
435
+ "limit": {
436
+ "context": 128000,
437
+ "output": 16384
438
+ },
439
+ "name": "Gpt 5 2",
440
+ "options": {
441
+ "reasoning": {
442
+ "effort": "medium"
443
+ }
444
+ },
445
+ "reasoning": true
446
+ },
447
+ "gpt-5.2-codex": {
448
+ "limit": {
449
+ "context": 128000,
450
+ "output": 16384
451
+ },
452
+ "name": "Gpt 5 2 Codex",
453
+ "options": {
454
+ "reasoning": {
455
+ "effort": "medium"
456
+ }
457
+ },
458
+ "reasoning": true
459
+ },
460
+ "gpt-oss-120b-medium": {
461
+ "limit": {
462
+ "context": 128000,
463
+ "output": 16384
464
+ },
465
+ "name": "Gpt Oss 120b Medium"
466
+ }
467
+ },
468
+ "name": "ProxyPal",
469
+ "npm": "@ai-sdk/anthropic",
470
+ "options": {
471
+ "apiKey": "proxypal-local",
472
+ "baseURL": "http://127.0.0.1:8317/v1"
473
+ }
474
+ },
475
+ "zai-coding-plan": {
476
+ "models": {
477
+ "glm-4.7": {
478
+ "id": "glm-4.7",
479
+ "name": "GLM-4.7",
480
+ "reasoning": true,
481
+ "interleaved": true,
482
+ "options": {
483
+ "reasoningEffort": "high",
484
+ "reasoningSummary": "true",
485
+ "temperature": 1,
486
+ "top_k": 40,
487
+ "top_p": 0.95,
488
+ "maxOutputTokens": 131072,
489
+ "thinking": {
490
+ "type": "enabled"
491
+ }
492
+ }
493
+ },
494
+ "glm-4.6": {
495
+ "attachment": true,
496
+ "options": {
497
+ "reasoningEffort": "high",
498
+ "temperature": 1,
499
+ "thinking": {
500
+ "type": "enabled"
501
+ },
502
+ "top_k": 40,
503
+ "top_p": 0.95
504
+ },
505
+ "reasoning": true,
506
+ "temperature": true,
507
+ "tool_call": true
508
+ }
509
+ }
510
+ }
511
+ },
512
+ "share": "manual",
513
+ "small_model": "opencode/gpt-5-nano",
514
+ "theme": "system",
515
+ "tools": {
516
+ "context7*": true,
517
+ "gh_grep*": true,
518
+ "gkg*": true
519
+ },
520
+ "tui": {
521
+ "diff_style": "auto",
522
+ "scroll_acceleration": {
523
+ "enabled": true
524
+ },
525
+ "scroll_speed": 3
526
+ },
527
+ "watcher": {
528
+ "ignore": [
529
+ "node_modules/**",
530
+ ".git/**",
531
+ "dist/**",
532
+ "build/**",
533
+ "*.log",
534
+ ".DS_Store"
535
+ ]
536
+ }
477
537
  }
@@ -11,7 +11,7 @@
11
11
  "author": "",
12
12
  "license": "ISC",
13
13
  "dependencies": {
14
- "@opencode-ai/plugin": "1.0.180"
14
+ "@opencode-ai/plugin": "1.0.186"
15
15
  },
16
16
  "devDependencies": {
17
17
  "@types/node": "^20.19.27",
@@ -61,7 +61,7 @@ function parseDate(dateStr: string): Date | null {
61
61
  return null;
62
62
  }
63
63
 
64
- export const SessionsPlugin: Plugin = async ({ directory }) => {
64
+ export const SessionsPlugin: Plugin = async ({ client, directory }) => {
65
65
  const storageDir = join(
66
66
  process.env.HOME || "",
67
67
  ".local/share/opencode/storage",
@@ -158,6 +158,31 @@ export const SessionsPlugin: Plugin = async ({ directory }) => {
158
158
 
159
159
  return {
160
160
  tool: {
161
+ summarize_session: tool({
162
+ description:
163
+ "Generate an AI summary of a session using the new auto parameter. Useful for quickly understanding what happened in a previous session.",
164
+ args: {
165
+ session_id: tool.schema
166
+ .string()
167
+ .describe("Session ID to summarize (e.g. 'ses_abc123')"),
168
+ },
169
+ async execute(args) {
170
+ try {
171
+ // Use compaction model from config: proxypal/gemini-3-flash-preview
172
+ const result = await client.session.summarize({
173
+ path: { id: args.session_id },
174
+ body: {
175
+ providerID: "proxypal",
176
+ modelID: "gemini-3-flash-preview",
177
+ },
178
+ });
179
+ return `Session summarized successfully.\n\n${JSON.stringify(result, null, 2)}`;
180
+ } catch (error) {
181
+ return `Error summarizing session: ${error instanceof Error ? error.message : String(error)}`;
182
+ }
183
+ },
184
+ }),
185
+
161
186
  list_sessions: tool({
162
187
  description:
163
188
  "List OpenCode sessions with metadata. Filter by project and date. Use this before read_session to discover available sessions.",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "opencodekit",
3
- "version": "0.5.0",
3
+ "version": "0.6.0",
4
4
  "description": "CLI tool for bootstrapping and managing OpenCodeKit projects",
5
5
  "type": "module",
6
6
  "repository": {
@@ -1,11 +0,0 @@
1
- {
2
- // Ultrathink config for pickle-thinker
3
- // mode: "lite" keeps the original behavior (prefix user prompts only).
4
- // mode: "tool" adds an extra user turn after each tool result to force deeper analysis.
5
- // Note: tool mode increases turns/tokens and may impact subscription limits.
6
- "enabled": true,
7
- // "lite" | "tool"
8
- "mode": "tool",
9
- // Change the thinking keyword if you like
10
- "prefix": "Ultrathink: "
11
- }