opencode-handoff 0.1.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -9,6 +9,7 @@ Inspired by Amp's handoff command - see their [post](https://ampcode.com/news/ha
9
9
  - `/handoff <goal>` command that analyzes the conversation and generates a continuation prompt
10
10
  - Guides the AI to include relevant `@file` references so the next session starts with context loaded
11
11
  - Opens a new session with the prompt as an editable draft
12
+ - `read_session` tool for retrieving full conversation transcripts from previous sessions when the handoff summary isn't sufficient
12
13
 
13
14
  ## Requirements
14
15
 
@@ -20,7 +21,7 @@ Add to your OpenCode config (`~/.config/opencode/config.json`):
20
21
 
21
22
  ```json
22
23
  {
23
- "plugin": ["opencode-handoff@0.1.0"]
24
+ "plugin": ["opencode-handoff@0.3.0"]
24
25
  }
25
26
  ```
26
27
 
@@ -53,6 +54,24 @@ ln -sf ~/.config/opencode/opencode-handoff/src/plugin.ts ~/.config/opencode/plug
53
54
 
54
55
  The AI analyzes the conversation, extracts key decisions and relevant files, generates a focused prompt, and creates a new session with that prompt ready to edit.
55
56
 
57
+ ### Reading Previous Session Transcripts
58
+
59
+ When you use `/handoff`, the generated prompt includes a session reference line:
60
+
61
+ ```
62
+ Continuing work from session sess_01jxyz123. When you lack specific information you can use read_session to get it.
63
+ ```
64
+
65
+ This gives the AI in the new session access to the `read_session` tool, which can fetch the full conversation transcript from the source session. If the handoff summary doesn't include something you need, just ask—the AI can look it up.
66
+
67
+ **Example:**
68
+
69
+ ```
70
+ You: What were the specific error messages we saw earlier?
71
+ ```
72
+
73
+ The AI will use `read_session` to retrieve details from the previous session that weren't included in the handoff summary.
74
+
56
75
  ## Contributing
57
76
 
58
77
  Contributions are welcome! Here's how to set up for development:
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "opencode-handoff",
3
- "version": "0.1.0",
3
+ "version": "0.3.0",
4
4
  "type": "module",
5
5
  "description": "Create focused handoff prompts for continuing work in new OpenCode sessions",
6
6
  "author": "Josh Thomas <josh@joshthomas.dev>",
package/src/files.ts ADDED
@@ -0,0 +1,66 @@
1
+ /**
2
+ * File reference parsing and building for handoff sessions.
3
+ *
4
+ * Handles extraction of @file references from handoff prompts and
5
+ * building file parts for injection into new sessions.
6
+ */
7
+
8
+ import * as path from "node:path"
9
+ import * as fs from "node:fs/promises"
10
+ import type { FilePartInput } from "@opencode-ai/sdk"
11
+
12
+ /**
13
+ * File reference regex matching OpenCode's internal pattern.
14
+ * Matches @file references like @src/plugin.ts
15
+ */
16
+ export const FILE_REGEX = /(?<![\w`])@(\.?[^\s`,.]*(?:\.[^\s`,.]+)*)/g
17
+
18
+ /**
19
+ * Parse @file references from text.
20
+ *
21
+ * @param text - Text to search for @file references
22
+ * @returns Set of file paths referenced in the text
23
+ */
24
+ export function parseFileReferences(text: string): Set<string> {
25
+ const fileRefs = new Set<string>()
26
+
27
+ for (const match of text.matchAll(FILE_REGEX)) {
28
+ if (match[1]) {
29
+ fileRefs.add(match[1])
30
+ }
31
+ }
32
+
33
+ return fileRefs
34
+ }
35
+
36
+ /**
37
+ * Build file parts for files that exist.
38
+ *
39
+ * @param directory - Project directory to resolve relative paths against
40
+ * @param refs - Set of file path references to check
41
+ * @returns Array of file parts for existing files (non-existent files are skipped)
42
+ */
43
+ export async function buildFileParts(
44
+ directory: string,
45
+ refs: Set<string>
46
+ ): Promise<FilePartInput[]> {
47
+ const fileParts: FilePartInput[] = []
48
+
49
+ for (const ref of refs) {
50
+ const filepath = path.resolve(directory, ref)
51
+
52
+ try {
53
+ await fs.stat(filepath)
54
+ fileParts.push({
55
+ type: "file",
56
+ mime: "text/plain",
57
+ url: `file://${filepath}`,
58
+ filename: ref,
59
+ })
60
+ } catch {
61
+ // Skip silently if file doesn't exist
62
+ }
63
+ }
64
+
65
+ return fileParts
66
+ }
package/src/plugin.ts CHANGED
@@ -1,69 +1,101 @@
1
1
  import type { Plugin } from "@opencode-ai/plugin"
2
- import { tool } from "@opencode-ai/plugin"
2
+ import { HandoffSession, ReadSession } from "./tools"
3
+ import { parseFileReferences, buildFileParts } from "./files"
3
4
 
4
- export const HandoffPlugin: Plugin = async (ctx) => ({
5
- config: async (config) => {
6
- config.command = config.command || {}
7
- config.command["handoff"] = {
8
- description: "Create a focused handoff prompt for a new session",
9
- template: `You are creating a handoff message to continue work in a new session.
10
-
11
- User's goal: $ARGUMENTS
5
+ const HANDOFF_COMMAND = `GOAL: You are creating a handoff message to continue work in a new session.
12
6
 
7
+ <context>
13
8
  When an AI assistant starts a fresh session, it spends significant time exploring the codebase—grepping, reading files, searching—before it can begin actual work. This "file archaeology" is wasteful when the previous session already discovered what matters.
14
9
 
15
10
  A good handoff frontloads everything the next session needs so it can start implementing immediately.
11
+ </context>
16
12
 
13
+ <instructions>
17
14
  Analyze this conversation and extract what matters for continuing the work.
18
15
 
19
- 1. FILE REFERENCES (Required)
20
-
21
- Include all relevant @file references on a SINGLE LINE, space-separated.
22
-
23
- Why: Every @file gets loaded into context automatically. The next session won't need to search—the files are already there. This eliminates exploration entirely.
16
+ 1. Identify all relevant files that should be loaded into the next session's context
24
17
 
25
18
  Include files that will be edited, dependencies being touched, relevant tests, configs, and key reference docs. Be generous—the cost of an extra file is low; missing a critical one means another archaeology dig. Target 8-15 files, up to 20 for complex work.
26
19
 
27
- 2. CONTEXT AND GOAL
20
+ 2. Draft the context and goal description
28
21
 
29
- After the files, describe what we're working on and provide whatever context helps continue the work. Structure it based on what fits the conversation—could be tasks, findings, a simple paragraph, or detailed steps.
22
+ Describe what we're working on and provide whatever context helps continue the work. Structure it based on what fits the conversation—could be tasks, findings, a simple paragraph, or detailed steps.
30
23
 
31
24
  Preserve: decisions, constraints, user preferences, technical patterns.
32
25
 
33
26
  Exclude: conversation back-and-forth, dead ends, meta-commentary.
34
27
 
35
28
  The user controls what context matters. If they mentioned something to preserve, include it—trust their judgment about their workflow.
29
+ </instructions>
30
+
31
+ <user_input>
32
+ The user's guidance for continuing work. If empty, the handoff should capture a natural continuation of the current conversation's direction.
33
+
34
+ USER: $ARGUMENTS
35
+ </user_input>
36
36
 
37
37
  ---
38
38
 
39
- After generating the handoff message, IMMEDIATELY call handoff_prepare with the full message as a handoff prompt:
40
- \`handoff_prepare(prompt="...")\``,
41
- }
42
- },
43
-
44
- tool: {
45
- handoff_prepare: tool({
46
- description: "Prepare handoff by creating new session with generated prompt as draft",
47
- args: {
48
- prompt: tool.schema.string().describe("The generated handoff prompt"),
49
- },
50
- async execute(args, context) {
51
- await ctx.client.tui.clearPrompt()
52
- await ctx.client.tui.executeCommand({ body: { command: "session_new" } })
53
- await new Promise(r => setTimeout(r, 200))
54
- await ctx.client.tui.appendPrompt({ body: { text: args.prompt } })
55
-
56
- await ctx.client.tui.showToast({
57
- body: {
58
- title: "Handoff Ready",
59
- message: "Review and edit the draft, then send",
60
- variant: "success",
61
- duration: 4000,
62
- }
63
- })
64
-
65
- return "Handoff prompt created in new session. Review and edit before sending."
39
+ After generating the handoff message, IMMEDIATELY call handoff_session with your prompt and files:
40
+ \`handoff_session(prompt="...", files=["src/foo.ts", "src/bar.ts", ...])\``
41
+
42
+ export const HandoffPlugin: Plugin = async (ctx) => {
43
+ const processedSessions = new Set<string>()
44
+
45
+ return {
46
+ config: async (config) => {
47
+ config.command = config.command || {}
48
+ config.command["handoff"] = {
49
+ description: "Create a focused handoff prompt for a new session",
50
+ template: HANDOFF_COMMAND,
66
51
  }
67
- })
52
+ },
53
+
54
+ tool: {
55
+ handoff_session: HandoffSession(ctx.client),
56
+ read_session: ReadSession(ctx.client),
57
+ },
58
+
59
+ "chat.message": async (_input, output) => {
60
+ const sessionID = output.message.sessionID
61
+
62
+ if (processedSessions.has(sessionID)) return
63
+
64
+ // Get non-synthetic text from the message
65
+ const text = output.parts
66
+ .filter((p): p is typeof p & { type: "text"; text: string } =>
67
+ p.type === "text" && !p.synthetic && typeof p.text === "string"
68
+ )
69
+ .map(p => p.text)
70
+ .join("\n")
71
+
72
+ if (!text.includes("Continuing work from session")) return
73
+
74
+ processedSessions.add(sessionID)
75
+
76
+ const fileRefs = parseFileReferences(text)
77
+ if (fileRefs.size === 0) return
78
+
79
+ const fileParts = await buildFileParts(ctx.directory, fileRefs)
80
+ if (fileParts.length === 0) return
81
+
82
+ // Inject file parts via noReply
83
+ // Must pass model and agent to prevent mode/model switching
84
+ await ctx.client.session.prompt({
85
+ path: { id: sessionID },
86
+ body: {
87
+ noReply: true,
88
+ model: output.message.model,
89
+ agent: output.message.agent,
90
+ parts: fileParts,
91
+ },
92
+ })
93
+ },
94
+
95
+ event: async ({ event }) => {
96
+ if (event.type === "session.deleted") {
97
+ processedSessions.delete(event.properties.info.id)
98
+ }
99
+ }
68
100
  }
69
- })
101
+ }
package/src/tools.ts ADDED
@@ -0,0 +1,141 @@
1
+ /**
2
+ * Tool definitions for opencode-handoff plugin.
3
+ *
4
+ * Factory functions that create tool definitions with injected dependencies:
5
+ * - HandoffSession: Create a new session with handoff prompt
6
+ * - ReadSession: Read conversation transcript from a session
7
+ */
8
+
9
+ import type { PluginInput } from "@opencode-ai/plugin"
10
+ import { tool } from "@opencode-ai/plugin"
11
+
12
+ export type OpencodeClient = PluginInput["client"]
13
+
14
+ /**
15
+ * Create the handoff_session tool.
16
+ *
17
+ * Takes the OpenCode client as a dependency for TUI and session operations.
18
+ */
19
+ export const HandoffSession = (client: OpencodeClient) => {
20
+ return tool({
21
+ description: "Create a new session with the handoff prompt as an editable draft",
22
+ args: {
23
+ prompt: tool.schema.string().describe("The generated handoff prompt"),
24
+ files: tool.schema.array(tool.schema.string()).optional().describe("Array of file paths to load into the new session's context"),
25
+ },
26
+ async execute(args, context) {
27
+ const sessionReference = `Continuing work from session ${context.sessionID}. When you lack specific information you can use read_session to get it.`
28
+ const fileRefs = args.files?.length
29
+ ? args.files.map(f => `@${f.replace(/^@/, '')}`).join(' ')
30
+ : ''
31
+ const fullPrompt = fileRefs
32
+ ? `${sessionReference}\n\n${fileRefs}\n\n${args.prompt}`
33
+ : `${sessionReference}\n\n${args.prompt}`
34
+
35
+ // Double-append workaround for textarea resize bug:
36
+ // appendPrompt uses insertText() which bypasses onContentChange, so resize never triggers.
37
+ // First append sets height in old session, session_new preserves textarea element,
38
+ // second append populates new session with already-expanded textarea.
39
+ await client.tui.clearPrompt()
40
+ await new Promise(r => setTimeout(r, 200))
41
+ await client.tui.appendPrompt({ body: { text: fullPrompt } })
42
+ await client.tui.executeCommand({ body: { command: "session_new" } })
43
+ await new Promise(r => setTimeout(r, 200))
44
+ await client.tui.appendPrompt({ body: { text: fullPrompt } })
45
+
46
+ await client.tui.showToast({
47
+ body: {
48
+ title: "Handoff Ready",
49
+ message: "Review and edit the draft, then send",
50
+ variant: "success",
51
+ duration: 4000,
52
+ }
53
+ })
54
+
55
+ return "Handoff prompt created in new session. Review and edit before sending."
56
+ }
57
+ })
58
+ }
59
+
60
+ /**
61
+ * Format a conversation transcript for display.
62
+ *
63
+ * @param messages - Array of messages from session.messages()
64
+ * @param limit - Optional limit to indicate if results are truncated
65
+ * @returns Formatted transcript with user/assistant sections
66
+ */
67
+ function formatTranscript(
68
+ messages: Array<{ info: any; parts: any[] }>,
69
+ limit?: number
70
+ ): string {
71
+ const lines: string[] = []
72
+
73
+ for (const msg of messages) {
74
+ if (msg.info.role === "user") {
75
+ lines.push("## User")
76
+ for (const part of msg.parts) {
77
+ if (part.type === "text" && !part.ignored) {
78
+ lines.push(part.text)
79
+ }
80
+ if (part.type === "file") {
81
+ lines.push(`[Attached: ${part.filename || "file"}]`)
82
+ }
83
+ }
84
+ lines.push("")
85
+ }
86
+
87
+ if (msg.info.role === "assistant") {
88
+ lines.push("## Assistant")
89
+ for (const part of msg.parts) {
90
+ if (part.type === "text") {
91
+ lines.push(part.text)
92
+ }
93
+ if (part.type === "tool" && part.state.status === "completed") {
94
+ lines.push(`[Tool: ${part.tool}] ${part.state.title}`)
95
+ }
96
+ }
97
+ lines.push("")
98
+ }
99
+ }
100
+
101
+ const output = lines.join("\n").trim()
102
+
103
+ if (messages.length >= (limit ?? 100)) {
104
+ return output + `\n\n(Showing ${messages.length} most recent messages. Use a higher 'limit' to see more.)`
105
+ }
106
+
107
+ return output + `\n\n(End of session - ${messages.length} messages)`
108
+ }
109
+
110
+ /**
111
+ * Create the read_session tool.
112
+ *
113
+ * Takes the OpenCode client as a dependency for session.messages() calls.
114
+ */
115
+ export const ReadSession = (client: OpencodeClient) => {
116
+ return tool({
117
+ description: "Read the conversation transcript from a previous session. Use this when you need specific information from the source session that wasn't included in the handoff summary.",
118
+ args: {
119
+ sessionID: tool.schema.string().describe("The full session ID (e.g., sess_01jxyz...)"),
120
+ limit: tool.schema.number().optional().describe("Maximum number of messages to read (defaults to 100, max 500)"),
121
+ },
122
+ async execute(args) {
123
+ const limit = Math.min(args.limit ?? 100, 500)
124
+
125
+ try {
126
+ const response = await client.session.messages({
127
+ path: { id: args.sessionID },
128
+ query: { limit }
129
+ })
130
+
131
+ if (!response.data || response.data.length === 0) {
132
+ return "Session has no messages or does not exist."
133
+ }
134
+
135
+ return formatTranscript(response.data, limit)
136
+ } catch (error) {
137
+ return `Could not read session ${args.sessionID}: ${error instanceof Error ? error.message : 'Unknown error'}`
138
+ }
139
+ }
140
+ })
141
+ }