opencodekit 0.13.1 → 0.14.0
This diff compares the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/dist/index.js +16 -4
- package/dist/template/.opencode/AGENTS.md +13 -4
- package/dist/template/.opencode/README.md +100 -4
- package/dist/template/.opencode/command/brainstorm.md +25 -2
- package/dist/template/.opencode/command/finish.md +21 -4
- package/dist/template/.opencode/command/handoff.md +17 -0
- package/dist/template/.opencode/command/implement.md +38 -0
- package/dist/template/.opencode/command/plan.md +32 -0
- package/dist/template/.opencode/command/research.md +61 -5
- package/dist/template/.opencode/command/resume.md +31 -0
- package/dist/template/.opencode/command/start.md +31 -0
- package/dist/template/.opencode/command/triage.md +16 -1
- package/dist/template/.opencode/memory/observations/.gitkeep +0 -0
- package/dist/template/.opencode/memory/project/conventions.md +31 -0
- package/dist/template/.opencode/memory/vector_db/memories.lance/_transactions/0-8d00d272-cb80-463b-9774-7120a1c994e7.txn +0 -0
- package/dist/template/.opencode/memory/vector_db/memories.lance/_transactions/1-a3bea825-dad3-47dd-a6d6-ff41b76ff7b0.txn +0 -0
- package/dist/template/.opencode/memory/vector_db/memories.lance/_versions/1.manifest +0 -0
- package/dist/template/.opencode/memory/vector_db/memories.lance/_versions/2.manifest +0 -0
- package/dist/template/.opencode/memory/vector_db/memories.lance/data/001010101000000101110001f998d04b63936ff83f9a34152d.lance +0 -0
- package/dist/template/.opencode/memory/vector_db/memories.lance/data/010000101010000000010010701b3840d38c2b5f275da99978.lance +0 -0
- package/dist/template/.opencode/opencode.json +587 -511
- package/dist/template/.opencode/package.json +3 -1
- package/dist/template/.opencode/plugin/memory.ts +610 -0
- package/dist/template/.opencode/tool/memory-embed.ts +183 -0
- package/dist/template/.opencode/tool/memory-index.ts +769 -0
- package/dist/template/.opencode/tool/memory-search.ts +358 -66
- package/dist/template/.opencode/tool/observation.ts +301 -12
- package/dist/template/.opencode/tool/repo-map.ts +451 -0
- package/package.json +16 -4
package/dist/template/.opencode/tool/memory-embed.ts
@@ -0,0 +1,183 @@
+import { tool } from "@opencode-ai/plugin";
+
+// Configuration - Qwen3-Embedding-0.6B
+// Better for code, multilingual (100+ langs), instruction-aware
+// See: https://ollama.com/library/qwen3-embedding
+const OLLAMA_MODEL = "qwen3-embedding:0.6b";
+const OLLAMA_DIMENSIONS = 1024;
+const OLLAMA_BASE_URL = process.env.OLLAMA_HOST || "http://127.0.0.1:11434";
+
+interface EmbeddingResult {
+  text: string;
+  embedding: number[];
+  model: string;
+  dimensions: number;
+}
+
+interface BatchEmbeddingResult {
+  results: EmbeddingResult[];
+  failed: { text: string; error: string }[];
+  model: string;
+}
+
+async function checkOllamaRunning(): Promise<boolean> {
+  try {
+    const response = await fetch(`${OLLAMA_BASE_URL}/api/version`, {
+      method: "GET",
+      signal: AbortSignal.timeout(3000),
+    });
+    return response.ok;
+  } catch {
+    return false;
+  }
+}
+
+async function ensureModelAvailable(): Promise<{
+  ok: boolean;
+  error?: string;
+}> {
+  try {
+    const listResponse = await fetch(`${OLLAMA_BASE_URL}/api/tags`, {
+      method: "GET",
+      signal: AbortSignal.timeout(5000),
+    });
+
+    if (!listResponse.ok) {
+      return { ok: false, error: "Failed to list models" };
+    }
+
+    const data = (await listResponse.json()) as {
+      models?: { name: string }[];
+    };
+    const models = data.models || [];
+    const modelExists = models.some(
+      (m) => m.name === OLLAMA_MODEL || m.name === `${OLLAMA_MODEL}:latest`,
+    );
+
+    if (modelExists) {
+      return { ok: true };
+    }
+
+    return {
+      ok: false,
+      error: `Model '${OLLAMA_MODEL}' not found. Run: ollama pull ${OLLAMA_MODEL}`,
+    };
+  } catch (err) {
+    const message = err instanceof Error ? err.message : String(err);
+    return { ok: false, error: `Failed to check models: ${message}` };
+  }
+}
+
+async function embedWithOllama(
+  texts: string[],
+): Promise<{ embeddings: number[][]; error?: string }> {
+  try {
+    const embeddings: number[][] = [];
+
+    for (const text of texts) {
+      const response = await fetch(`${OLLAMA_BASE_URL}/api/embeddings`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({
+          model: OLLAMA_MODEL,
+          prompt: text,
+        }),
+        signal: AbortSignal.timeout(30000),
+      });
+
+      if (!response.ok) {
+        const errorText = await response.text();
+        return {
+          embeddings: [],
+          error: `Ollama API error (${response.status}): ${errorText}`,
+        };
+      }
+
+      const data = (await response.json()) as { embedding?: number[] };
+      if (!data.embedding) {
+        return { embeddings: [], error: "No embedding in response" };
+      }
+
+      embeddings.push(data.embedding);
+    }
+
+    return { embeddings };
+  } catch (err) {
+    const message = err instanceof Error ? err.message : String(err);
+
+    if (message.includes("ECONNREFUSED") || message.includes("fetch failed")) {
+      return {
+        embeddings: [],
+        error: "Cannot connect to Ollama. See .opencode/README.md for setup.",
+      };
+    }
+
+    return { embeddings: [], error: `Ollama error: ${message}` };
+  }
+}
+
+export default tool({
+  description: `Generate embeddings using Ollama (${OLLAMA_MODEL}). Requires Ollama running locally.`,
+  args: {
+    text: tool.schema
+      .union([tool.schema.string(), tool.schema.array(tool.schema.string())])
+      .describe("Text or array of texts to embed"),
+  },
+  execute: async (args: { text: string | string[] }) => {
+    const texts = Array.isArray(args.text) ? args.text : [args.text];
+
+    if (texts.length === 0) {
+      return "Error: No text provided";
+    }
+
+    const maxChars = 32000;
+    const longTexts = texts.filter((t) => t.length > maxChars);
+    if (longTexts.length > 0) {
+      return `Error: ${longTexts.length} text(s) exceed ${maxChars} char limit`;
+    }
+
+    const ollamaRunning = await checkOllamaRunning();
+    if (!ollamaRunning) {
+      return "Error: Ollama not running. See .opencode/README.md for setup.";
+    }
+
+    const modelCheck = await ensureModelAvailable();
+    if (!modelCheck.ok) {
+      return `Error: ${modelCheck.error}. See .opencode/README.md for setup.`;
+    }
+
+    const result = await embedWithOllama(texts);
+
+    if (result.error) {
+      return `Error: ${result.error}`;
+    }
+
+    const response: BatchEmbeddingResult = {
+      results: texts.map((text, i) => ({
+        text: text.substring(0, 100) + (text.length > 100 ? "..." : ""),
+        embedding: result.embeddings[i],
+        model: OLLAMA_MODEL,
+        dimensions: OLLAMA_DIMENSIONS,
+      })),
+      failed: [],
+      model: OLLAMA_MODEL,
+    };
+
+    return JSON.stringify(response, null, 2);
+  },
+});
+
+// Export for other tools (memory-index, observation)
+export async function generateEmbedding(
+  text: string,
+): Promise<{ embedding: number[]; model: string } | null> {
+  const ollamaRunning = await checkOllamaRunning();
+  if (!ollamaRunning) return null;
+
+  const result = await embedWithOllama([text]);
+  if (result.error || result.embeddings.length === 0) return null;
+
+  return { embedding: result.embeddings[0], model: OLLAMA_MODEL };
+}
+
+export const EMBEDDING_DIMENSIONS = OLLAMA_DIMENSIONS;
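
Taken together, memory-embed.ts gives the template a single entry point for embeddings: the default-exported tool for agent calls, plus the generateEmbedding and EMBEDDING_DIMENSIONS exports that the other new tools (memory-index.ts, observation.ts) are expected to consume. A minimal sketch of how a consumer might use the helper follows — assuming Ollama is running locally with qwen3-embedding:0.6b pulled, and with cosineSimilarity as an illustrative helper that is not part of the package:

import { generateEmbedding, EMBEDDING_DIMENSIONS } from "./memory-embed";

// Illustrative helper (not part of the package): cosine similarity
// between two equal-length vectors.
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  const denom = Math.sqrt(normA) * Math.sqrt(normB);
  return denom === 0 ? 0 : dot / denom;
}

const query = await generateEmbedding("where is the vector db configured?");
const memory = await generateEmbedding(
  "Memories are stored under .opencode/memory/vector_db",
);

// generateEmbedding returns null rather than throwing when Ollama is
// unreachable or the model is missing, so callers must check.
if (query && memory) {
  console.log(query.embedding.length === EMBEDDING_DIMENSIONS); // true (1024)
  console.log(cosineSimilarity(query.embedding, memory.embedding).toFixed(3));
}

Because generateEmbedding returns null instead of throwing when Ollama is unreachable or the model is missing, downstream tools can degrade gracefully to non-semantic behavior rather than failing the whole tool call.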