@csuwl/opencode-memory-plugin 1.0.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,356 @@
1
+ /**
2
+ * Configuration Management for OpenCode Memory Plugin
3
+ *
4
+ * This module handles loading, validating, and managing configuration
5
+ * for the memory system including model selection and search modes.
6
+ */
7
+
8
+ import path from "path"
9
+ import { readFile, exists } from "fs/promises"
10
+
11
// Configuration file paths.
// NOTE(review): when HOME is unset this falls back to "" and yields a
// relative ".opencode/memory" path (Windows uses USERPROFILE) — confirm
// the intended target platforms.
const MEMORY_DIR = path.join(process.env.HOME || "", ".opencode", "memory")
const CONFIG_PATH = path.join(MEMORY_DIR, "memory-config.json")
14
+
15
// Default configuration — returned verbatim when no user config file
// exists, and used as the base onto which user overrides are merged
// (see mergeConfig / loadConfig).
const DEFAULT_CONFIG: MemoryConfig = {
  version: "2.0",
  search: {
    // Hybrid mode combines vector-similarity and BM25 keyword scores.
    mode: "hybrid",
    options: {
      hybrid: {
        vectorWeight: 0.7, // weight of the vector-similarity score
        bm25Weight: 0.3    // weight of the BM25 score (sums to 1.0 with vectorWeight)
      }
    }
  },
  embedding: {
    enabled: true,
    provider: "transformers",
    model: "Xenova/all-MiniLM-L6-v2", // default: small, fast, general-purpose
    fallbackMode: "hash",             // strategy when the embedding provider is unavailable
    cache: {
      enabled: true,
      // Standard HuggingFace cache location for downloaded model weights.
      directory: path.join(process.env.HOME || "", ".cache", "huggingface")
    }
  },
  models: {
    // Catalog of supported local embedding models and their trade-offs;
    // keys are HuggingFace model identifiers.
    available: {
      "Xenova/all-MiniLM-L6-v2": {
        dimensions: 384,
        size: "80MB",
        language: "en",
        useCase: "general",
        quality: "good",
        speed: "fast"
      },
      "Xenova/bge-small-en-v1.5": {
        dimensions: 384,
        size: "130MB",
        language: "en",
        useCase: "high-quality",
        quality: "excellent",
        speed: "medium"
      },
      "Xenova/bge-base-en-v1.5": {
        dimensions: 768,
        size: "400MB",
        language: "en",
        useCase: "best-quality",
        quality: "best",
        speed: "slow"
      },
      "Xenova/e5-small-v2": {
        dimensions: 384,
        size: "130MB",
        language: "en",
        useCase: "question-answer",
        quality: "good",
        speed: "medium"
      },
      "Xenova/nomic-embed-text-v1.5": {
        dimensions: 768,
        size: "270MB",
        language: "en",
        useCase: "long-documents",
        quality: "excellent",
        speed: "medium"
      }
    }
  },
  indexing: {
    chunkSize: 400,    // characters per chunk when splitting memory files
    chunkOverlap: 80,  // characters shared between adjacent chunks
    autoRebuild: true
  }
}
87
+
88
// Type definitions

/**
 * Top-level configuration for the memory plugin (schema version "2.0").
 * Optional legacy fields preserve backward compatibility with v1.0
 * config files.
 */
export interface MemoryConfig {
  version: string
  search: SearchConfig
  embedding: EmbeddingConfig
  models: ModelsConfig
  indexing: IndexingConfig
  // Legacy v1.0 fields (for backward compatibility)
  auto_save?: boolean
  vector_search?: {
    enabled: boolean
    hybrid: boolean
    rebuild_interval_hours?: number
  }
  consolidation?: {
    enabled: boolean
    run_daily: boolean
    run_hour: number
    archive_days: number
    delete_days: number
  }
  retention?: {
    max_daily_files: number
    max_entries_per_file: number
    chunk_size: number
    chunk_overlap: number
  }
}
116
+
117
/**
 * Search behavior configuration.
 * `hybrid` options are only consulted when mode is "hybrid"; weights are
 * expected in [0, 1] (see validateConfig).
 */
export interface SearchConfig {
  mode: "hybrid" | "vector" | "bm25" | "hash"
  options: {
    hybrid?: {
      vectorWeight: number
      bm25Weight: number
    }
  }
}
126
+
127
/**
 * Embedding provider configuration.
 * `fallbackMode` selects the behavior when the provider cannot be used;
 * `cache.directory` is where downloaded model weights are stored.
 */
export interface EmbeddingConfig {
  enabled: boolean
  provider: "transformers" | "openai" | "none"
  model: string
  fallbackMode: "hash" | "bm25" | "error"
  cache: {
    enabled: boolean
    directory: string
  }
}
137
+
138
/**
 * Descriptive metadata for one embedding model in the catalog
 * (dimensions of the output vector, approximate download size, and
 * qualitative quality/speed trade-offs).
 */
export interface ModelInfo {
  dimensions: number
  size: string
  language: string
  useCase: string
  quality: "good" | "excellent" | "best"
  speed: "fast" | "medium" | "slow"
}
146
+
147
/** Catalog of available embedding models, keyed by model identifier. */
export interface ModelsConfig {
  available: Record<string, ModelInfo>
}
150
+
151
/**
 * Chunking/indexing parameters. validateConfig enforces
 * 100 <= chunkSize <= 2000 and 0 <= chunkOverlap < chunkSize.
 */
export interface IndexingConfig {
  chunkSize: number
  chunkOverlap: number
  autoRebuild: boolean
}
156
+
157
// Module-level configuration cache; populated by loadConfig and reset
// via clearConfigCache.
let cachedConfig: MemoryConfig | null = null
159
+
160
+ /**
161
+ * Load configuration from file
162
+ * @param reload Force reload from disk
163
+ * @returns Configuration object
164
+ */
165
+ export async function loadConfig(reload = false): Promise<MemoryConfig> {
166
+ // Return cached config if available
167
+ if (cachedConfig && !reload) {
168
+ return cachedConfig
169
+ }
170
+
171
+ try {
172
+ // Check if config file exists
173
+ if (!(await exists(CONFIG_PATH))) {
174
+ console.warn(`Config file not found: ${CONFIG_PATH}, using defaults`)
175
+ cachedConfig = { ...DEFAULT_CONFIG }
176
+ return cachedConfig
177
+ }
178
+
179
+ // Read config file
180
+ const configData = await readFile(CONFIG_PATH, "utf-8")
181
+ const userConfig = JSON.parse(configData) as Partial<MemoryConfig>
182
+
183
+ // Merge with defaults (user config overrides defaults)
184
+ cachedConfig = mergeConfig(DEFAULT_CONFIG, userConfig)
185
+
186
+ // Validate configuration
187
+ validateConfig(cachedConfig)
188
+
189
+ return cachedConfig
190
+ } catch (error) {
191
+ console.error("Failed to load config, using defaults:", error)
192
+ cachedConfig = { ...DEFAULT_CONFIG }
193
+ return cachedConfig
194
+ }
195
+ }
196
+
197
+ /**
198
+ * Merge user configuration with defaults
199
+ */
200
+ function mergeConfig(
201
+ defaults: MemoryConfig,
202
+ user: Partial<MemoryConfig>
203
+ ): MemoryConfig {
204
+ return {
205
+ ...defaults,
206
+ ...user,
207
+ search: {
208
+ ...defaults.search,
209
+ ...user.search,
210
+ options: {
211
+ ...defaults.search.options,
212
+ ...user.search?.options
213
+ }
214
+ },
215
+ embedding: {
216
+ ...defaults.embedding,
217
+ ...user.embedding,
218
+ cache: {
219
+ ...defaults.embedding.cache,
220
+ ...user.embedding?.cache
221
+ }
222
+ },
223
+ models: {
224
+ ...defaults.models,
225
+ ...user.models
226
+ },
227
+ indexing: {
228
+ ...defaults.indexing,
229
+ ...user.indexing
230
+ }
231
+ }
232
+ }
233
+
234
+ /**
235
+ * Validate configuration
236
+ * @throws Error if configuration is invalid
237
+ */
238
+ function validateConfig(config: MemoryConfig): void {
239
+ // Validate search mode
240
+ const validModes = ["hybrid", "vector", "bm25", "hash"]
241
+ if (!validModes.includes(config.search.mode)) {
242
+ throw new Error(
243
+ `Invalid search mode: ${config.search.mode}. Must be one of: ${validModes.join(", ")}`
244
+ )
245
+ }
246
+
247
+ // Validate embedding model
248
+ if (config.embedding.enabled && config.embedding.provider === "transformers") {
249
+ if (!config.models.available[config.embedding.model]) {
250
+ throw new Error(
251
+ `Unknown embedding model: ${config.embedding.model}. ` +
252
+ `Available models: ${Object.keys(config.models.available).join(", ")}`
253
+ )
254
+ }
255
+ }
256
+
257
+ // Validate hybrid weights
258
+ if (config.search.mode === "hybrid" && config.search.options.hybrid) {
259
+ const { vectorWeight, bm25Weight } = config.search.options.hybrid
260
+ if (vectorWeight < 0 || vectorWeight > 1) {
261
+ throw new Error(`vectorWeight must be between 0 and 1, got: ${vectorWeight}`)
262
+ }
263
+ if (bm25Weight < 0 || bm25Weight > 1) {
264
+ throw new Error(`bm25Weight must be between 0 and 1, got: ${bm25Weight}`)
265
+ }
266
+ if (Math.abs(vectorWeight + bm25Weight - 1.0) > 0.01) {
267
+ console.warn(
268
+ `Hybrid weights don't sum to 1.0 (vector: ${vectorWeight}, bm25: ${bm25Weight}). ` +
269
+ `Weights will be normalized.`
270
+ )
271
+ }
272
+ }
273
+
274
+ // Validate fallback mode
275
+ const validFallbacks = ["hash", "bm25", "error"]
276
+ if (!validFallbacks.includes(config.embedding.fallbackMode)) {
277
+ throw new Error(
278
+ `Invalid fallback mode: ${config.embedding.fallbackMode}. ` +
279
+ `Must be one of: ${validFallbacks.join(", ")}`
280
+ )
281
+ }
282
+
283
+ // Validate indexing parameters
284
+ if (config.indexing.chunkSize < 100 || config.indexing.chunkSize > 2000) {
285
+ throw new Error(
286
+ `chunkSize must be between 100 and 2000, got: ${config.indexing.chunkSize}`
287
+ )
288
+ }
289
+ if (config.indexing.chunkOverlap < 0 || config.indexing.chunkOverlap >= config.indexing.chunkSize) {
290
+ throw new Error(
291
+ `chunkOverlap must be between 0 and chunkSize, got: ${config.indexing.chunkOverlap}`
292
+ )
293
+ }
294
+ }
295
+
296
+ /**
297
+ * Get current configuration
298
+ */
299
+ export async function getConfig(): Promise<MemoryConfig> {
300
+ return loadConfig()
301
+ }
302
+
303
+ /**
304
+ * Get search mode
305
+ */
306
+ export async function getSearchMode(): Promise<SearchConfig["mode"]> {
307
+ const config = await getConfig()
308
+ return config.search.mode
309
+ }
310
+
311
+ /**
312
+ * Get embedding model name
313
+ */
314
+ export async function getEmbeddingModel(): Promise<string> {
315
+ const config = await getConfig()
316
+ return config.embedding.model
317
+ }
318
+
319
+ /**
320
+ * Check if embeddings are enabled
321
+ */
322
+ export async function isEmbeddingEnabled(): Promise<boolean> {
323
+ const config = await getConfig()
324
+ return config.embedding.enabled && config.embedding.provider !== "none"
325
+ }
326
+
327
+ /**
328
+ * Get model info
329
+ */
330
+ export async function getModelInfo(modelName?: string): Promise<ModelInfo | null> {
331
+ const config = await getConfig()
332
+ const model = modelName || config.embedding.model
333
+ return config.models.available[model] || null
334
+ }
335
+
336
+ /**
337
+ * List available models
338
+ */
339
+ export async function listAvailableModels(): Promise<Record<string, ModelInfo>> {
340
+ const config = await getConfig()
341
+ return config.models.available
342
+ }
343
+
344
/**
 * Clear the module-level config cache (for testing).
 * The next loadConfig()/getConfig() call re-reads from disk.
 */
export function clearConfigCache(): void {
  cachedConfig = null
}
350
+
351
/**
 * Get the absolute path of the configuration file
 * ($HOME/.opencode/memory/memory-config.json).
 */
export function getConfigPath(): string {
  return CONFIG_PATH
}