namnam-skills 2.2.2 → 2.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -41,11 +41,13 @@ npx namnam-skills platforms
41
41
  | Cursor | `.cursorrules` | ✅ | - |
42
42
  | Windsurf | `.windsurfrules` | ✅ | - |
43
43
  | Cline | `.clinerules` | ✅ | - |
44
+ | **Roo Code** | `.roo/rules/` | ✅ | - |
44
45
  | Aider | `.aider.conf.yml` | ✅ | - |
45
46
  | OpenAI Codex | `codex.md` | ✅ | - |
46
47
  | Google Gemini | `GEMINI.md` | ✅ | - |
47
48
  | Universal | `AGENTS.md` | ✅ | - |
48
49
 
50
+
49
51
  ## Available Categories
50
52
 
51
53
  | Category | Description | Files |
@@ -102,6 +104,101 @@ npx namnam-skills platforms
102
104
  npx namnam-skills uninstall
103
105
  ```
104
106
 
107
+ ### 🧠 Semantic Commands (Augment-like Features!)
108
+
109
+ The semantic layer provides LLM-powered codebase understanding through embeddings and RAG:
110
+
111
+ ```bash
112
+ # Build semantic index with code embeddings
113
+ namnam semantic index
114
+ namnam sem index --force # Re-index everything
115
+ namnam sem index --provider openai # Use OpenAI embeddings
116
+
117
+ # Semantic search - find code by meaning, not just text
118
+ namnam sem search "authentication logic"
119
+ namnam sem search "how to handle errors" --top 5
120
+
121
+ # Ask questions about your codebase (RAG)
122
+ namnam sem ask "how does the login flow work?"
123
+ namnam sem ask "what design patterns are used?" --provider openai
124
+
125
+ # Find similar code
126
+ namnam sem similar src/auth.js
127
+ namnam sem similar components/Button.tsx --top 10
128
+
129
+ # Generate context for AI tasks
130
+ namnam sem context "implement logout feature"
131
+
132
+ # View index statistics
133
+ namnam sem stats
134
+
135
+ # Configure providers
136
+ namnam sem config --show
137
+ namnam sem config --embedding-provider openai --embedding-model text-embedding-3-small
138
+ namnam sem config --llm-provider anthropic --llm-model claude-3-sonnet-20240229
139
+
140
+ # Clear index
141
+ namnam sem clear
142
+ ```
143
+
144
+ #### Supported Providers
145
+
146
+ | Type | Provider | Models | Notes |
147
+ |------|----------|--------|-------|
148
+ | **Embeddings** | `ollama` | nomic-embed-text, all-minilm | 🆓 Local, no API key |
149
+ | | `openai` | text-embedding-3-small/large | ☁️ Cloud, fast |
150
+ | | `voyage` | voyage-code-2 | 🎯 Code-optimized |
151
+ | **LLM** | `ollama` | codellama, llama2, mistral | 🆓 Local, no API key |
152
+ | | `openai` | gpt-4o, gpt-4-turbo | ☁️ Best quality |
153
+ | | `anthropic` | claude-3-sonnet, claude-3-opus | ☁️ Best for code |
154
+
155
+ #### Quick Setup (Zero Cost with Ollama)
156
+
157
+ ```bash
158
+ # 1. Install Ollama (https://ollama.ai)
159
+ # 2. Pull embedding model
160
+ ollama pull nomic-embed-text
161
+
162
+ # 3. Pull LLM model
163
+ ollama pull codellama
164
+
165
+ # 4. Build index
166
+ namnam sem index
167
+
168
+ # 5. Start asking questions!
169
+ namnam sem ask "explain the main architecture"
170
+ ```
171
+
172
+ #### 🤖 Auto Mode (Like Augment AI!)
173
+
174
+ **Zero configuration needed!** When you install namnam-skills, it automatically:
175
+ 1. Indexes your codebase with embeddings
176
+ 2. Watches for file changes
177
+ 3. Provides context for AI prompts
178
+
179
+ ```bash
180
+ # Check auto mode status
181
+ namnam sem auto
182
+
183
+ # Enable full auto mode
184
+ namnam sem auto --enable
185
+
186
+ # Start real-time file watcher
187
+ namnam sem watch
188
+
189
+ # Generate context for current task
190
+ namnam sem auto-context -q "implement logout" -f src/auth.js
191
+
192
+ # Integrate with Claude Code
193
+ namnam sem integrate
194
+ ```
195
+
196
+ **How it works:**
197
+ - On `npm install` → Auto-indexes codebase
198
+ - On file changes → Updates vectors automatically
199
+ - On AI prompt → Context is available via `namnam sem auto-context`
200
+
201
+
105
202
  ## The `/namnam` Command
106
203
 
107
204
  After installation, use the powerful `/namnam` universal orchestrator:
@@ -169,6 +266,7 @@ your-project/
169
266
  ├── .cursorrules # For Cursor AI
170
267
  ├── .windsurfrules # For Windsurf
171
268
  ├── .clinerules # For Cline
269
+ ├── .roo/rules/ # For Roo Code (5 rule files)
172
270
  ├── codex.md # For OpenAI Codex
173
271
  ├── GEMINI.md # For Google Gemini
174
272
  └── .aider.conf.yml # For Aider
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "namnam-skills",
3
- "version": "2.2.2",
4
- "description": "Ultimate AI Skills Installer - Universal support for Claude, Codex, Cursor, Windsurf, Cline, Aider, Gemini, and Antigravity. 2000+ agents, workflows, and skills with auto-integration.",
3
+ "version": "2.5.0",
4
+ "description": "Ultimate AI Skills Installer with Auto-Semantic Mode (like Augment AI!) - Auto-indexing, file watching, RAG. Universal support for Claude, Codex, Cursor, Windsurf, Cline, Aider, Gemini. 2000+ agents, workflows, and skills.",
5
5
  "author": "NamNam",
6
6
  "license": "MIT",
7
7
  "keywords": [
@@ -15,6 +15,9 @@
15
15
  "cursor",
16
16
  "windsurf",
17
17
  "cline",
18
+ "roo",
19
+ "roocode",
20
+ "roo-code",
18
21
  "aider",
19
22
  "anthropic",
20
23
  "bmad",
@@ -27,7 +30,15 @@
27
30
  "ai-coding",
28
31
  "developer-tools",
29
32
  "antigravity",
30
- "gemini"
33
+ "gemini",
34
+ "embeddings",
35
+ "rag",
36
+ "semantic-search",
37
+ "vector-search",
38
+ "code-understanding",
39
+ "augment",
40
+ "ollama",
41
+ "voyage"
31
42
  ],
32
43
  "type": "module",
33
44
  "bin": {
@@ -0,0 +1,432 @@
1
+ /**
2
+ * Auto-Semantic Engine for NamNam CLI
3
+ *
4
+ * Provides automatic semantic indexing like Augment AI:
5
+ * - Auto-index on project init
6
+ * - File watcher for real-time updates
7
+ * - Auto-inject context into AI prompts
8
+ * - Background processing
9
+ */
10
+
11
+ import fs from 'fs-extra';
12
+ import path from 'path';
13
+ import { SemanticIndex, loadConfig, saveConfig } from './semantic.js';
14
+
15
// ============================================
// Auto-Index Configuration
// ============================================

// Per-project config file, relative to the project root.
const AUTO_CONFIG_FILE = '.namnam/auto-semantic.json';

// Defaults applied when `.namnam/auto-semantic.json` is absent or partial.
// Frozen (including the patterns array) so a caller that mutates the object
// returned on the default path cannot corrupt these defaults for the rest of
// the process. Callers that need a mutable config must spread-copy first.
const DEFAULT_AUTO_CONFIG = Object.freeze({
  enabled: true,        // master switch for the whole auto-semantic layer
  autoIndex: true,      // (re)build the index automatically
  watchFiles: true,     // run the file watcher for real-time updates
  autoContext: true,    // allow automatic context generation
  indexOnInstall: true, // index right after `npm install` (postinstall hook)
  debounceMs: 2000,     // quiet period before re-indexing after a change burst
  excludePatterns: Object.freeze([
    '**/node_modules/**',
    '**/.git/**',
    '**/dist/**',
    '**/build/**',
    '**/.namnam/**'
  ])
});
36
+
37
+ // ============================================
38
+ // Auto-Config Management
39
+ // ============================================
40
+
41
/**
 * Load the auto-semantic configuration for a project.
 *
 * Reads `.namnam/auto-semantic.json` under `cwd` and overlays it on the
 * defaults. Falls back to the defaults when the file is missing or unreadable
 * (parse errors are deliberately swallowed — this is a best-effort read).
 *
 * Fix: previously the shared DEFAULT_AUTO_CONFIG object was returned directly
 * on the fallback paths, so a caller mutating its result corrupted the
 * defaults for the whole process. Always return a fresh object now.
 *
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<object>} Effective config — always a fresh top-level object.
 */
async function getAutoConfig(cwd = process.cwd()) {
  const configPath = path.join(cwd, AUTO_CONFIG_FILE);

  if (await fs.pathExists(configPath)) {
    try {
      const config = await fs.readJson(configPath);
      return { ...DEFAULT_AUTO_CONFIG, ...config };
    } catch {
      // Corrupt or unreadable config file: fall through to defaults.
    }
  }
  // Copy so callers cannot mutate the shared default object.
  return { ...DEFAULT_AUTO_CONFIG };
}
54
+
55
/**
 * Persist the auto-semantic configuration to `.namnam/auto-semantic.json`.
 *
 * @param {object} config - Full configuration object to write.
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<void>}
 */
async function saveAutoConfig(config, cwd = process.cwd()) {
  const target = path.join(cwd, AUTO_CONFIG_FILE);
  // Create `.namnam/` on first save, then write pretty-printed JSON.
  await fs.ensureDir(path.dirname(target));
  await fs.writeJson(target, config, { spaces: 2 });
}
60
+
61
/**
 * Whether the auto-semantic layer is enabled for the given project.
 *
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<boolean>} The effective `enabled` flag.
 */
async function isAutoSemanticEnabled(cwd = process.cwd()) {
  const { enabled } = await getAutoConfig(cwd);
  return enabled;
}
65
+
66
+ // ============================================
67
+ // Auto-Index on Install
68
+ // ============================================
69
+
70
/**
 * Run auto-indexing after `npm install` (invoked from postinstall.js).
 *
 * Skips entirely when the auto-semantic layer or install-time indexing is
 * turned off. On success, records the time and size of the index run in the
 * auto-config file for later status reporting.
 *
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @param {{force?: boolean}} [options] - `force` re-indexes everything.
 * @returns {Promise<object>} One of `{skipped, reason}`,
 *   `{success: true, indexed, total}`, or `{success: false, error}`.
 *   Never throws — a postinstall hook must not crash the install.
 */
async function runAutoIndex(cwd = process.cwd(), options = {}) {
  const autoConfig = await getAutoConfig(cwd);

  const indexingAllowed = autoConfig.enabled && autoConfig.indexOnInstall;
  if (!indexingAllowed) {
    return { skipped: true, reason: 'Auto-index disabled' };
  }

  try {
    const semanticConfig = await loadConfig(cwd);
    const semanticIndex = new SemanticIndex(cwd, semanticConfig);
    await semanticIndex.initialize();

    const result = await semanticIndex.buildIndex({ force: options.force });

    // Stamp the run so `namnam sem stats` style tooling can report it.
    const updatedConfig = {
      ...autoConfig,
      lastIndexed: new Date().toISOString(),
      lastIndexCount: result.total
    };
    await saveAutoConfig(updatedConfig, cwd);

    return { success: true, indexed: result.indexed, total: result.total };
  } catch (error) {
    // Report failure as data rather than throwing (see contract above).
    return { success: false, error: error.message };
  }
}
107
+
108
+ // ============================================
109
+ // File Watcher for Real-time Updates
110
+ // ============================================
111
+
112
// Module-level watcher state: at most one watcher runs per process.
let watcherInstance = null;   // fs.FSWatcher while watching, otherwise null
let debounceTimer = null;     // pending setTimeout handle for the batched re-index
let pendingFiles = new Set(); // filenames changed since the last re-index
115
+
116
/**
 * Convert a glob-style exclude pattern (e.g. "**" + "/node_modules/" + "**")
 * into an anchored RegExp over '/'-separated relative paths.
 *
 * Fixes the previous conversion, which did
 *   pattern.replace(/\*\*／g, '.*').replace(/\*／g, '[^/]*')
 * so the '*' inside the freshly produced '.*' was rewritten again into
 * '.[^/]*', and '.' was never escaped — excluded directories (node_modules,
 * .git, …) were therefore NOT reliably excluded. Placeholders prevent the
 * double substitution, metacharacters are escaped, and '**' + '/' maps to an
 * optional directory prefix so top-level matches work too.
 *
 * @param {string} pattern - Glob pattern using '*' and '**'.
 * @returns {RegExp} Anchored matcher for a whole relative path.
 */
function globToRegExp(pattern) {
  const escaped = pattern.replace(/[.+^${}()|[\]\\]/g, '\\$&');
  const body = escaped
    .replace(/\*\*\//g, '\u0000')  // '**/': any (possibly empty) dir prefix
    .replace(/\*\*/g, '\u0001')    // bare '**': anything, slashes included
    .replace(/\*/g, '[^/]*')       // '*': anything except a path separator
    .replace(/\u0000/g, '(?:.*/)?')
    .replace(/\u0001/g, '.*');
  return new RegExp(`^${body}$`);
}

/**
 * Start watching `cwd` recursively for code-file changes.
 *
 * Changed files are collected into a pending set and the index is refreshed
 * once after `debounceMs` of quiet. Only one watcher runs per process.
 *
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<object>} `{already: true}` if a watcher is running,
 *   `{skipped, reason}` when disabled by config, else `{started: true}`.
 */
async function startWatcher(cwd = process.cwd()) {
  if (watcherInstance) {
    return { already: true };
  }

  const autoConfig = await getAutoConfig(cwd);

  if (!autoConfig.enabled || !autoConfig.watchFiles) {
    return { skipped: true, reason: 'File watching disabled' };
  }

  const { watch } = await import('fs');

  // Pre-compile patterns once instead of rebuilding a RegExp per event.
  const excludeRegexes = autoConfig.excludePatterns.map(globToRegExp);
  const codeExtensions = new Set(['.js', '.ts', '.jsx', '.tsx', '.py', '.go', '.rs', '.java']);

  // Use recursive watching.
  watcherInstance = watch(cwd, { recursive: true }, (eventType, filename) => {
    if (!filename) return;

    // Only index recognized code files.
    if (!codeExtensions.has(path.extname(filename).toLowerCase())) return;

    // Normalize Windows separators so the '/'-based globs match.
    const relPath = filename.split('\\').join('/');
    if (excludeRegexes.some((re) => re.test(relPath))) return;

    // Batch the change and (re)arm the debounce timer.
    pendingFiles.add(filename);

    if (debounceTimer) {
      clearTimeout(debounceTimer);
    }

    debounceTimer = setTimeout(async () => {
      const files = Array.from(pendingFiles);
      pendingFiles.clear();

      try {
        await updateIndexIncremental(files, cwd);
      } catch (error) {
        console.error('Auto-index update failed:', error.message);
      }
    }, autoConfig.debounceMs);
  });

  return { started: true };
}
171
+
172
/**
 * Stop the file watcher and discard any pending debounced work.
 * Safe to call when no watcher is running (idempotent).
 *
 * @returns {{stopped: true}}
 */
function stopWatcher() {
  // Cancel any re-index that is still waiting on the debounce timer.
  if (debounceTimer !== null) {
    clearTimeout(debounceTimer);
    debounceTimer = null;
  }

  // Close the OS-level watcher, if one exists.
  if (watcherInstance !== null) {
    watcherInstance.close();
    watcherInstance = null;
  }

  pendingFiles.clear();
  return { stopped: true };
}
188
+
189
/**
 * Refresh the semantic index after a batch of file changes.
 *
 * NOTE: currently performs a non-forced full rebuild rather than a true
 * per-file incremental update; `changedFiles` is accepted for the future
 * incremental implementation but is not yet consulted.
 *
 * @param {string[]} changedFiles - Paths reported by the watcher (unused for now).
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<object>} Result of `SemanticIndex#buildIndex`.
 */
async function updateIndexIncremental(changedFiles, cwd = process.cwd()) {
  const semanticConfig = await loadConfig(cwd);
  const semanticIndex = new SemanticIndex(cwd, semanticConfig);
  await semanticIndex.initialize();
  return semanticIndex.buildIndex({ force: false });
}
202
+
203
+ // ============================================
204
+ // Auto-Context Generation
205
+ // ============================================
206
+
207
// Rough heuristic used throughout context budgeting: ~4 characters per token.
const CHARS_PER_TOKEN = 4;

/** Estimate the token cost of a piece of text (ceil of chars / 4). */
function estimateTokens(text) {
  return Math.ceil(text.length / CHARS_PER_TOKEN);
}

/**
 * Build the "Current File" markdown section, or null when the file is
 * missing/unspecified. The preview is capped at 3000 chars to bound the prompt.
 */
async function buildCurrentFileSection(currentFile, cwd) {
  if (!currentFile || !(await fs.pathExists(path.join(cwd, currentFile)))) {
    return null;
  }
  const content = await fs.readFile(path.join(cwd, currentFile), 'utf-8');
  return `## Current File: ${currentFile}\n\`\`\`\n${content.substring(0, 3000)}\n\`\`\`\n`;
}

/**
 * Build the semantic-search markdown section from the index, respecting the
 * remaining token budget. Returns {text, totalTokens} or null when the search
 * yields no results.
 */
async function buildSemanticSection(index, query, maxChunks, maxTokens, totalTokens) {
  const searchResults = await index.search(query, { k: maxChunks, threshold: 0.5 });
  if (searchResults.length === 0) {
    return null;
  }

  let text = `## Relevant Code (based on: "${query}")\n\n`;
  for (const result of searchResults) {
    if (totalTokens > maxTokens) break;

    const meta = result.metadata;
    const chunk = `### ${meta.filePath}:${meta.startLine}-${meta.endLine}\n\`\`\`\n${meta.content}\n\`\`\`\n\n`;
    const chunkTokens = estimateTokens(chunk);
    if (totalTokens + chunkTokens > maxTokens) break;

    text += chunk;
    totalTokens += chunkTokens;
  }
  return { text, totalTokens };
}

/**
 * Build the "Recently Edited Files" markdown section (first 3 files, 1000-char
 * snippets), respecting the remaining token budget. Returns
 * {text, totalTokens} or null when no file contributed content.
 */
async function buildRecentFilesSection(recentFiles, cwd, maxTokens, totalTokens) {
  const header = '## Recently Edited Files\n\n';
  let text = header;

  for (const file of recentFiles.slice(0, 3)) {
    if (totalTokens > maxTokens) break;

    const filePath = path.join(cwd, file);
    if (!(await fs.pathExists(filePath))) continue;

    const content = await fs.readFile(filePath, 'utf-8');
    const snippet = content.substring(0, 1000);
    const chunk = `### ${file}\n\`\`\`\n${snippet}${content.length > 1000 ? '\n...' : ''}\n\`\`\`\n\n`;
    const chunkTokens = estimateTokens(chunk);
    if (totalTokens + chunkTokens > maxTokens) break;

    text += chunk;
    totalTokens += chunkTokens;
  }

  return text === header ? null : { text, totalTokens };
}

/**
 * Generate context automatically based on the user's current activity.
 * This is the key feature that makes the tool work like Augment.
 *
 * Assembles up to three markdown sections — current file preview, semantic
 * search hits, recent-file snippets — under an approximate token budget.
 *
 * @param {object} [options]
 * @param {string} [options.query] - User's current question/task.
 * @param {string} [options.currentFile] - File the user is currently editing.
 * @param {string[]} [options.recentFiles] - Recently opened files.
 * @param {number} [options.maxChunks=10] - Max semantic-search results to use.
 * @param {number} [options.maxTokens=8000] - Approximate token budget.
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<?{context: string, tokenEstimate: number, sources: object}>}
 *   Null when disabled by config, on error, or when nothing was gathered.
 */
async function generateAutoContext(options = {}, cwd = process.cwd()) {
  const {
    query = '',
    currentFile = '',
    recentFiles = [],
    maxChunks = 10,
    maxTokens = 8000
  } = options;

  const autoConfig = await getAutoConfig(cwd);
  if (!autoConfig.enabled || !autoConfig.autoContext) {
    return null;
  }

  try {
    const semanticConfig = await loadConfig(cwd);
    const index = new SemanticIndex(cwd, semanticConfig);
    await index.initialize();

    const contextParts = [];
    let totalTokens = 0;

    // 1. Current file context, if available.
    const fileSection = await buildCurrentFileSection(currentFile, cwd);
    if (fileSection) {
      contextParts.push(fileSection);
      totalTokens += estimateTokens(fileSection);
    }

    // 2. Semantic search results, if a query was provided.
    if (query && query.trim()) {
      const semantic = await buildSemanticSection(index, query, maxChunks, maxTokens, totalTokens);
      if (semantic) {
        contextParts.push(semantic.text);
        totalTokens = semantic.totalTokens;
      }
    }

    // 3. Recent files context.
    if (recentFiles.length > 0) {
      const recent = await buildRecentFilesSection(recentFiles, cwd, maxTokens, totalTokens);
      if (recent) {
        contextParts.push(recent.text);
        totalTokens = recent.totalTokens;
      }
    }

    if (contextParts.length === 0) {
      return null;
    }

    return {
      context: contextParts.join('\n'),
      tokenEstimate: totalTokens,
      sources: {
        currentFile: Boolean(currentFile),
        semanticSearch: Boolean(query),
        recentFiles: recentFiles.length > 0
      }
    };
  } catch (error) {
    // Context is best-effort: log and degrade to "no context" rather than throw.
    console.error('Auto-context generation failed:', error.message);
    return null;
  }
}
310
+
311
+ // ============================================
312
+ // Claude Code Integration
313
+ // ============================================
314
+
315
/**
 * Generate CLAUDE.md-style semantic context instructions.
 *
 * Writes a static markdown guide to `.claude/docs/SEMANTIC.md` under `cwd`
 * telling Claude Code how to use the `namnam sem` commands. Creates the
 * directory if needed and overwrites any existing file.
 *
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<string>} Path of the file that was written.
 */
async function generateClaudeIntegration(cwd = process.cwd()) {
  // The escaped backticks (\`) keep the embedded code fences inside this
  // template literal; the markdown below is written verbatim.
  const claudeMd = `# NamNam Semantic Context

This project uses NamNam's semantic layer for enhanced code understanding.

## Auto-Context

The semantic index provides automatic context injection. When asking questions about this codebase:

1. **Semantic Search**: Use \`namnam sem search "<query>"\` to find relevant code
2. **Ask Questions**: Use \`namnam sem ask "<question>"\` for RAG-powered answers
3. **Find Similar**: Use \`namnam sem similar <file>\` to find related code

## Context Generation

For any task, generate relevant context with:
\`\`\`bash
namnam sem context "<your task description>"
\`\`\`

This will output relevant code snippets that you should consider when working on the task.

## Configuration

- Embedding: Ollama (local) / OpenAI / Voyage
- LLM: Ollama (local) / OpenAI / Anthropic
- Run \`namnam sem config --show\` to see current settings

## Auto-Index Status

The semantic index is automatically maintained. Check status with:
\`\`\`bash
namnam sem stats
\`\`\`
`;

  const claudePath = path.join(cwd, '.claude', 'docs', 'SEMANTIC.md');
  await fs.ensureDir(path.dirname(claudePath));
  await fs.writeFile(claudePath, claudeMd);

  return claudePath;
}
360
+
361
/**
 * Create a hook script that Claude Code can use to get context.
 *
 * Writes a Node ESM script to `.namnam/hooks/context-hook.js` that prints
 * auto-generated context for a query to stdout.
 *
 * Fixes in the generated script:
 * - Usage comment previously pointed at `.namnam/context-hook.js`, but the
 *   file is written to `.namnam/hooks/context-hook.js`.
 * - The import was './src/auto-semantic.js', which ESM resolves relative to
 *   the hook file itself (i.e. `.namnam/hooks/src/...` — nonexistent). It now
 *   climbs to the project root: '../../src/auto-semantic.js'.
 *   NOTE(review): assumes this module lives at <root>/src/auto-semantic.js —
 *   confirm against the actual install layout.
 * - The async IIFE previously floated with no rejection handler; a failure
 *   would surface as an unhandled rejection. It now reports and sets exitCode.
 *
 * @param {string} [cwd] - Project root; defaults to the current working directory.
 * @returns {Promise<string>} Path of the hook script that was written.
 */
async function createContextHook(cwd = process.cwd()) {
  const hookContent = `#!/usr/bin/env node
/**
 * NamNam Semantic Context Hook
 *
 * This script is called by Claude Code to get relevant context.
 * Usage: node .namnam/hooks/context-hook.js "<query>" [--current-file <file>]
 */

import { generateAutoContext } from '../../src/auto-semantic.js';

const args = process.argv.slice(2);
const query = args[0] || '';
const currentFileIndex = args.indexOf('--current-file');
const currentFile = currentFileIndex > -1 ? args[currentFileIndex + 1] : '';

(async () => {
  const result = await generateAutoContext({
    query,
    currentFile
  });

  if (result) {
    console.log(result.context);
  }
})().catch((error) => {
  console.error('Context hook failed:', error.message);
  process.exitCode = 1;
});
`;

  const hookPath = path.join(cwd, '.namnam', 'hooks', 'context-hook.js');
  await fs.ensureDir(path.dirname(hookPath));
  await fs.writeFile(hookPath, hookContent);

  return hookPath;
}
398
+
399
+ // ============================================
400
+ // Exports
401
+ // ============================================
402
+
403
+ export {
404
+ // Config
405
+ getAutoConfig,
406
+ saveAutoConfig,
407
+ isAutoSemanticEnabled,
408
+ DEFAULT_AUTO_CONFIG,
409
+
410
+ // Auto-index
411
+ runAutoIndex,
412
+
413
+ // File watcher
414
+ startWatcher,
415
+ stopWatcher,
416
+ updateIndexIncremental,
417
+
418
+ // Auto-context
419
+ generateAutoContext,
420
+
421
+ // Claude integration
422
+ generateClaudeIntegration,
423
+ createContextHook
424
+ };
425
+
426
+ export default {
427
+ runAutoIndex,
428
+ startWatcher,
429
+ stopWatcher,
430
+ generateAutoContext,
431
+ generateClaudeIntegration
432
+ };