@zabaca/lattice 0.1.2 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +86 -12
- package/commands/entity-extract.md +163 -0
- package/commands/graph-sync.md +117 -0
- package/commands/research.md +183 -0
- package/dist/cli.js +117 -70
- package/package.json +2 -1
package/README.md CHANGED

@@ -67,23 +67,98 @@ VOYAGE_MODEL=voyage-3
 LOG_LEVEL=info
 ```
 
-### 4.
+### 4. Initialize Claude Code Integration
+
+Install Lattice slash commands for Claude Code:
+
+```bash
+lattice init             # For this project only
+# or
+lattice init --global    # For all projects (~/.claude/commands/)
+```
+
+### 5. Launch Claude Code
+
+```bash
+claude
+```
+
+### 6. Research a Topic
+
+Use the `/research` command to search existing knowledge or create new documentation:
 
 ```bash
-
-
+/research "knowledge graphs"
+```
+
+This will:
+- Search your existing docs for related content
+- Present findings and ask if you need new research
+- Create organized documentation if requested
+
+### 7. Sync & Search
 
-
-lattice sync ./docs ./notes
+After creating or updating documents, sync to the graph and search:
 
-
-
+```bash
+/graph-sync                  # Extract entities and sync
+lattice search "your query"  # Semantic search
+```
+
+---
+
+## Using /research
+
+The `/research` command provides an AI-assisted research workflow.
+
+### Searching Existing Research
+
+```bash
+/research "semantic search"
+```
+
+Claude will:
+1. Search your docs using semantic similarity
+2. Read and summarize relevant findings
+3. Ask if existing research answers your question
+
+### Creating New Research
+
+```bash
+/research "new topic to explore"
+```
+
+If no existing docs match, Claude will:
+1. Perform web research
+2. Create a new topic directory (`docs/new-topic/`)
+3. Generate README.md index and research document
+4. Remind you to run `/graph-sync`
+
+### Batch Syncing
+
+`/graph-sync` doesn't need to run after each research session. It identifies all documents needing sync:
+
+```bash
+# After multiple research sessions
+/graph-sync
+
+# Shows: "4 documents need syncing"
+# Extracts entities and syncs all at once
 ```
 
 ---
 
 ## CLI Commands
 
+### `lattice init`
+
+Install Claude Code slash commands for Lattice.
+
+```bash
+lattice init           # Install to .claude/commands/ (current project)
+lattice init --global  # Install to ~/.claude/commands/ (all projects)
+```
+
 ### `lattice sync`
 
 Synchronize documents to the knowledge graph.
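Taken together, the new quick-start steps replace the old "sync your documents first" flow with an init, research, sync loop. A minimal sketch of that loop, assuming the FalkorDB and Voyage settings from steps 1-3 are already configured:

```bash
# Minimal pass over the new quick start (steps 4-7).
lattice init                        # install /research, /graph-sync, /entity-extract
claude                              # then, inside Claude Code:
#   /research "knowledge graphs"    #   search docs, optionally write new ones
#   /graph-sync                     #   extract entities and sync the graph
lattice search "knowledge graphs"   # verify the new docs are searchable
```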
@@ -108,13 +183,12 @@ lattice status --verbose  # Include detailed change information
 
 ### `lattice search`
 
-
+Semantic search across the knowledge graph.
 
 ```bash
-lattice search
-lattice search --
-lattice search --
-lattice search --limit 10           # Limit results
+lattice search "query"                      # Search all entity types
+lattice search --label Technology "query"   # Filter by entity label
+lattice search --limit 10 "query"           # Limit results (default: 20)
 ```
 
 ### `lattice stats`
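The reworked `lattice search` takes the query as a positional argument and scopes filtering to a single `--label` option (the old flag-based variants are removed; see the `dist/cli.js` hunk later in this diff). An illustrative invocation, with the output shape based on the new command's formatting:

```bash
lattice search --label Technology --limit 5 "vector databases"
# 1. [Technology] FalkorDB
#    Similarity: 38.21%
# ...
# (scores are illustrative; exact values depend on your embeddings)
```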
package/commands/entity-extract.md ADDED

@@ -0,0 +1,163 @@
+---
+description: Extract entities from existing document and add to frontmatter
+argument-hint: file-path
+model: haiku
+---
+
+Extract entities and relationships from the markdown file "$ARGUMENTS" and update its frontmatter.
+
+## IMPORTANT: Always Re-Extract
+
+Even if the document already has frontmatter with entities:
+- **RE-READ** the entire document content
+- **RE-EXTRACT** entities based on CURRENT content
+- **REPLACE** existing entities with fresh extraction
+- **DO NOT skip** because "entities already exist"
+
+The goal is to ensure entities reflect the document's CURRENT state, not preserve stale metadata from previous extractions.
+
+## Process
+
+1. **Verify file exists**:
+   - Check if "$ARGUMENTS" exists
+   - If not, inform user and suggest the correct path
+   - Verify it's a markdown file
+
+2. **Read and analyze the document**:
+   - Read the full content of the file
+   - Check for existing frontmatter
+   - Analyze document context and purpose
+
+3. **Extract entities** by identifying:
+   - **Technologies**: Languages, frameworks, databases, libraries, tools mentioned
+   - **Concepts**: Patterns, methodologies, theories, architectural approaches
+   - **Tools & Services**: Software, platforms, applications referenced
+   - **Processes**: Workflows, procedures, methodologies described
+   - **Organizations**: Companies, teams, projects mentioned
+
+   Guidelines:
+   - Focus on 3-10 most significant entities for the document
+   - Use specific names (e.g., "PostgreSQL" not "database")
+   - Prefer proper nouns and technical terms
+   - Entities should be directly relevant to the document's focus
+
+4. **Generate document summary**:
+   - Create a 2-3 sentence summary (50-100 words) that captures:
+     - The document's main purpose/topic
+     - Key technologies or concepts covered
+     - Primary conclusions or recommendations (if any)
+
+   Summary guidelines:
+   - Write in third person
+   - Include key terms that enable semantic search
+   - Focus on what the document IS ABOUT, not just what it contains
+   - Make it suitable for embedding generation
+
+   Example:
+   ```yaml
+   summary: >
+     Research on integrating multiple messaging platforms (Slack, Teams, Discord)
+     into a unified API. Covers platform API comparisons, recommended tech stack
+     (NestJS, PostgreSQL, Redis), and a phased implementation approach for
+     bi-directional message synchronization.
+   ```
+
+5. **Extract relationships** between entities:
+   - **REFERENCES**: This entity references/relates to another entity
+
+   Use `source: this` when the document itself references an entity.
+   Use entity names as source/target when entities reference each other.
+
+6. **Determine entity types** (choose most appropriate):
+   - `Topic`: Research domains (usually auto-derived from directory)
+   - `Technology`: Programming languages, frameworks, databases
+   - `Concept`: Patterns, theories, methodologies
+   - `Tool`: Software, services, platforms
+   - `Process`: Workflows, procedures, methodologies
+   - `Person`: People
+   - `Organization`: Companies, teams, projects
+
+7. **Update frontmatter**:
+   - If frontmatter exists: **REPLACE** entities and relationships with fresh extraction
+   - If no frontmatter: Create new frontmatter block
+   - Preserve existing fields like `created`, `status`, `topic` (but update `updated` date)
+   - **Replace** the `summary`, `entities` and `relationships` sections entirely
+   - If no topic field exists, derive it from the directory name
+     (e.g., `docs/claude-code/file.md` -> `topic: claude-code`)
+
+   Frontmatter template:
+   ```yaml
+   ---
+   created: YYYY-MM-DD
+   updated: YYYY-MM-DD
+   status: complete|ongoing|draft
+   topic: auto-derived-from-directory
+   summary: >
+     2-3 sentence summary capturing the document's purpose, key topics,
+     and conclusions. Written in third person with key terms for semantic search.
+   entities:
+     - name: EntityName
+       type: Topic|Technology|Concept|Tool|Process|Person|Organization
+       description: Brief description of entity and its role in this document
+     - name: AnotherEntity
+       type: Concept
+       description: Another entity description
+   relationships:
+     - source: this
+       relation: REFERENCES
+       target: MainTopic
+     - source: EntityA
+       relation: REFERENCES
+       target: EntityB
+   graph:
+     domain: detected-domain
+   ---
+   ```
+
+8. **Entity naming consistency**:
+   - Check if similar entities exist in other documents
+   - Use exact same names when referring to same entities
+   - Be specific: "React" not "React library"
+   - Use canonical names (e.g., "TypeScript" not "TS")
+
+9. **Relationship guidelines**:
+   - Start with "source: this" for primary entity the document covers
+   - Include 3-7 key relationships
+   - Relationships should help build knowledge graph connections
+   - Avoid redundant relationships
+
+10. **Validate and auto-fix** (retry loop):
+    After saving, run validation:
+
+    ```bash
+    lattice validate 2>&1 | grep -A10 "$ARGUMENTS"
+    ```
+
+    **If validation reports errors for this file:**
+    1. Parse the error message to identify the issue
+    2. Fix the frontmatter:
+       - **Invalid entity type** (e.g., "Platform", "Feature"): Change to valid type
+       - **Invalid relation** (e.g., "AFFECTS", "ENABLES"): Change to valid relation
+       - **String instead of object**: Reformat to proper object structure
+    3. Save the fixed frontmatter
+    4. Re-run validation
+    5. Repeat until validation passes (max 3 attempts)
+
+    **Valid entity types:** `Topic`, `Technology`, `Concept`, `Tool`, `Process`, `Person`, `Organization`, `Document`
+
+    **Valid relations:** `REFERENCES`
+
+11. **Confirmation**:
+    - Show the file path
+    - Show the generated summary
+    - List extracted entities with types
+    - List extracted relationships
+    - Confirm validation passed (or show fixes made)
+
+## Important Notes
+
+- **Preserve existing content**: Do not modify the markdown content itself, only the frontmatter
+- **YAML validity**: Ensure all YAML is properly formatted
+- **Replace strategy**: Always replace entities/relationships with fresh extraction (don't merge with old)
+- **Be selective**: Focus on entities that would be valuable for knowledge graph connections
+- **Descriptions**: Write descriptions from the perspective of how the entity is used/discussed in THIS document
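The step-10 retry loop above is specified in prose; a hedged shell rendering of the same loop, using only the `lattice validate` pipeline shown in the command file (the `FILE` path is hypothetical):

```bash
FILE="docs/claude-code/file.md"   # hypothetical document path
for attempt in 1 2 3; do
  # grep exits 0 when validation output mentions the file, i.e. errors remain
  if lattice validate 2>&1 | grep -A10 "$FILE"; then
    echo "attempt $attempt: errors above; fix the frontmatter and retry"
  else
    echo "validation passed for $FILE"
    break
  fi
done
```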
package/commands/graph-sync.md ADDED

@@ -0,0 +1,117 @@
+---
+description: Extract entities from modified docs and sync to graph
+model: sonnet
+---
+
+Identify modified documents, extract entities from them, and sync to the knowledge graph.
+
+## Process
+
+### Step 1: Check What Needs Syncing
+
+Run the status command to identify modified documents:
+
+```bash
+lattice status
+```
+
+This will show:
+- **New** documents not yet in the graph
+- **Updated** documents that have changed since last sync
+
+If no documents need syncing, report that and exit.
+
+### Step 2: Run Entity Extraction (Parallel Execution)
+
+For each new or updated document identified:
+
+1. Use the **Task subagent pattern** with Haiku model for parallel execution
+2. Launch multiple Task agents simultaneously (one per document)
+3. Each agent should:
+   - Invoke `/entity-extract <path>`
+   - Follow expanded instructions
+   - Extract entities and update frontmatter
+   - Report completion
+
+**Example Task agent invocation:**
+```
+Task(
+  subagent_type="general-purpose",
+  model="haiku",
+  prompt="Use /entity-extract docs/topic/document.md to extract entities. Follow all instructions and report completion."
+)
+```
+
+**For multiple documents, launch agents in parallel:**
+```
+// In a single message, launch multiple Task tool calls:
+Task(subagent_type="general-purpose", model="haiku", prompt="/entity-extract docs/topic-a/README.md ...")
+Task(subagent_type="general-purpose", model="haiku", prompt="/entity-extract docs/topic-b/notes.md ...")
+Task(subagent_type="general-purpose", model="haiku", prompt="/entity-extract docs/topic-c/README.md ...")
+```
+
+This is much faster than sequential execution for multiple documents.
+
+### Step 3: Sync to Graph
+
+After all entity extractions are complete:
+
+```bash
+lattice sync
+```
+
+**Note:** The sync command validates frontmatter schema and will fail with errors if:
+- Entities are malformed (strings instead of objects with `name`/`type`)
+- Relationships are malformed (strings instead of objects with `source`/`relation`/`target`)
+
+If sync fails due to schema errors, the entity extraction didn't follow the correct format.
+
+This will:
+- Update document nodes in FalkorDB
+- Generate embeddings for semantic search
+- Create entity relationships
+- Update the sync manifest
+
+### Step 4: Report Results
+
+Summarize what was processed:
+- Number of documents with entity extraction
+- Entities extracted per document
+- Graph sync statistics (added, updated, unchanged)
+- Any errors encountered
+
+## Example Output
+
+```
+## Entity Extraction
+
+Processed 3 documents:
+
+1. docs/american-holidays/README.md
+   - 4 entities extracted
+   - 3 relationships defined
+
+2. docs/american-holidays/thanksgiving-vs-christmas.md
+   - 8 entities extracted
+   - 5 relationships defined
+
+3. docs/bun-nestjs/notes.md
+   - 5 entities extracted
+   - 4 relationships defined
+
+## Graph Sync
+
+- Added: 2
+- Updated: 1
+- Unchanged: 126
+- Duration: 1.2s
+```
+
+## Important Notes
+
+- **Parallel execution** - Launch all entity extractions simultaneously for speed
+- Entity extraction runs per-document for quality
+- Graph sync is incremental (only processes changes)
+- Safe to run frequently - won't duplicate or corrupt data
+- If extraction fails on a doc, other agents continue - report all errors at end
+- **Batch syncing**: You don't need to run after each `/research` - run once after multiple sessions
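In shell terms the command reduces to a status check, per-document extraction, and one sync. A rough sketch, with the Task-agent dispatch (which happens inside Claude Code, not in bash) stubbed as a comment:

```bash
lattice status    # lists New/Updated documents that need syncing
# ...one /entity-extract Task agent per listed document runs here...
lattice sync      # validates frontmatter, generates embeddings, updates FalkorDB
lattice status    # should now report nothing left to sync
```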
package/commands/research.md ADDED

@@ -0,0 +1,183 @@
+---
+description: Research a topic - searches existing docs, asks before new research
+argument-hint: topic-query
+model: sonnet
+---
+
+Research the topic "$ARGUMENTS" by first checking existing documentation, then performing new research if needed.
+
+## Process
+
+### Step 1: Search Existing Research
+
+Run semantic search to find related documents:
+
+```bash
+lattice search "$ARGUMENTS" --limit 10
+```
+
+### Step 2: Review Search Results
+
+Review the top results from the semantic search:
+
+1. **Read top results** regardless of path - high similarity may indicate related content
+2. **Path/title matching** is a bonus signal, not a filter
+3. **Don't dismiss** high-similarity docs just because path doesn't match query
+4. Use judgment after reading - the doc content determines relevance, not the filename
+
+**Calibration notes:**
+- Exact topic matches often show 30-40% similarity
+- Unrelated docs can sometimes show 60%+ similarity
+- Read the actual content to determine true relevance
+
+For each promising result:
+- Read the document
+- Check if it answers the user's question
+- Note relevant sections
+
+### Step 3: Present Findings to User
+
+Summarize what you found in existing docs:
+- What topics are covered
+- Quote relevant sections if helpful
+- Identify gaps in existing research
+
+Ask the user: **"Does this existing research cover your question?"**
+
+### Step 4: Ask About New Research
+
+Use AskUserQuestion to ask:
+- **"Should I perform new research on this topic?"**
+- Options:
+  - Yes, research and create new docs
+  - Yes, research and update existing docs
+  - No, existing research is sufficient
+
+If user says **No** → Done, conversation complete.
+
+### Step 5: Perform Research (if requested)
+
+If user wants new research:
+1. Use WebSearch to find current information
+2. Gather and synthesize findings
+3. Focus on what's missing from existing docs
+
+### Step 6: Determine Topic and Filename
+
+**Identify the topic directory:**
+- Check if a relevant `docs/{topic-name}/` directory already exists
+- If not, derive a new topic name from the query (kebab-case)
+
+**Derive the research filename:**
+Auto-derive from the specific focus of the query:
+
+| Query | Topic Dir | Research File |
+|-------|-----------|---------------|
+| "tesla model s value retention" | `tesla-model-s/` | `value-retention.md` |
+| "bun vs node performance" | `bun-nodejs/` | `performance-comparison.md` |
+| "graphql authentication patterns" | `graphql/` | `authentication-patterns.md` |
+
+**Filename guidelines:**
+- Use kebab-case
+- Be descriptive of the specific research focus
+- Avoid generic names like `notes.md` or `research.md`
+- Keep it concise (2-4 words)
+
+### Step 7: Create/Update Files
+
+#### For NEW Topics (directory doesn't exist)
+
+Create TWO files:
+
+**1. `docs/{topic-name}/README.md`** (index):
+```markdown
+---
+created: [TODAY'S DATE]
+updated: [TODAY'S DATE]
+status: active
+topic: {topic-name}
+summary: >
+  Brief description of the topic area for semantic search.
+---
+
+# {Topic Title}
+
+Brief description of what this topic covers.
+
+## Documents
+
+| Document | Description |
+|----------|-------------|
+| [{Research Title}](./{research-filename}.md) | Brief description |
+
+## Related Research
+
+- [Related Topic](../related-topic/)
+```
+
+**2. `docs/{topic-name}/{research-filename}.md`** (content):
+```markdown
+---
+created: [TODAY'S DATE]
+updated: [TODAY'S DATE]
+status: complete
+topic: {topic-name}
+summary: >
+  Detailed summary of this specific research for semantic search.
+---
+
+# {Research Title}
+
+## Purpose
+
+What this research addresses.
+
+## Key Findings
+
+- Finding 1
+- Finding 2
+
+## [Content sections as needed...]
+
+## Sources
+
+1. [Source](URL)
+```
+
+#### For EXISTING Topics (directory exists)
+
+**1. Create** `docs/{topic-name}/{research-filename}.md` with content template above
+
+**2. Update** `docs/{topic-name}/README.md`:
+- Add new row to the Documents table
+- Update the `updated` date in frontmatter
+
+### Step 8: Confirmation
+
+After creating files, confirm:
+- Topic directory path
+- README.md created/updated
+- Research file created with name
+- Remind user to run `/graph-sync` to extract entities
+
+## Important Notes
+
+- **Do NOT** auto-run entity extraction - use `/graph-sync` separately
+- **Always create README.md** for new topics (lightweight index)
+- **Always create separate research file** (never put research content in README)
+- Use kebab-case for all directory and file names
+- Include today's date in YYYY-MM-DD format
+- Always cite sources with URLs
+- Cross-link to related research topics when relevant
+
+## File Structure Standard
+
+```
+docs/{topic-name}/
+├── README.md           # Index: links to docs, brief overview
+├── {research-1}.md     # Specific research
+├── {research-2}.md     # Additional research
+└── {research-n}.md     # Expandable as needed
+```
+
+This structure allows topics to grow organically while keeping README as a clean navigation index.
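For a concrete query such as "graphql authentication patterns" (one of the hypothetical examples in the step-6 table), steps 6-7 leave this on disk:

```bash
ls docs/graphql/
# README.md                    <- index; its Documents table gains one row
# authentication-patterns.md   <- the new research file (kebab-case, 2-4 words)
```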
package/dist/cli.js CHANGED

@@ -2385,98 +2385,65 @@ Relationship Types (${stats.relationshipTypes.length}):`);
     process.exit(1);
   }
 });
-program.command("search").description("
+program.command("search <query>").description("Semantic search across the knowledge graph").option("-l, --label <label>", "Filter by entity label (e.g., Technology, Concept, Document)").option("--limit <n>", "Limit results", "20").action(async (query, options) => {
   let app;
   try {
     app = await NestFactory3.createApplicationContext(AppModule, {
       logger: ["error"]
     });
     const graph = app.get(GraphService);
-
-    const embedding = app.get(EmbeddingService);
-    const limit2 = Math.min(parseInt(options.limit, 10), 100);
-    try {
-      const queryEmbedding = await embedding.generateEmbedding(options.semantic);
-      const results2 = await graph.vectorSearchAll(queryEmbedding, limit2);
-      console.log(`
-=== Semantic Search Results for "${options.semantic}" ===
-`);
-      if (results2.length === 0) {
-        console.log(`No results found with semantic search.
-`);
-        await app.close();
-        process.exit(0);
-      }
-      results2.forEach((result2, idx) => {
-        console.log(`${idx + 1}. [${result2.label}] ${result2.name}`);
-        if (result2.title) {
-          console.log(`  Title: ${result2.title}`);
-        }
-        if (result2.description && result2.label !== "Document") {
-          const desc = result2.description.length > 80 ? result2.description.slice(0, 80) + "..." : result2.description;
-          console.log(`  ${desc}`);
-        }
-        console.log(`  Similarity: ${(result2.score * 100).toFixed(2)}%`);
-      });
-      console.log();
-      await app.close();
-      process.exit(0);
-    } catch (semanticError) {
-      const errorMsg = semanticError instanceof Error ? semanticError.message : String(semanticError);
-      console.error("Semantic search error:", errorMsg);
-      if (errorMsg.includes("no embeddings") || errorMsg.includes("vector")) {
-        console.log(`
-Note: Semantic search requires embeddings to be generated first.`);
-        console.log(`Run 'lattice sync' to generate embeddings for documents.
-`);
-      }
-      await app.close();
-      process.exit(1);
-    }
-  }
-  let cypher;
+    const embedding = app.get(EmbeddingService);
     const limit = Math.min(parseInt(options.limit, 10), 100);
-
-
-
-
-
-
-
-
-
-
+    const queryEmbedding = await embedding.generateEmbedding(query);
+    let results;
+    if (options.label) {
+      const labelResults = await graph.vectorSearch(options.label, queryEmbedding, limit);
+      results = labelResults.map((r) => ({
+        name: r.name,
+        label: options.label,
+        title: r.title,
+        score: r.score
+      }));
     } else {
-
+      results = await graph.vectorSearchAll(queryEmbedding, limit);
     }
-    const
-    const results = result.resultSet || [];
+    const labelSuffix = options.label ? ` (${options.label})` : "";
     console.log(`
-=== Search Results
+=== Semantic Search Results for "${query}"${labelSuffix} ===
 `);
     if (results.length === 0) {
-      console.log(`No
+      console.log(`No results found.
+`);
+      if (options.label) {
+        console.log(`Tip: Try without --label to search all entity types.
 `);
+      }
       await app.close();
       process.exit(0);
     }
-    results.forEach((
-
-
-      console.log(`[${labels}] ${name}`);
-      if (node.properties?.description) {
-        console.log(`  Description: ${node.properties.description}`);
+    results.forEach((result, idx) => {
+      console.log(`${idx + 1}. [${result.label}] ${result.name}`);
+      if (result.title) {
+        console.log(`  Title: ${result.title}`);
       }
-      if (
-
+      if (result.description && result.label !== "Document") {
+        const desc = result.description.length > 80 ? result.description.slice(0, 80) + "..." : result.description;
+        console.log(`  ${desc}`);
       }
+      console.log(`  Similarity: ${(result.score * 100).toFixed(2)}%`);
     });
     console.log();
     await app.close();
     process.exit(0);
   } catch (error) {
-
+    const errorMsg = error instanceof Error ? error.message : String(error);
+    console.error("Error:", errorMsg);
+    if (errorMsg.includes("no embeddings") || errorMsg.includes("vector")) {
+      console.log(`
+Note: Semantic search requires embeddings to be generated first.`);
+      console.log(`Run 'lattice sync' to generate embeddings for documents.
+`);
+    }
     if (app)
       await app.close();
     process.exit(1);
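Two observable behaviors of the rewritten action are worth calling out: the zero-hit hint when `--label` is set, and the embeddings hint on failure. Roughly, with the messages copied from the code above and the invocations illustrative:

```bash
lattice search --label Concept "sync manifest"
# No results found.
# Tip: Try without --label to search all entity types.

lattice search "anything"    # before any 'lattice sync' has generated embeddings
# Note: Semantic search requires embeddings to be generated first.
# Run 'lattice sync' to generate embeddings for documents.
```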
@@ -2699,8 +2666,88 @@ function registerOntologyCommand(program) {
   }
   });
 }
+// src/commands/init.command.ts
+import * as fs from "fs/promises";
+import * as path from "path";
+import { fileURLToPath } from "url";
+import { homedir } from "os";
+var __filename2 = fileURLToPath(import.meta.url);
+var __dirname2 = path.dirname(__filename2);
+var COMMANDS = ["research.md", "graph-sync.md", "entity-extract.md"];
+function registerInitCommand(program) {
+  program.command("init").description("Install Claude Code slash commands for Lattice").option("-g, --global", "Install to ~/.claude/commands/ (available in all projects)").action(async (options) => {
+    try {
+      const targetDir = options.global ? path.join(homedir(), ".claude", "commands") : path.join(process.cwd(), ".claude", "commands");
+      let commandsSourceDir = path.resolve(__dirname2, "..", "commands");
+      try {
+        await fs.access(commandsSourceDir);
+      } catch {
+        commandsSourceDir = path.resolve(__dirname2, "..", "..", "commands");
+      }
+      try {
+        await fs.access(commandsSourceDir);
+      } catch {
+        console.error("Error: Commands source directory not found at", commandsSourceDir);
+        console.error("This may indicate a corrupted installation. Try reinstalling @zabaca/lattice.");
+        process.exit(1);
+      }
+      await fs.mkdir(targetDir, { recursive: true });
+      let copied = 0;
+      let skipped = 0;
+      const installed = [];
+      for (const file of COMMANDS) {
+        const sourcePath = path.join(commandsSourceDir, file);
+        const targetPath = path.join(targetDir, file);
+        try {
+          await fs.access(sourcePath);
+          try {
+            await fs.access(targetPath);
+            const sourceContent = await fs.readFile(sourcePath, "utf-8");
+            const targetContent = await fs.readFile(targetPath, "utf-8");
+            if (sourceContent === targetContent) {
+              skipped++;
+              continue;
+            }
+          } catch {}
+          await fs.copyFile(sourcePath, targetPath);
+          installed.push(file);
+          copied++;
+        } catch (err) {
+          console.error(`Warning: Could not copy ${file}:`, err instanceof Error ? err.message : String(err));
+        }
+      }
+      console.log();
+      console.log(`\u2705 Lattice commands installed to ${targetDir}`);
+      console.log();
+      if (copied > 0) {
+        console.log(`Installed ${copied} command(s):`);
+        installed.forEach((f) => {
+          const name = f.replace(".md", "");
+          console.log(`  - /${name}`);
+        });
+      }
+      if (skipped > 0) {
+        console.log(`Skipped ${skipped} unchanged command(s)`);
+      }
+      console.log();
+      console.log("Available commands in Claude Code:");
+      console.log("  /research <topic>  - AI-assisted research workflow");
+      console.log("  /graph-sync        - Extract entities and sync to graph");
+      console.log("  /entity-extract    - Extract entities from a single document");
+      console.log();
+      if (!options.global) {
+        console.log("\uD83D\uDCA1 Tip: Use 'lattice init --global' to install for all projects");
+      }
+      process.exit(0);
+    } catch (error) {
+      console.error("Error:", error instanceof Error ? error.message : String(error));
+      process.exit(1);
+    }
+  });
+}
 // src/main.ts
-program.name("lattice").description("Human-initiated, AI-powered knowledge graph for markdown documentation").version("0.1.2");
+program.name("lattice").description("Human-initiated, AI-powered knowledge graph for markdown documentation").version("0.3.0");
+registerInitCommand(program);
 registerSyncCommand(program);
 registerStatusCommand(program);
 registerQueryCommands(program);
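The new `init` command is idempotent: it compares file contents and skips unchanged commands, so re-running it is safe. A sketch of a second run in the same project (output assembled from the strings in the code above; the install path is illustrative):

```bash
lattice init
# ✅ Lattice commands installed to /path/to/project/.claude/commands
# Skipped 3 unchanged command(s)
# Available commands in Claude Code:
#   /research <topic>  - AI-assisted research workflow
#   /graph-sync        - Extract entities and sync to graph
#   /entity-extract    - Extract entities from a single document
# 💡 Tip: Use 'lattice init --global' to install for all projects
```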
package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@zabaca/lattice",
-  "version": "0.1.2",
+  "version": "0.3.0",
   "description": "Human-initiated, AI-powered knowledge graph for markdown documentation",
   "type": "module",
   "bin": {
@@ -8,6 +8,7 @@
   },
   "files": [
     "dist",
+    "commands",
     "README.md"
   ],
   "scripts": {