claude-self-reflect 2.5.4 → 2.5.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -343,13 +343,25 @@ async function importConversations() {
   console.log('\n🔍 Checking conversation baseline...');
 
   // Check if baseline exists by looking for imported files state
+  const configDir = path.join(os.homedir(), '.claude-self-reflect', 'config');
   const stateFile = path.join(configDir, 'imported-files.json');
   let hasBaseline = false;
+  let needsMetadataMigration = false;
 
   try {
-    if (
-    const state = JSON.parse(
+    if (fsSync.existsSync(stateFile)) {
+      const state = JSON.parse(fsSync.readFileSync(stateFile, 'utf8'));
       hasBaseline = state.imported_files && Object.keys(state.imported_files).length > 0;
+
+      // Check if any imported files are in old format (string timestamp vs object)
+      if (hasBaseline) {
+        for (const [file, data] of Object.entries(state.imported_files)) {
+          if (typeof data === 'string') {
+            needsMetadataMigration = true;
+            break;
+          }
+        }
+      }
     }
   } catch (e) {
     // State file doesn't exist or is invalid
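
The migration check above hinges on the shape of each entry in imported-files.json: older releases stored a bare timestamp string per file, newer releases store an object. A minimal sketch of the two shapes with the check mirrored in Python (the `imported_at` field name inside the new-format object is a hypothetical illustration, not taken from the diff):

```python
# Old format: each value is a bare timestamp string -> triggers needsMetadataMigration
old_state = {"imported_files": {"conv-a.jsonl": "2024-01-01T00:00:00"}}

# New format: each value is an object ("imported_at" is a hypothetical field name)
new_state = {"imported_files": {"conv-a.jsonl": {"imported_at": "2024-01-01T00:00:00"}}}

def needs_migration(state: dict) -> bool:
    # Mirrors the `typeof data === 'string'` check in the installer above
    return any(isinstance(v, str) for v in state.get("imported_files", {}).values())

assert needs_migration(old_state)
assert not needs_migration(new_state)
```
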
@@ -359,13 +371,18 @@ async function importConversations() {
     console.log('\n⚠️  No baseline detected. Initial import STRONGLY recommended.');
     console.log('   Without this, historical conversations won\'t be searchable.');
     console.log('   The watcher only handles NEW conversations going forward.');
+  } else if (needsMetadataMigration) {
+    console.log('\n🔄 Detected old import format. Metadata enhancement available!');
+    console.log('   Re-importing will add file analysis, tool usage, and concept tracking.');
+    console.log('   This enables advanced search features like search_by_file and search_by_concept.');
   }
 
   const answer = await question('\nImport existing Claude conversations? (y/n) [recommended: y]: ');
 
   if (answer.toLowerCase() === 'y') {
-    console.log('🚀 Starting baseline import...');
+    console.log('🚀 Starting baseline import with metadata extraction...');
     console.log('   This ensures ALL your conversations are searchable');
+    console.log('   Enhanced with tool usage tracking and file analysis');
     console.log('   This may take a few minutes depending on your conversation history');
 
     try {
@@ -373,8 +390,9 @@ async function importConversations() {
         cwd: projectRoot,
         stdio: 'inherit'
       });
-      console.log('\n✅ Baseline import completed!');
+      console.log('\n✅ Baseline import completed with metadata!');
       console.log('   Historical conversations are now searchable');
+      console.log('   Tool usage and file analysis metadata extracted');
     } catch {
       console.log('\n⚠️  Import had some issues, but you can continue');
     }
@@ -1,6 +1,6 @@
 [project]
 name = "claude-self-reflect-mcp"
-version = "2.5.4"
+version = "2.5.5"
 description = "MCP server for Claude self-reflection with memory decay"
 # readme = "README.md"
 requires-python = ">=3.10"
@@ -12,7 +12,7 @@ dependencies = [
     "qdrant-client>=1.7.0,<2.0.0",
     "voyageai>=0.1.0,<1.0.0",
     "python-dotenv>=1.0.0,<2.0.0",
-    "pydantic>=2.
+    "pydantic>=2.11.7,<3.0.0",  # Updated for fastmcp 2.10.6 compatibility
     "pydantic-settings>=2.0.0,<3.0.0",
     "fastembed>=0.4.0,<1.0.0",
 ]
package/mcp-server/src/server.py CHANGED

@@ -956,7 +956,14 @@ async def search_by_file(
         files_analyzed = payload.get('files_analyzed', [])
         files_edited = payload.get('files_edited', [])
 
-
+        # Check for exact match or if any file ends with the normalized path
+        file_match = False
+        for file in files_analyzed + files_edited:
+            if file == normalized_path or file.endswith('/' + normalized_path) or file.endswith('\\' + normalized_path):
+                file_match = True
+                break
+
+        if file_match:
             all_results.append({
                 'score': 1.0,  # File match is always 1.0
                 'payload': payload,
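
The endswith checks add suffix matching on top of exact matching, so a relative query like `src/server.py` still matches an absolute stored path. A small standalone illustration of the same predicate (the sample paths are invented):

```python
normalized_path = "src/server.py"

for file in ["~/alice/project/src/server.py", "~/alice/project/docs/server.py"]:
    file_match = (file == normalized_path
                  or file.endswith('/' + normalized_path)
                  or file.endswith('\\' + normalized_path))
    print(file, '->', file_match)
# ~/alice/project/src/server.py -> True   (ends with /src/server.py)
# ~/alice/project/docs/server.py -> False (its suffix is docs/server.py)
```
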
package/package.json CHANGED

@@ -9,8 +9,9 @@ import json
 import glob
 import hashlib
 import gc
+import re
 from datetime import datetime
-from typing import List, Dict, Any
+from typing import List, Dict, Any, Set
 import logging
 from pathlib import Path
 
@@ -39,11 +40,252 @@ STATE_FILE = os.getenv("STATE_FILE", default_state_file)
 BATCH_SIZE = int(os.getenv("BATCH_SIZE", "10"))  # Reduced from 100 to prevent OOM
 PREFER_LOCAL_EMBEDDINGS = os.getenv("PREFER_LOCAL_EMBEDDINGS", "false").lower() == "true"
 VOYAGE_API_KEY = os.getenv("VOYAGE_KEY")
+CURRENT_METADATA_VERSION = 2  # Version 2: Added tool output extraction
 
 # Set up logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
+# ============= Metadata Extraction Functions =============
+
+def normalize_path_for_metadata(path: str) -> str:
+    """Normalize file paths for consistency in metadata."""
+    if not path:
+        return ""
+
+    # Remove common prefixes
+    path = path.replace("/Users/", "~/")
+    path = path.replace("\\Users\\", "~\\")
+
+    # Convert to forward slashes
+    path = path.replace("\\", "/")
+
+    # Remove duplicate slashes
+    path = re.sub(r'/+', '/', path)
+
+    return path
+
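
A quick standalone check of what this normalization does to a few representative inputs (the logic is inlined from the function above; the sample paths are invented):

```python
import re

def normalize(path: str) -> str:
    # Inlined copy of normalize_path_for_metadata, for illustration only
    if not path:
        return ""
    path = path.replace("/Users/", "~/").replace("\\Users\\", "~\\")
    path = path.replace("\\", "/")
    return re.sub(r'/+', '/', path)

assert normalize("/Users/alice//code/app.py") == "~/alice/code/app.py"    # home prefix + duplicate slashes
assert normalize("\\Users\\alice\\code\\app.py") == "~/alice/code/app.py" # Windows separators
assert normalize("") == ""
```
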
+def extract_concepts(text: str, tool_usage: Dict[str, Any]) -> Set[str]:
+    """Extract high-level concepts from conversation and tool usage."""
+    concepts = set()
+
+    # Common development concepts with patterns
+    concept_patterns = {
+        'security': r'(security|vulnerability|CVE|injection|sanitize|escape|auth|token|JWT)',
+        'performance': r'(performance|optimization|speed|memory|efficient|benchmark|latency)',
+        'testing': r'(test|pytest|unittest|coverage|TDD|spec|assert)',
+        'docker': r'(docker|container|compose|dockerfile|kubernetes|k8s)',
+        'api': r'(API|REST|GraphQL|endpoint|webhook|http|request)',
+        'database': r'(database|SQL|query|migration|schema|postgres|mysql|mongodb|qdrant)',
+        'authentication': r'(auth|login|token|JWT|session|oauth|permission)',
+        'debugging': r'(debug|error|exception|traceback|log|stack|trace)',
+        'refactoring': r'(refactor|cleanup|improve|restructure|optimize|technical debt)',
+        'deployment': r'(deploy|CI/CD|release|production|staging|rollout)',
+        'git': r'(git|commit|branch|merge|pull request|PR|rebase)',
+        'architecture': r'(architecture|design|pattern|structure|component|module)',
+        'mcp': r'(MCP|claude-self-reflect|tool|agent|claude code)',
+        'embeddings': r'(embedding|vector|semantic|similarity|fastembed|voyage)',
+        'search': r'(search|query|find|filter|match|relevance)'
+    }
+
+    # Check text content (limit to first 10000 chars for performance)
+    combined_text = text[:10000].lower() if text else ""
+    for concept, pattern in concept_patterns.items():
+        if re.search(pattern, combined_text, re.IGNORECASE):
+            concepts.add(concept)
+
+    # Check tool usage patterns
+    if tool_usage.get('grep_searches'):
+        concepts.add('search')
+    if tool_usage.get('files_edited') or tool_usage.get('files_created'):
+        concepts.add('development')
+    if any('test' in str(f).lower() for f in tool_usage.get('files_read', [])):
+        concepts.add('testing')
+    if any('docker' in str(cmd).lower() for cmd in tool_usage.get('bash_commands', [])):
+        concepts.add('docker')
+
+    return concepts
+
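
To see the pattern side of this in action, here is one of the entries above applied to a made-up snippet of conversation text:

```python
import re

text = "We hit an OOM crash, so I added a benchmark to track memory usage."

# The 'performance' entry from concept_patterns above
pattern = r'(performance|optimization|speed|memory|efficient|benchmark|latency)'
if re.search(pattern, text[:10000].lower(), re.IGNORECASE):
    print("tagged concept: performance")  # matches on 'memory' and 'benchmark'
```
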
+def extract_files_from_git_output(output_text: str) -> List[str]:
+    """Extract file paths from git command outputs (diff, show, status, etc)."""
+    files = set()
+
+    # Patterns for different git output formats
+    patterns = [
+        r'diff --git a/(.*?) b/',      # git diff format
+        r'^\+\+\+ b/(.+)$',            # diff new file
+        r'^--- a/(.+)$',               # diff old file
+        r'^modified:\s+(.+)$',         # git status
+        r'^deleted:\s+(.+)$',          # git status
+        r'^new file:\s+(.+)$',         # git status
+        r'^renamed:\s+(.+) -> (.+)$',  # git status (captures both)
+    ]
+
+    for pattern in patterns:
+        matches = re.findall(pattern, output_text, re.MULTILINE)
+        for match in matches:
+            if isinstance(match, tuple):
+                # Handle renamed files (captures both old and new)
+                for f in match:
+                    if f:
+                        files.add(normalize_path_for_metadata(f))
+            else:
+                files.add(normalize_path_for_metadata(match))
+
+    return list(files)[:20]  # Limit to 20 files
+
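
For instance, the two `git status` patterns pull file paths out of output like this (the sample output is invented, and deliberately unindented so the `^`-anchored patterns match):

```python
import re

status_output = """modified:   scripts/import.py
new file:   src/metadata.py"""

files = set()
for pattern in [r'^modified:\s+(.+)$', r'^new file:\s+(.+)$']:
    files.update(re.findall(pattern, status_output, re.MULTILINE))

print(sorted(files))  # ['scripts/import.py', 'src/metadata.py']
```
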
+def extract_tool_data_from_message(tool_use: Dict[str, Any], usage_dict: Dict[str, Any], tool_output: str = None):
+    """Extract tool usage data from a tool_use object in a message, including outputs."""
+    tool_name = tool_use.get('name', '')
+    inputs = tool_use.get('input', {})
+
+    # Track tool in summary
+    usage_dict['tools_summary'][tool_name] = usage_dict['tools_summary'].get(tool_name, 0) + 1
+
+    # Handle Read tool
+    if tool_name == 'Read':
+        file_path = inputs.get('file_path')
+        if file_path:
+            normalized = normalize_path_for_metadata(file_path)
+            if normalized not in usage_dict['files_read']:
+                usage_dict['files_read'].append(normalized)
+
+    # Handle Edit and MultiEdit tools
+    elif tool_name in ['Edit', 'MultiEdit']:
+        path = inputs.get('file_path')
+        if path:
+            normalized = normalize_path_for_metadata(path)
+            if normalized not in usage_dict['files_edited']:
+                usage_dict['files_edited'].append(normalized)
+
+    # Handle Write tool
+    elif tool_name == 'Write':
+        path = inputs.get('file_path')
+        if path:
+            normalized = normalize_path_for_metadata(path)
+            if normalized not in usage_dict['files_created']:
+                usage_dict['files_created'].append(normalized)
+
+    # Handle Grep tool
+    elif tool_name == 'Grep':
+        pattern = inputs.get('pattern')
+        if pattern and len(usage_dict['grep_searches']) < 10:  # Limit
+            usage_dict['grep_searches'].append(pattern[:100])  # Truncate long patterns
+
+    # Handle Bash tool - extract both command and output
+    elif tool_name == 'Bash':
+        command = inputs.get('command')
+        if command and len(usage_dict['bash_commands']) < 10:
+            usage_dict['bash_commands'].append(command[:200])  # Truncate
+
+        # Process tool output for git commands
+        if tool_output and any(cmd in command for cmd in ['git diff', 'git show', 'git status']):
+            git_files = extract_files_from_git_output(tool_output)
+            for file_path in git_files:
+                if file_path not in usage_dict['git_file_changes']:
+                    usage_dict['git_file_changes'].append(file_path)
+
+    # Store tool output preview (for any tool)
+    if tool_output and len(usage_dict['tool_outputs']) < 15:
+        usage_dict['tool_outputs'].append({
+            'tool': tool_name,
+            'command': inputs.get('command', inputs.get('pattern', ''))[:100],
+            'output_preview': tool_output[:500],  # First 500 chars
+            'output_length': len(tool_output)
+        })
+
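
The tool_use objects this function consumes come straight from the JSONL message content. A sketch of one call, assuming the functions above are in scope (the id and path are invented):

```python
usage = {"files_read": [], "files_edited": [], "files_created": [],
         "grep_searches": [], "bash_commands": [], "tools_summary": {},
         "git_file_changes": [], "tool_outputs": []}

tool_use = {"id": "toolu_01", "name": "Edit",
            "input": {"file_path": "/Users/alice/project/src/app.py"}}

extract_tool_data_from_message(tool_use, usage)
# usage["tools_summary"] -> {"Edit": 1}
# usage["files_edited"]  -> ["~/alice/project/src/app.py"]
```
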
+def extract_metadata_from_jsonl(file_path: str) -> Dict[str, Any]:
+    """Extract metadata from a JSONL conversation file."""
+    tool_usage = {
+        "files_read": [],
+        "files_edited": [],
+        "files_created": [],
+        "grep_searches": [],
+        "bash_commands": [],
+        "tools_summary": {},
+        "git_file_changes": [],  # NEW: Files from git outputs
+        "tool_outputs": []       # NEW: Tool output previews
+    }
+
+    conversation_text = ""
+    tool_outputs = {}  # Map tool_use_id to output text
+
+    try:
+        # First pass: collect tool outputs
+        with open(file_path, 'r', encoding='utf-8') as f:
+            for line in f:
+                if line.strip():
+                    try:
+                        data = json.loads(line)
+                        if 'message' in data and data['message']:
+                            msg = data['message']
+                            if msg.get('content') and isinstance(msg['content'], list):
+                                for item in msg['content']:
+                                    if isinstance(item, dict) and item.get('type') == 'tool_result':
+                                        # Capture tool output
+                                        tool_id = item.get('tool_use_id')
+                                        output_content = item.get('content', '')
+                                        if tool_id and output_content:
+                                            tool_outputs[tool_id] = output_content
+                        # Also check for toolUseResult in data
+                        if 'toolUseResult' in data:
+                            result = data['toolUseResult']
+                            if isinstance(result, dict):
+                                tool_outputs['last_result'] = json.dumps(result)[:1000]
+                    except:
+                        continue
+
+        # Second pass: extract tool uses and text with outputs available
+        with open(file_path, 'r', encoding='utf-8') as f:
+            for line in f:
+                if line.strip():
+                    try:
+                        data = json.loads(line)
+                        if 'message' in data and data['message']:
+                            msg = data['message']
+                            # Extract text
+                            if msg.get('content'):
+                                if isinstance(msg['content'], str):
+                                    conversation_text += msg['content'] + "\n"
+                                elif isinstance(msg['content'], list):
+                                    for item in msg['content']:
+                                        if isinstance(item, dict):
+                                            if item.get('type') == 'text' and item.get('text'):
+                                                conversation_text += item['text'] + "\n"
+                                            elif item.get('type') == 'tool_use':
+                                                # Process tool use with output now available
+                                                tool_id = item.get('id', '')
+                                                output = tool_outputs.get(tool_id, '')
+                                                extract_tool_data_from_message(item, tool_usage, output)
+                    except:
+                        continue
+    except Exception as e:
+        logger.warning(f"Error extracting metadata from {file_path}: {e}")
+
+    # Extract concepts from text
+    concepts = extract_concepts(conversation_text, tool_usage)
+
+    # Build metadata
+    metadata = {
+        "files_analyzed": tool_usage['files_read'][:20],  # Limit to 20
+        "files_edited": tool_usage['files_edited'][:10],  # Limit to 10
+        "files_created": tool_usage['files_created'][:10],
+        "tools_used": list(tool_usage['tools_summary'].keys())[:20],
+        "tool_summary": dict(list(tool_usage['tools_summary'].items())[:10]),
+        "concepts": list(concepts)[:15],  # Limit to 15
+        "search_patterns": tool_usage['grep_searches'][:10],
+        "git_file_changes": tool_usage['git_file_changes'][:20],  # NEW: Git file changes
+        "tool_outputs": tool_usage['tool_outputs'][:15],          # NEW: Tool output previews
+        "analysis_only": len(tool_usage['files_edited']) == 0 and len(tool_usage['files_created']) == 0,
+        "has_file_metadata": True,
+        "metadata_version": CURRENT_METADATA_VERSION,
+        "metadata_extracted_at": datetime.now().isoformat()
+    }
+
+    return metadata
+
+# ============= End Metadata Extraction Functions =============
+
 # State management functions
 def load_state():
     """Load the import state from file."""
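
The resulting payload fragment looks roughly like this for an analysis-only conversation (all values below are illustrative, not taken from a real file):

```python
example_metadata = {
    "files_analyzed": ["~/alice/project/src/app.py"],
    "files_edited": [],
    "files_created": [],
    "tools_used": ["Read", "Grep"],
    "tool_summary": {"Read": 3, "Grep": 1},
    "concepts": ["search", "debugging"],
    "search_patterns": ["def main"],
    "git_file_changes": [],
    "tool_outputs": [],
    "analysis_only": True,  # no Edit/Write tool calls were seen
    "has_file_metadata": True,
    "metadata_version": 2,
    "metadata_extracted_at": "2024-01-01T00:00:00",
}
```
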
@@ -273,6 +515,9 @@ def import_project(project_path: Path, collection_name: str, state: dict) -> int
         created_at = datetime.now().isoformat()
         conversation_id = jsonl_file.stem
 
+        # Extract tool usage metadata from the file
+        metadata = extract_metadata_from_jsonl(str(jsonl_file))
+
         # Chunk the conversation
         chunks = chunk_conversation(messages)
 
@@ -294,17 +539,22 @@ def import_project(project_path: Path, collection_name: str, state: dict) -> int
                 f"{conversation_id}_{chunk['chunk_index']}".encode()
             ).hexdigest()[:16]
 
+            # Combine basic payload with metadata
+            payload = {
+                "text": chunk["text"],
+                "conversation_id": conversation_id,
+                "chunk_index": chunk["chunk_index"],
+                "timestamp": created_at,
+                "project": project_path.name,
+                "start_role": chunk["start_role"]
+            }
+            # Add metadata fields
+            payload.update(metadata)
+
             points.append(PointStruct(
                 id=int(point_id, 16) % (2**63),  # Convert to valid integer ID
                 vector=embedding,
-                payload={
-                    "text": chunk["text"],
-                    "conversation_id": conversation_id,
-                    "chunk_index": chunk["chunk_index"],
-                    "timestamp": created_at,
-                    "project": project_path.name,
-                    "start_role": chunk["start_role"]
-                }
+                payload=payload
             ))
 
         # Upload to Qdrant
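
The net effect on each stored point: the six base fields stay as before, and the metadata keys are merged in beside them. A minimal sketch of the merge (values invented):

```python
payload = {
    "text": "...chunk text...",
    "conversation_id": "conv-a",
    "chunk_index": 0,
    "timestamp": "2024-01-01T00:00:00",
    "project": "my-project",
    "start_role": "user",
}
payload.update({"concepts": ["debugging"], "has_file_metadata": True,
                "metadata_version": 2})  # subset of extract_metadata_from_jsonl output
# search_by_file and search_by_concept filter on these merged keys
```
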
package/.claude/agents/README.md DELETED

@@ -1,155 +0,0 @@
-# Claude Self Reflect - Specialized Agents
-
-This directory contains specialized sub-agents that Claude will proactively use when working on different aspects of the Claude Self Reflect system. Each agent has focused expertise and will automatically activate when their domain is encountered.
-
-## Available Agents
-
-### 🧠 Core System Agents
-
-1. **[qdrant-specialist](./qdrant-specialist.md)** - Vector database expert
-   - Collection management and health monitoring
-   - Search optimization and troubleshooting
-   - Embedding configuration and dimension issues
-   - Performance tuning for Qdrant
-
-2. **[import-debugger](./import-debugger.md)** - Import pipeline specialist
-   - JSONL processing and parsing issues
-   - Conversation chunking optimization
-   - Batch processing and memory management
-   - State tracking and error recovery
-
-3. **[docker-orchestrator](./docker-orchestrator.md)** - Container management expert
-   - Multi-container orchestration
-   - Service health monitoring
-   - Resource optimization
-   - Networking and volume management
-
-4. **[mcp-integration](./mcp-integration.md)** - MCP server developer
-   - Claude Desktop integration
-   - Tool implementation and schemas
-   - TypeScript development
-   - Connection debugging
-
-5. **[search-optimizer](./search-optimizer.md)** - Search quality expert
-   - Semantic search tuning
-   - Embedding model comparison
-   - Similarity threshold optimization
-   - A/B testing methodologies
-
-6. **[reflection-specialist](./reflection-specialist.md)** - Conversation memory expert
-   - Searching past conversations with MCP tools
-   - Storing insights and reflections
-   - Maintaining knowledge continuity
-   - Cross-project conversation search
-
-### 🌍 Open Source Development Agents
-
-7. **[open-source-maintainer](./open-source-maintainer.md)** - Project governance expert
-   - Release management and versioning
-   - Community building and engagement
-   - Issue and PR triage
-   - Contributor recognition
-
-8. **[documentation-writer](./documentation-writer.md)** - Technical documentation specialist
-   - API documentation and references
-   - Tutorial and guide creation
-   - Architecture documentation
-   - Example code development
-
-9. **[performance-tuner](./performance-tuner.md)** - Performance optimization specialist
-   - Search latency optimization
-   - Memory usage reduction
-   - Scalability improvements
-   - Benchmark creation and monitoring
-
-### 🧪 Testing and Validation Agents
-
-10. **[reflect-tester](./reflect-tester.md)** - Comprehensive testing specialist
-    - MCP configuration validation
-    - Tool functionality testing
-    - Collection health verification
-    - Import system validation
-    - Embedding mode testing
-
-## How Agents Work
-
-### Automatic Activation
-
-Claude automatically engages the appropriate agent based on context. For example:
-
-- Mentioning "search returns irrelevant results" → `search-optimizer`
-- Discussing "import showing 0 messages" → `import-debugger`
-- Working on "release v1.2.0" → `open-source-maintainer`
-- Asking about "Qdrant collection errors" → `qdrant-specialist`
-- Requesting "test all reflection functionality" → `reflect-tester`
-- Searching "past conversations about X" → `reflection-specialist`
-
-### Agent Capabilities
-
-Each agent has:
-- **Focused expertise** in their domain
-- **Specific tool permissions** for their tasks
-- **Contextual knowledge** about the project
-- **Best practices** for their area
-
-### Working with Multiple Agents
-
-Agents can collaborate on complex issues:
-
-```
-User: "Search is slow and returning poor results after import"
-→ import-debugger checks data quality
-→ qdrant-specialist optimizes collection settings
-→ search-optimizer tunes similarity thresholds
-→ performance-tuner profiles the entire pipeline
-```
-
-## Creating New Agents
-
-To add a new specialized agent:
-
-1. Create a new `.md` file in this directory
-2. Use the following template:
-
-```markdown
----
-name: agent-name
-description: Brief description for proactive activation
-tools: Read, Write, Edit, Bash, Grep, Glob, LS, WebFetch
----
-
-You are a [role] for the Claude Self Reflect project. Your expertise covers [domains].
-
-## Project Context
-[Specific project knowledge relevant to this agent]
-
-## Key Responsibilities
-[Numbered list of main tasks]
-
-## Essential Commands/Patterns
-[Code blocks with common operations]
-
-## Best Practices
-[Domain-specific guidelines]
-```
-
-3. Update this README with the new agent
-4. Test the agent activation with relevant prompts
-
-## Agent Development Guidelines
-
-- **Be specific**: Agents should have clear, focused roles
-- **Include examples**: Provide code snippets and commands
-- **Stay current**: Update agents as the project evolves
-- **Cross-reference**: Mention when to use other agents
-- **Be helpful**: Include troubleshooting sections
-
-## Maintenance
-
-Agents should be reviewed and updated:
-- When new features are added
-- When common issues emerge
-- When best practices change
-- During major version updates
-
-Remember: These agents are here to help contributors work more effectively on the Claude Self Reflect project. They embody the project's expertise and best practices.