claude-self-reflect 2.4.15 → 2.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/claude-self-reflect-test.md +528 -0
- package/.claude/agents/import-debugger.md +4 -1
- package/.claude/agents/mcp-integration.md +4 -2
- package/.claude/agents/qdrant-specialist.md +6 -3
- package/Dockerfile.streaming-importer +21 -8
- package/Dockerfile.watcher +6 -2
- package/docker-compose.yaml +40 -5
- package/installer/setup-wizard-docker.js +30 -5
- package/mcp-server/pyproject.toml +1 -1
- package/mcp-server/src/server.py +246 -7
- package/mcp-server/src/utils.py +21 -2
- package/package.json +1 -1
- package/scripts/import-conversations-enhanced.py +672 -0
- package/scripts/import-conversations-unified.py +15 -6
- package/scripts/import-watcher.py +0 -88
package/docker-compose.yaml
CHANGED
@@ -18,12 +18,13 @@ services:
       - "${QDRANT_PORT:-6333}:6333"
     volumes:
       - qdrant_data:/qdrant/storage
+      - ./config/qdrant-config.yaml:/qdrant/config/config.yaml:ro
     environment:
       - QDRANT__LOG_LEVEL=INFO
       - QDRANT__SERVICE__HTTP_PORT=6333
     restart: unless-stopped
-    mem_limit: ${QDRANT_MEMORY:-
-    memswap_limit: ${QDRANT_MEMORY:-
+    mem_limit: ${QDRANT_MEMORY:-4g}
+    memswap_limit: ${QDRANT_MEMORY:-4g}
 
   # One-time import service (runs once then exits)
   importer:
@@ -53,7 +54,7 @@ services:
     profiles: ["import"]
     command: python /scripts/import-conversations-unified.py
 
-  # Continuous watcher service (optional)
+  # Continuous watcher service (optional) - DEPRECATED, use streaming-importer
   watcher:
     build:
       context: .
@@ -73,10 +74,44 @@ services:
       - OPENAI_API_KEY=${OPENAI_API_KEY:-}
       - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
       - VOYAGE_KEY=${VOYAGE_KEY:-}
-      - PREFER_LOCAL_EMBEDDINGS=${PREFER_LOCAL_EMBEDDINGS:-
+      - PREFER_LOCAL_EMBEDDINGS=${PREFER_LOCAL_EMBEDDINGS:-true}
       - EMBEDDING_MODEL=${EMBEDDING_MODEL:-voyage-3}
-      - WATCH_INTERVAL=${WATCH_INTERVAL:-
+      - WATCH_INTERVAL=${WATCH_INTERVAL:-5}
+      - MAX_MEMORY_MB=${MAX_MEMORY_MB:-250}
+      - CHUNK_SIZE=${CHUNK_SIZE:-5}
+      - PYTHONUNBUFFERED=1
+    restart: unless-stopped
+    profiles: ["watch-old"]
+    mem_limit: 500m
+    memswap_limit: 500m
+
+  # Streaming importer service - Low memory continuous import
+  streaming-importer:
+    build:
+      context: .
+      dockerfile: Dockerfile.streaming-importer
+    container_name: claude-reflection-streaming
+    depends_on:
+      - init-permissions
+      - qdrant
+    volumes:
+      - ${CLAUDE_LOGS_PATH:-~/.claude/projects}:/logs:ro
+      - ${CONFIG_PATH:-~/.claude-self-reflect/config}:/config
+      - ./scripts:/scripts:ro
+    environment:
+      - QDRANT_URL=http://qdrant:6333
+      - STATE_FILE=/config/imported-files.json
+      - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
+      - VOYAGE_KEY=${VOYAGE_KEY:-}
+      - PREFER_LOCAL_EMBEDDINGS=${PREFER_LOCAL_EMBEDDINGS:-true}
+      - WATCH_INTERVAL=${WATCH_INTERVAL:-5}  # Testing with 5 second interval
+      - MAX_MEMORY_MB=${MAX_MEMORY_MB:-350}  # Total memory including model
+      - OPERATIONAL_MEMORY_MB=${OPERATIONAL_MEMORY_MB:-100}  # Memory for operations (increased for large file handling)
+      - CHUNK_SIZE=${CHUNK_SIZE:-5}
       - PYTHONUNBUFFERED=1
+      - LOGS_DIR=/logs
+      - FASTEMBED_CACHE_PATH=/root/.cache/fastembed
+      - CURRENT_PROJECT_PATH=${PWD}  # Pass current project path for prioritization
     restart: unless-stopped
     profiles: ["watch"]
     mem_limit: 1g
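The new streaming-importer takes over the "watch" profile (the old watcher is parked under "watch-old") and is driven entirely by the environment variables above: a total memory budget (MAX_MEMORY_MB), an operational slice (OPERATIONAL_MEMORY_MB), and small batches (CHUNK_SIZE). Below is a minimal Python sketch of how a budgeted, chunked import loop of this kind can work; the env-var names match the compose file, but the loop and the embed_and_store callback are illustrative assumptions, not the package's actual importer code.

    # Illustrative sketch only: a CHUNK_SIZE-batched loop that refuses to
    # continue past a MAX_MEMORY_MB budget. Not the package's real importer.
    import os
    import resource

    MAX_MEMORY_MB = int(os.environ.get("MAX_MEMORY_MB", "350"))
    CHUNK_SIZE = int(os.environ.get("CHUNK_SIZE", "5"))

    def rss_mb() -> float:
        # ru_maxrss is reported in kilobytes on Linux
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024

    def import_in_chunks(messages, embed_and_store):
        """Feed messages to embed_and_store in CHUNK_SIZE batches,
        stopping rather than exceeding the container's memory limit."""
        chunk = []
        for message in messages:
            chunk.append(message)
            if len(chunk) >= CHUNK_SIZE:
                if rss_mb() > MAX_MEMORY_MB:
                    raise MemoryError(f"RSS above {MAX_MEMORY_MB} MB budget")
                embed_and_store(chunk)
                chunk = []
        if chunk:
            embed_and_store(chunk)

Keeping chunks small is what lets the container run under mem_limit: 1g while the embedding model itself occupies most of the budget.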
package/installer/setup-wizard-docker.js
CHANGED

@@ -340,12 +340,32 @@ function showManualConfig(mcpScript) {
 }
 
 async function importConversations() {
-  console.log('\n🔍
+  console.log('\n🔍 Checking conversation baseline...');
 
-
+  // Check if baseline exists by looking for imported files state
+  const stateFile = path.join(configDir, 'imported-files.json');
+  let hasBaseline = false;
+
+  try {
+    if (fs.existsSync(stateFile)) {
+      const state = JSON.parse(fs.readFileSync(stateFile, 'utf8'));
+      hasBaseline = state.imported_files && Object.keys(state.imported_files).length > 0;
+    }
+  } catch (e) {
+    // State file doesn't exist or is invalid
+  }
+
+  if (!hasBaseline) {
+    console.log('\n⚠️  No baseline detected. Initial import STRONGLY recommended.');
+    console.log('   Without this, historical conversations won\'t be searchable.');
+    console.log('   The watcher only handles NEW conversations going forward.');
+  }
+
+  const answer = await question('\nImport existing Claude conversations? (y/n) [recommended: y]: ');
 
   if (answer.toLowerCase() === 'y') {
-    console.log('🚀 Starting import
+    console.log('🚀 Starting baseline import...');
+    console.log('   This ensures ALL your conversations are searchable');
     console.log('   This may take a few minutes depending on your conversation history');
 
     try {
@@ -353,12 +373,17 @@ async function importConversations() {
       cwd: projectRoot,
       stdio: 'inherit'
     });
-      console.log('\n✅
+      console.log('\n✅ Baseline import completed!');
+      console.log('   Historical conversations are now searchable');
     } catch {
      console.log('\n⚠️  Import had some issues, but you can continue');
    }
  } else {
-    console.log('
+    console.log('\n❌ WARNING: Skipping baseline import means:');
+    console.log('   • Historical conversations will NOT be searchable');
+    console.log('   • Only NEW conversations from now on will be indexed');
+    console.log('   • You may see "BASELINE_NEEDED" warnings in logs');
+    console.log('\n📝 You can run baseline import later with:');
    console.log('   docker compose run --rm importer');
  }
 }
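For reference, the same baseline check in Python, assuming only the state-file shape the wizard reads above ({"imported_files": {...}} at the path the compose services mount as STATE_FILE); the function name and default path are hypothetical.

    # Sketch of the wizard's baseline check, ported to Python.
    import json
    from pathlib import Path

    def has_baseline(state_file: str = "~/.claude-self-reflect/config/imported-files.json") -> bool:
        path = Path(state_file).expanduser()
        try:
            state = json.loads(path.read_text())
        except (OSError, json.JSONDecodeError):
            return False  # missing or invalid state file -> no baseline yet
        return bool(state.get("imported_files"))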
package/mcp-server/src/server.py
CHANGED

@@ -108,9 +108,16 @@ async def get_all_collections() -> List[str]:
     return [c.name for c in collections.collections
             if c.name.endswith('_voyage') or c.name.endswith('_local') or c.name.startswith('reflections')]
 
-async def generate_embedding(text: str) -> List[float]:
-    """Generate embedding using configured provider.
-
+async def generate_embedding(text: str, force_type: Optional[str] = None) -> List[float]:
+    """Generate embedding using configured provider or forced type.
+
+    Args:
+        text: Text to embed
+        force_type: Force specific embedding type ('local' or 'voyage')
+    """
+    use_local = force_type == 'local' if force_type else (PREFER_LOCAL_EMBEDDINGS or not voyage_client)
+
+    if use_local:
         # Use local embeddings
         if not local_embedding_model:
             raise ValueError("Local embedding model not initialized")
@@ -123,6 +130,8 @@ async def generate_embedding(text: str) -> List[float]:
         return embeddings[0].tolist()
     else:
         # Use Voyage AI
+        if not voyage_client:
+            raise ValueError("Voyage client not initialized")
         result = voyage_client.embed(
             texts=[text],
             model="voyage-3-large",
@@ -218,10 +227,10 @@ async def reflect_on_past(
     await ctx.debug(f"DECAY_WEIGHT: {DECAY_WEIGHT}, DECAY_SCALE_DAYS: {DECAY_SCALE_DAYS}")
 
     try:
-        #
-        timing_info['
-
-        timing_info['
+        # We'll generate embeddings on-demand per collection type
+        timing_info['embedding_prep_start'] = time.time()
+        query_embeddings = {}  # Cache embeddings by type
+        timing_info['embedding_prep_end'] = time.time()
 
         # Get all collections
         timing_info['get_collections_start'] = time.time()
@@ -237,6 +246,7 @@ async def reflect_on_past(
         # Generate the collection name pattern for this project using normalized name
         normalized_name = normalize_project_name(target_project)
         project_hash = hashlib.md5(normalized_name.encode()).hexdigest()[:8]
+        # Search BOTH local and voyage collections for this project
         project_collections = [
             c for c in all_collections
             if c.startswith(f"conv_{project_hash}_")
@@ -276,6 +286,18 @@ async def reflect_on_past(
             )
 
             try:
+                # Determine embedding type for this collection
+                embedding_type_for_collection = 'voyage' if collection_name.endswith('_voyage') else 'local'
+
+                # Generate or retrieve cached embedding for this type
+                if embedding_type_for_collection not in query_embeddings:
+                    try:
+                        query_embeddings[embedding_type_for_collection] = await generate_embedding(query, force_type=embedding_type_for_collection)
+                    except Exception as e:
+                        await ctx.debug(f"Failed to generate {embedding_type_for_collection} embedding: {e}")
+                        continue
+
+                query_embedding = query_embeddings[embedding_type_for_collection]
                 if should_use_decay and USE_NATIVE_DECAY and NATIVE_DECAY_AVAILABLE:
                     # Use native Qdrant decay with newer API
                     await ctx.debug(f"Using NATIVE Qdrant decay (new API) for {collection_name}")
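The effect of the block above is that each query computes at most one embedding per backend: the first `_voyage` collection pays for a Voyage API call, the first `_local` collection for a local model call, and every later collection reuses the cached vector. A condensed, self-contained sketch of the idiom, with a stubbed generate_embedding standing in for the server's real one:

    # Condensed sketch of the per-backend embedding cache used above.
    import asyncio
    from typing import Dict, List

    async def generate_embedding(text: str, force_type: str) -> List[float]:
        return [0.0, 1.0]  # stub; the real server calls Voyage or fastembed

    async def embeddings_for(collections: List[str], query: str) -> Dict[str, List[float]]:
        cache: Dict[str, List[float]] = {}
        for name in collections:
            kind = 'voyage' if name.endswith('_voyage') else 'local'
            if kind not in cache:  # computed once per backend, then reused
                cache[kind] = await generate_embedding(query, force_type=kind)
        return cache

    # asyncio.run(embeddings_for(["conv_ab12cd34_local", "conv_ab12cd34_voyage"], "docker memory"))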
@@ -887,6 +909,223 @@ async def get_more_results(
     return response
 
 
+@mcp.tool()
+async def search_by_file(
+    ctx: Context,
+    file_path: str = Field(description="The file path to search for in conversations"),
+    limit: int = Field(default=10, description="Maximum number of results to return"),
+    project: Optional[str] = Field(default=None, description="Search specific project only. Use 'all' to search across all projects.")
+) -> str:
+    """Search for conversations that analyzed a specific file."""
+    global qdrant_client
+
+    # Normalize file path
+    normalized_path = file_path.replace("\\", "/").replace("/Users/", "~/")
+
+    # Determine which collections to search
+    # If no project specified, search all collections
+    collections = await get_all_collections() if not project else []
+
+    if project and project != 'all':
+        # Filter collections for specific project
+        project_hash = hashlib.md5(project.encode()).hexdigest()[:8]
+        collection_prefix = f"conv_{project_hash}_"
+        collections = [c for c in await get_all_collections() if c.startswith(collection_prefix)]
+    elif project == 'all':
+        collections = await get_all_collections()
+
+    if not collections:
+        return "<search_by_file>\n<error>No collections found to search</error>\n</search_by_file>"
+
+    # Prepare results
+    all_results = []
+
+    for collection_name in collections:
+        try:
+            # Use scroll to get all points and filter manually
+            # Qdrant's array filtering can be tricky, so we'll filter in code
+            scroll_result = await qdrant_client.scroll(
+                collection_name=collection_name,
+                limit=1000,  # Get a batch
+                with_payload=True
+            )
+
+            # Filter results that contain the file
+            for point in scroll_result[0]:
+                payload = point.payload
+                files_analyzed = payload.get('files_analyzed', [])
+                files_edited = payload.get('files_edited', [])
+
+                if normalized_path in files_analyzed or normalized_path in files_edited:
+                    all_results.append({
+                        'score': 1.0,  # File match is always 1.0
+                        'payload': payload,
+                        'collection': collection_name
+                    })
+
+        except Exception as e:
+            continue
+
+    # Sort by timestamp (newest first)
+    all_results.sort(key=lambda x: x['payload'].get('timestamp', ''), reverse=True)
+
+    # Format results
+    if not all_results:
+        return f"""<search_by_file>
+<query>{file_path}</query>
+<normalized_path>{normalized_path}</normalized_path>
+<message>No conversations found that analyzed this file</message>
+</search_by_file>"""
+
+    results_text = []
+    for i, result in enumerate(all_results[:limit]):
+        payload = result['payload']
+        timestamp = payload.get('timestamp', 'Unknown')
+        conversation_id = payload.get('conversation_id', 'Unknown')
+        project = payload.get('project', 'Unknown')
+        text_preview = payload.get('text', '')[:200] + '...' if len(payload.get('text', '')) > 200 else payload.get('text', '')
+
+        # Check if file was edited or just read
+        action = "edited" if normalized_path in payload.get('files_edited', []) else "analyzed"
+
+        # Get related tools used
+        tool_summary = payload.get('tool_summary', {})
+        tools_used = ', '.join(f"{tool}({count})" for tool, count in tool_summary.items())
+
+        results_text.append(f"""<result rank="{i+1}">
+<conversation_id>{conversation_id}</conversation_id>
+<project>{project}</project>
+<timestamp>{timestamp}</timestamp>
+<action>{action}</action>
+<tools_used>{tools_used}</tools_used>
+<preview>{text_preview}</preview>
+</result>""")
+
+    return f"""<search_by_file>
+<query>{file_path}</query>
+<normalized_path>{normalized_path}</normalized_path>
+<count>{len(all_results)}</count>
+<results>
+{''.join(results_text)}
+</results>
+</search_by_file>"""
+
+
+@mcp.tool()
+async def search_by_concept(
+    ctx: Context,
+    concept: str = Field(description="The concept to search for (e.g., 'security', 'docker', 'testing')"),
+    include_files: bool = Field(default=True, description="Include file information in results"),
+    limit: int = Field(default=10, description="Maximum number of results to return"),
+    project: Optional[str] = Field(default=None, description="Search specific project only. Use 'all' to search across all projects.")
+) -> str:
+    """Search for conversations about a specific development concept."""
+    global qdrant_client
+
+    # Generate embedding for the concept
+    embedding = await generate_embedding(concept)
+
+    # Determine which collections to search
+    # If no project specified, search all collections
+    collections = await get_all_collections() if not project else []
+
+    if project and project != 'all':
+        # Filter collections for specific project
+        project_hash = hashlib.md5(project.encode()).hexdigest()[:8]
+        collection_prefix = f"conv_{project_hash}_"
+        collections = [c for c in await get_all_collections() if c.startswith(collection_prefix)]
+    elif project == 'all':
+        collections = await get_all_collections()
+
+    if not collections:
+        return "<search_by_concept>\n<error>No collections found to search</error>\n</search_by_concept>"
+
+    # Search all collections
+    all_results = []
+
+    for collection_name in collections:
+        try:
+            # Hybrid search: semantic + concept filter
+            results = await qdrant_client.search(
+                collection_name=collection_name,
+                query_vector=embedding,
+                query_filter=models.Filter(
+                    should=[
+                        models.FieldCondition(
+                            key="concepts",
+                            match=models.MatchAny(any=[concept.lower()])
+                        )
+                    ]
+                ),
+                limit=limit * 2,  # Get more results for better filtering
+                with_payload=True
+            )
+
+            for point in results:
+                payload = point.payload
+                # Boost score if concept is in the concepts list
+                score_boost = 0.2 if concept.lower() in payload.get('concepts', []) else 0.0
+                all_results.append({
+                    'score': float(point.score) + score_boost,
+                    'payload': payload,
+                    'collection': collection_name
+                })
+
+        except Exception as e:
+            continue
+
+    # Sort by score and limit
+    all_results.sort(key=lambda x: x['score'], reverse=True)
+    all_results = all_results[:limit]
+
+    # Format results
+    if not all_results:
+        return f"""<search_by_concept>
+<concept>{concept}</concept>
+<message>No conversations found about this concept</message>
+</search_by_concept>"""
+
+    results_text = []
+    for i, result in enumerate(all_results):
+        payload = result['payload']
+        score = result['score']
+        timestamp = payload.get('timestamp', 'Unknown')
+        conversation_id = payload.get('conversation_id', 'Unknown')
+        project = payload.get('project', 'Unknown')
+        concepts = payload.get('concepts', [])
+
+        # Get text preview
+        text_preview = payload.get('text', '')[:200] + '...' if len(payload.get('text', '')) > 200 else payload.get('text', '')
+
+        # File information
+        files_info = ""
+        if include_files:
+            files_analyzed = payload.get('files_analyzed', [])[:5]
+            if files_analyzed:
+                files_info = f"\n<files_analyzed>{', '.join(files_analyzed)}</files_analyzed>"
+
+        # Related concepts
+        related_concepts = [c for c in concepts if c != concept.lower()][:5]
+
+        results_text.append(f"""<result rank="{i+1}">
+<score>{score:.3f}</score>
+<conversation_id>{conversation_id}</conversation_id>
+<project>{project}</project>
+<timestamp>{timestamp}</timestamp>
+<concepts>{', '.join(concepts)}</concepts>
+<related_concepts>{', '.join(related_concepts)}</related_concepts>{files_info}
+<preview>{text_preview}</preview>
+</result>""")
+
+    return f"""<search_by_concept>
+<concept>{concept}</concept>
+<count>{len(all_results)}</count>
+<results>
+{''.join(results_text)}
+</results>
+</search_by_concept>"""
+
+
 # Debug output
 print(f"[DEBUG] FastMCP server created with name: {mcp.name}")
 
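Both new tools lean on importer-written payload fields rather than any Qdrant built-ins. An illustrative payload, inferred only from the fields the code above reads (files_analyzed, files_edited, concepts, tool_summary, and so on); actual values come from the import scripts:

    # Hypothetical chunk payload, shaped after the fields read above.
    example_payload = {
        "text": "Discussed raising the Qdrant memory limit...",
        "timestamp": "2025-01-01T12:00:00Z",
        "conversation_id": "abc123",
        "project": "claude-self-reflect",
        "files_analyzed": ["~/Code/claude-self-reflect/docker-compose.yaml"],
        "files_edited": ["~/Code/claude-self-reflect/docker-compose.yaml"],
        "concepts": ["docker", "memory"],
        "tool_summary": {"Read": 3, "Edit": 1},
    }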
package/mcp-server/src/utils.py
CHANGED

@@ -9,6 +9,8 @@ def normalize_project_name(project_path: str) -> str:
 
     Handles various path formats:
     - Claude logs format: -Users-kyle-Code-claude-self-reflect -> claude-self-reflect
+    - File paths in Claude logs: /path/to/-Users-kyle-Code-claude-self-reflect/file.jsonl -> claude-self-reflect
+    - Regular file paths: /path/to/project/file.txt -> project
     - Regular paths: /path/to/project -> project
     - Already normalized: project -> project
 
@@ -49,5 +51,22 @@ def normalize_project_name(project_path: str) -> str:
         # Fallback: just use the last component
         return path_parts[-1] if path_parts else project_path
 
-    #
-
+    # Check if this is a file path that contains a Claude logs directory
+    # Pattern: /path/to/-Users-...-projects-..../filename
+    path_obj = Path(project_path)
+
+    # Look for a parent directory that starts with dash (Claude logs format)
+    for parent in path_obj.parents:
+        parent_name = parent.name
+        if parent_name.startswith("-"):
+            # Found a Claude logs directory, process it
+            return normalize_project_name(parent_name)
+
+    # Handle regular paths - if it's a file, get the parent directory
+    # Otherwise use the directory/project name itself
+    if path_obj.suffix:  # It's a file (has an extension)
+        # Use the parent directory name
+        return path_obj.parent.name
+    else:
+        # Use the directory name itself
+        return path_obj.name