claude-self-reflect 2.5.13 → 2.5.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/mcp-server/src/server.py
CHANGED
|
@@ -168,11 +168,13 @@ async def update_indexing_status():
|
|
|
168
168
|
if imported_files_path and imported_files_path.exists():
|
|
169
169
|
with open(imported_files_path, 'r') as f:
|
|
170
170
|
imported_data = json.load(f)
|
|
171
|
-
# The
|
|
172
|
-
#
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
171
|
+
# The actual structure has imported_files and file_metadata at the top level
|
|
172
|
+
# NOT nested under stream_position as previously assumed
|
|
173
|
+
imported_files_dict = imported_data.get("imported_files", {})
|
|
174
|
+
file_metadata = imported_data.get("file_metadata", {})
|
|
175
|
+
|
|
176
|
+
# Convert dict keys to list for compatibility with existing logic
|
|
177
|
+
imported_files_list = list(imported_files_dict.keys())
|
|
176
178
|
|
|
177
179
|
# Count files that have been imported
|
|
178
180
|
for file_path in jsonl_files:
|
|
@@ -714,18 +716,15 @@ async def reflect_on_past(
|
|
|
714
716
|
# Add upfront summary for immediate visibility (before collapsible XML)
|
|
715
717
|
upfront_summary = ""
|
|
716
718
|
|
|
717
|
-
# Show indexing status prominently
|
|
718
|
-
if indexing_status["percentage"] < 95.0:
|
|
719
|
-
upfront_summary += f"📊 INDEXING: {indexing_status['indexed_conversations']}/{indexing_status['total_conversations']} conversations ({indexing_status['percentage']:.1f}% complete, {indexing_status['backlog_count']} pending)\n"
|
|
720
|
-
|
|
721
719
|
# Show result summary
|
|
722
720
|
if all_results:
|
|
723
721
|
score_info = "high" if all_results[0].score >= 0.85 else "good" if all_results[0].score >= 0.75 else "partial"
|
|
724
722
|
upfront_summary += f"🎯 RESULTS: {len(all_results)} matches ({score_info} relevance, top score: {all_results[0].score:.3f})\n"
|
|
725
723
|
|
|
726
|
-
# Show performance
|
|
724
|
+
# Show performance with indexing status inline
|
|
727
725
|
total_time = time.time() - start_time
|
|
728
|
-
|
|
726
|
+
indexing_info = f" | 📊 {indexing_status['indexed_conversations']}/{indexing_status['total_conversations']} indexed" if indexing_status["percentage"] < 100.0 else ""
|
|
727
|
+
upfront_summary += f"⚡ PERFORMANCE: {int(total_time * 1000)}ms ({len(collections_to_search)} collections searched{indexing_info})\n"
|
|
729
728
|
else:
|
|
730
729
|
upfront_summary += f"❌ NO RESULTS: No conversations found matching '{query}'\n"
|
|
731
730
|
|
package/package.json  (NOTE: attribution appears incorrect — the hunk below is Python source from an import script (`def import_project(...)`), not JSON; verify the actual changed file path)
CHANGED
|
@@ -545,7 +545,7 @@ def import_project(project_path: Path, collection_name: str, state: dict) -> int
|
|
|
545
545
|
"conversation_id": conversation_id,
|
|
546
546
|
"chunk_index": chunk["chunk_index"],
|
|
547
547
|
"timestamp": created_at,
|
|
548
|
-
"project": project_path.name,
|
|
548
|
+
"project": normalize_project_name(project_path.name),
|
|
549
549
|
"start_role": chunk["start_role"]
|
|
550
550
|
}
|
|
551
551
|
# Add metadata fields
|