claude-self-reflect 3.2.4 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/.claude/agents/claude-self-reflect-test.md +595 -528
  2. package/.claude/agents/reflection-specialist.md +59 -3
  3. package/README.md +14 -5
  4. package/mcp-server/run-mcp.sh +49 -5
  5. package/mcp-server/src/app_context.py +64 -0
  6. package/mcp-server/src/config.py +57 -0
  7. package/mcp-server/src/connection_pool.py +286 -0
  8. package/mcp-server/src/decay_manager.py +106 -0
  9. package/mcp-server/src/embedding_manager.py +64 -40
  10. package/mcp-server/src/embeddings_old.py +141 -0
  11. package/mcp-server/src/models.py +64 -0
  12. package/mcp-server/src/parallel_search.py +371 -0
  13. package/mcp-server/src/project_resolver.py +5 -0
  14. package/mcp-server/src/reflection_tools.py +206 -0
  15. package/mcp-server/src/rich_formatting.py +196 -0
  16. package/mcp-server/src/search_tools.py +826 -0
  17. package/mcp-server/src/server.py +127 -1720
  18. package/mcp-server/src/temporal_design.py +132 -0
  19. package/mcp-server/src/temporal_tools.py +597 -0
  20. package/mcp-server/src/temporal_utils.py +384 -0
  21. package/mcp-server/src/utils.py +150 -67
  22. package/package.json +10 -1
  23. package/scripts/add-timestamp-indexes.py +134 -0
  24. package/scripts/check-collections.py +29 -0
  25. package/scripts/debug-august-parsing.py +76 -0
  26. package/scripts/debug-import-single.py +91 -0
  27. package/scripts/debug-project-resolver.py +82 -0
  28. package/scripts/debug-temporal-tools.py +135 -0
  29. package/scripts/delta-metadata-update.py +547 -0
  30. package/scripts/import-conversations-unified.py +53 -2
  31. package/scripts/precompact-hook.sh +33 -0
  32. package/scripts/streaming-watcher.py +1443 -0
  33. package/scripts/utils.py +39 -0
package/mcp-server/src/temporal_utils.py ADDED
@@ -0,0 +1,384 @@
+ """
+ Temporal utilities for Claude Self Reflect.
+ Handles session detection, time parsing, and temporal query helpers.
+ """
+
+ import re
+ from datetime import datetime, timedelta, timezone
+ from typing import List, Dict, Any, Optional, Tuple, Union
+ from functools import lru_cache
+ import logging
+ from dataclasses import dataclass
+ from collections import defaultdict
+
+ logger = logging.getLogger(__name__)
+
+ @dataclass
+ class WorkSession:
+     """Represents a work session - a group of related conversations."""
+     session_id: str
+     start_time: datetime
+     end_time: datetime
+     conversation_ids: List[str]
+     project: str
+     duration_minutes: int
+     main_topics: List[str]
+     files_touched: List[str]
+     message_count: int
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to dictionary for JSON serialization."""
+         return {
+             'session_id': self.session_id,
+             'start_time': self.start_time.isoformat(),
+             'end_time': self.end_time.isoformat(),
+             'conversation_ids': self.conversation_ids,
+             'project': self.project,
+             'duration_minutes': self.duration_minutes,
+             'main_topics': self.main_topics,
+             'files_touched': self.files_touched,
+             'message_count': self.message_count
+         }
+
+
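Note: `WorkSession` is a plain dataclass whose `to_dict()` emits ISO-8601 timestamps so sessions can be returned as JSON. A minimal usage sketch (illustrative values; assumes `mcp-server/src` is on the import path):

```python
# Illustrative only, not part of the package.
import json
from datetime import datetime, timedelta, timezone

from temporal_utils import WorkSession  # assumption: mcp-server/src is importable

start = datetime(2025, 8, 1, 9, 0, tzinfo=timezone.utc)
session = WorkSession(
    session_id="claude-self-reflect_20250801_090000",
    start_time=start,
    end_time=start + timedelta(minutes=45),
    conversation_ids=["abc123"],
    project="claude-self-reflect",
    duration_minutes=45,
    main_topics=["temporal search"],
    files_touched=["mcp-server/src/temporal_utils.py"],
    message_count=12,
)
print(json.dumps(session.to_dict(), indent=2))  # datetimes come out as ISO strings
```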
+ class SessionDetector:
+     """Detects and groups conversations into work sessions."""
+
+     def __init__(self,
+                  time_gap_minutes: int = 30,
+                  min_session_chunks: int = 1,
+                  merge_similar_topics: bool = True):
+         """
+         Initialize session detector.
+
+         Args:
+             time_gap_minutes: Minutes of inactivity to split sessions
+             min_session_chunks: Minimum chunks to constitute a session
+             merge_similar_topics: Whether to merge adjacent similar topics
+         """
+         self.time_gap = timedelta(minutes=time_gap_minutes)
+         self.min_chunks = min_session_chunks
+         self.merge_similar = merge_similar_topics
+
+     def detect_sessions(self, chunks: List[Dict[str, Any]]) -> List[WorkSession]:
+         """
+         Group conversation chunks into work sessions.
+
+         Args:
+             chunks: List of conversation chunks with metadata
+
+         Returns:
+             List of WorkSession objects
+         """
+         if not chunks:
+             return []
+
+         # Sort by timestamp
+         sorted_chunks = sorted(chunks, key=lambda x: self._parse_timestamp(x.get('timestamp')))
+
+         sessions = []
+         current_session_chunks = []
+
+         for chunk in sorted_chunks:
+             chunk_time = self._parse_timestamp(chunk.get('timestamp'))
+
+             if not current_session_chunks:
+                 current_session_chunks.append(chunk)
+                 continue
+
+             last_time = self._parse_timestamp(current_session_chunks[-1].get('timestamp'))
+             time_gap = chunk_time - last_time
+
+             # Check if we should start a new session
+             if time_gap > self.time_gap or chunk.get('project') != current_session_chunks[-1].get('project'):
+                 # Finalize current session
+                 if len(current_session_chunks) >= self.min_chunks:
+                     session = self._create_session(current_session_chunks)
+                     if session:
+                         sessions.append(session)
+                 current_session_chunks = [chunk]
+             else:
+                 current_session_chunks.append(chunk)
+
+         # Don't forget the last session
+         if len(current_session_chunks) >= self.min_chunks:
+             session = self._create_session(current_session_chunks)
+             if session:
+                 sessions.append(session)
+
+         return sessions
+
+     def _create_session(self, chunks: List[Dict[str, Any]]) -> Optional[WorkSession]:
+         """Create a WorkSession from chunks."""
+         if not chunks:
+             return None
+
+         start_time = self._parse_timestamp(chunks[0].get('timestamp'))
+         end_time = self._parse_timestamp(chunks[-1].get('timestamp'))
+
+         # Aggregate metadata
+         conversation_ids = list(set(c.get('conversation_id') for c in chunks if c.get('conversation_id')))
+         files = []
+         topics = []
+         message_count = 0
+
+         for chunk in chunks:
+             if chunk.get('files_analyzed'):
+                 files.extend(chunk['files_analyzed'])
+             if chunk.get('concepts'):
+                 topics.extend(chunk['concepts'])
+             message_count += chunk.get('message_count', 1)
+
+         # Deduplicate and limit
+         files = list(set(files))[:20]
+
+         # Get most common topics
+         topic_counts = defaultdict(int)
+         for topic in topics:
+             topic_counts[topic] += 1
+         main_topics = sorted(topic_counts.keys(), key=lambda x: topic_counts[x], reverse=True)[:10]
+
+         duration_minutes = int((end_time - start_time).total_seconds() / 60)
+
+         # Generate session ID from start time and project
+         project = chunks[0].get('project', 'unknown')
+         session_id = f"{project}_{start_time.strftime('%Y%m%d_%H%M%S')}"
+
+         return WorkSession(
+             session_id=session_id,
+             start_time=start_time,
+             end_time=end_time,
+             conversation_ids=conversation_ids,
+             project=project,
+             duration_minutes=duration_minutes,
+             main_topics=main_topics,
+             files_touched=files,
+             message_count=message_count
+         )
+
+     def _parse_timestamp(self, timestamp_str: str) -> datetime:
+         """Parse timestamp string to datetime."""
+         if not timestamp_str:
+             return datetime.now(timezone.utc)
+
+         # Handle ISO format with Z suffix
+         if timestamp_str.endswith('Z'):
+             timestamp_str = timestamp_str[:-1] + '+00:00'
+
+         try:
+             dt = datetime.fromisoformat(timestamp_str)
+             if dt.tzinfo is None:
+                 dt = dt.replace(tzinfo=timezone.utc)
+             return dt
+         except (ValueError, AttributeError):
+             logger.warning(f"Failed to parse timestamp: {timestamp_str}")
+             return datetime.now(timezone.utc)
+
+
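Note: the detector sorts chunks by timestamp and starts a new session whenever the inactivity gap exceeds `time_gap_minutes` or the project changes. A hedged sketch with made-up chunk dicts (the field names follow what `_create_session` reads; the import path is an assumption):

```python
# Illustrative only: two chunks 10 minutes apart, then a 1h50m gap -> two sessions.
from temporal_utils import SessionDetector  # assumption: mcp-server/src is importable

chunks = [
    {"timestamp": "2025-08-01T09:00:00Z", "conversation_id": "c1",
     "project": "claude-self-reflect", "concepts": ["qdrant"], "message_count": 3},
    {"timestamp": "2025-08-01T09:10:00Z", "conversation_id": "c1",
     "project": "claude-self-reflect", "concepts": ["temporal search"], "message_count": 5},
    {"timestamp": "2025-08-01T11:00:00Z", "conversation_id": "c2",
     "project": "claude-self-reflect", "concepts": ["import"], "message_count": 2},
]

detector = SessionDetector(time_gap_minutes=30)
for session in detector.detect_sessions(chunks):
    print(session.session_id, session.duration_minutes, session.main_topics)
```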
+ class TemporalParser:
+     """Parses natural language time expressions."""
+
+     def __init__(self):
+         """Initialize the temporal parser."""
+         self.relative_patterns = {
+             'today': (0, 0),
+             'yesterday': (-1, -1),
+             'tomorrow': (1, 1),
+             'this week': (-7, 0),
+             'last week': (-14, -7),
+             'this month': (-30, 0),
+             'last month': (-60, -30),
+             'past week': (-7, 0),
+             'past month': (-30, 0),
+             'past year': (-365, 0),
+         }
+
+     def parse_time_expression(self,
+                               expr: str,
+                               base_time: Optional[datetime] = None) -> Tuple[datetime, datetime]:
+         """
+         Parse natural language time expression into datetime range.
+
+         Args:
+             expr: Natural language time expression
+             base_time: Base time for relative calculations (default: now)
+
+         Returns:
+             Tuple of (start_datetime, end_datetime)
+         """
+         if not base_time:
+             base_time = datetime.now(timezone.utc)
+
+         expr_lower = expr.lower().strip()
+
+         # Check for ISO timestamp
+         if self._looks_like_iso(expr):
+             try:
+                 dt = datetime.fromisoformat(expr.replace('Z', '+00:00'))
+                 if dt.tzinfo is None:
+                     dt = dt.replace(tzinfo=timezone.utc)
+                 return (dt, dt)
+             except ValueError:
+                 pass
+
+         # Check for relative patterns
+         for pattern, (start_days, end_days) in self.relative_patterns.items():
+             if pattern in expr_lower:
+                 start = base_time + timedelta(days=start_days)
+                 end = base_time + timedelta(days=end_days) if end_days != 0 else base_time
+
+                 # Adjust to day boundaries
+                 start = start.replace(hour=0, minute=0, second=0, microsecond=0)
+                 if end_days == 0:
+                     end = base_time
+                 else:
+                     end = end.replace(hour=23, minute=59, second=59, microsecond=999999)
+
+                 return (start, end)
+
+         # Check for "N days/hours/minutes ago"
+         ago_match = re.match(r'(\d+)\s*(day|hour|minute)s?\s*ago', expr_lower)
+         if ago_match:
+             amount = int(ago_match.group(1))
+             unit = ago_match.group(2)
+
+             if unit == 'day':
+                 delta = timedelta(days=amount)
+             elif unit == 'hour':
+                 delta = timedelta(hours=amount)
+             else: # minute
+                 delta = timedelta(minutes=amount)
+
+             target_time = base_time - delta
+             return (target_time, target_time)
+
+         # Check for "last N days/hours"
+         last_match = re.match(r'(?:last|past)\s*(\d+)\s*(day|hour|minute)s?', expr_lower)
+         if last_match:
+             amount = int(last_match.group(1))
+             unit = last_match.group(2)
+
+             if unit == 'day':
+                 delta = timedelta(days=amount)
+             elif unit == 'hour':
+                 delta = timedelta(hours=amount)
+             else: # minute
+                 delta = timedelta(minutes=amount)
+
+             start = base_time - delta
+             return (start, base_time)
+
+         # Check for "since X"
+         if expr_lower.startswith('since '):
+             remaining = expr[6:].strip()
+             start, _ = self.parse_time_expression(remaining, base_time)
+             return (start, base_time)
+
+         # Default: treat as "today"
+         logger.warning(f"Could not parse time expression '{expr}', defaulting to today")
+         start = base_time.replace(hour=0, minute=0, second=0, microsecond=0)
+         return (start, base_time)
+
+     @lru_cache(maxsize=128)
+     def _looks_like_iso(self, expr: str) -> bool:
+         """Check if string looks like ISO timestamp."""
+         iso_pattern = r'^\d{4}-\d{2}-\d{2}'
+         return bool(re.match(iso_pattern, expr))
+
+     def format_relative_time(self, timestamp: Union[str, datetime]) -> str:
+         """
+         Format timestamp as relative time string.
+
+         Args:
+             timestamp: Timestamp to format
+
+         Returns:
+             Relative time string like "2 hours ago", "yesterday"
+         """
+         if isinstance(timestamp, str):
+             if timestamp.endswith('Z'):
+                 timestamp = timestamp[:-1] + '+00:00'
+             try:
+                 dt = datetime.fromisoformat(timestamp)
+             except ValueError:
+                 return timestamp
+         else:
+             dt = timestamp
+
+         if dt.tzinfo is None:
+             dt = dt.replace(tzinfo=timezone.utc)
+
+         now = datetime.now(timezone.utc)
+         delta = now - dt
+
+         # Format based on time difference
+         if delta.total_seconds() < 60:
+             return "just now"
+         elif delta.total_seconds() < 3600:
+             minutes = int(delta.total_seconds() / 60)
+             return f"{minutes} minute{'s' if minutes != 1 else ''} ago"
+         elif delta.total_seconds() < 86400:
+             hours = int(delta.total_seconds() / 3600)
+             return f"{hours} hour{'s' if hours != 1 else ''} ago"
+         elif delta.days == 1:
+             return "yesterday"
+         elif delta.days < 7:
+             return f"{delta.days} days ago"
+         elif delta.days < 30:
+             weeks = delta.days // 7
+             return f"{weeks} week{'s' if weeks != 1 else ''} ago"
+         elif delta.days < 365:
+             months = delta.days // 30
+             return f"{months} month{'s' if months != 1 else ''} ago"
+         else:
+             years = delta.days // 365
+             return f"{years} year{'s' if years != 1 else ''} ago"
+
+
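Note: `parse_time_expression` always returns a (start, end) pair: a day-bounded range for phrases like "last week", an identical pair for point-in-time inputs ("3 hours ago", an ISO date), and "today" as the fallback. A hedged sketch with a fixed `base_time` so the range output is deterministic (the import path is an assumption):

```python
# Illustrative only: exercising the expression forms handled above.
from datetime import datetime, timezone
from temporal_utils import TemporalParser  # assumption: mcp-server/src is importable

parser = TemporalParser()
base = datetime(2025, 8, 15, 12, 0, tzinfo=timezone.utc)

start, end = parser.parse_time_expression("last week", base_time=base)
print(start, "->", end)   # 14 to 7 days before base, snapped to day boundaries

start, end = parser.parse_time_expression("3 hours ago", base_time=base)
print(start == end)       # True: a single point in time

# format_relative_time compares against the real clock, so the output varies.
print(parser.format_relative_time("2025-08-14T12:00:00Z"))
```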
+ def group_by_time_period(chunks: List[Dict[str, Any]],
+                          granularity: str = 'day') -> Dict[str, List[Dict[str, Any]]]:
+     """
+     Group chunks by time period.
+
+     Args:
+         chunks: List of conversation chunks
+         granularity: 'hour', 'day', 'week', or 'month'
+
+     Returns:
+         Dictionary mapping time period keys to chunks
+     """
+     grouped = defaultdict(list)
+
+     for chunk in chunks:
+         timestamp_str = chunk.get('timestamp')
+         if not timestamp_str:
+             continue
+
+         if timestamp_str.endswith('Z'):
+             timestamp_str = timestamp_str[:-1] + '+00:00'
+
+         try:
+             dt = datetime.fromisoformat(timestamp_str)
+         except ValueError:
+             continue
+
+         if dt.tzinfo is None:
+             dt = dt.replace(tzinfo=timezone.utc)
+
+         # Generate period key based on granularity
+         if granularity == 'hour':
+             key = dt.strftime('%Y-%m-%d %H:00')
+         elif granularity == 'day':
+             key = dt.strftime('%Y-%m-%d')
+         elif granularity == 'week':
+             # Get start of week
+             week_start = dt - timedelta(days=dt.weekday())
+             key = week_start.strftime('%Y-W%V')
+         elif granularity == 'month':
+             key = dt.strftime('%Y-%m')
+         else:
+             key = dt.strftime('%Y-%m-%d')
+
+         grouped[key].append(chunk)
+
+     return dict(grouped)
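Note: period keys are plain strings ('2025-08-01', '2025-W31', '2025-08'), so the grouped result serializes directly. A hedged sketch (invented chunk values; import path is an assumption):

```python
# Illustrative only: bucketing chunks by calendar day vs. ISO week.
from temporal_utils import group_by_time_period  # assumption: mcp-server/src is importable

chunks = [
    {"timestamp": "2025-08-01T09:00:00Z", "conversation_id": "c1"},
    {"timestamp": "2025-08-01T17:30:00Z", "conversation_id": "c2"},
    {"timestamp": "2025-08-04T08:15:00Z", "conversation_id": "c3"},
]

print(group_by_time_period(chunks, granularity="day").keys())
# dict_keys(['2025-08-01', '2025-08-04'])
print(group_by_time_period(chunks, granularity="week").keys())
# dict_keys(['2025-W31', '2025-W32'])
```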
package/mcp-server/src/utils.py CHANGED
@@ -1,84 +1,167 @@
- """Shared utilities for claude-self-reflect MCP server and scripts."""
+ """Utility functions and ProjectResolver for Claude Self-Reflect MCP server."""

+ import os
+ import hashlib
+ import re
  from pathlib import Path
+ from typing import Optional, List, Tuple
+ from config import logger, CLAUDE_PROJECTS_PATH

-
- def normalize_project_name(project_path: str, _depth: int = 0) -> str:
-     """
-     Normalize project name for consistent hashing across import/search.
+ class ProjectResolver:
+     """Resolves project names and paths for Claude conversations."""

-     Handles various path formats:
-     - Claude logs format: -Users-kyle-Code-claude-self-reflect -> claude-self-reflect
-     - File paths in Claude logs: /path/to/-Users-kyle-Code-claude-self-reflect/file.jsonl -> claude-self-reflect
-     - Regular file paths: /path/to/project/file.txt -> project
-     - Regular paths: /path/to/project -> project
-     - Already normalized: project -> project
-     - Docker mount paths: /logs/-Users-name-projects-project -> project
+     @staticmethod
+     def get_current_project() -> Optional[str]:
+         """Get the current project name from working directory."""
+         cwd = os.getcwd()
+
+         # Check if we're in a known project directory
+         if '/projects/' in cwd or '/repos/' in cwd or '/code/' in cwd:
+             # Extract project name from path
+             parts = cwd.split('/')
+             for i, part in enumerate(parts):
+                 if part in ['projects', 'repos', 'code'] and i + 1 < len(parts):
+                     return parts[i + 1]
+
+         # Fall back to last directory name
+         return Path(cwd).name

-     Args:
-         project_path: Project path or name in any format
-         _depth: Internal recursion depth counter (do not use)
+     @staticmethod
+     def normalize_project_name(project_name: str) -> str:
+         """Normalize project name for consistent matching."""
+         # Remove common prefixes/suffixes
+         name = project_name
+         for prefix in ['/Users/', '/home/', 'projects/', 'repos/', 'code/']:
+             if name.startswith(prefix):
+                 name = name[len(prefix):]
+
+         # Convert path separators to underscores
+         name = name.replace('/', '_').replace('-', '_')

-     Returns:
-         Normalized project name suitable for consistent hashing
-     """
-     if not project_path:
-         return ""
+         # Remove trailing underscores
+         name = name.rstrip('_')
+
+         return name

-     # Prevent infinite recursion on malformed inputs
-     if _depth > 10:
-         return Path(project_path).name
+     @staticmethod
+     def get_project_hash(project_name: str) -> str:
+         """Get hash for project name (used in collection naming)."""
+         normalized = ProjectResolver.normalize_project_name(project_name)
+         return hashlib.md5(normalized.encode()).hexdigest()[:8]

-     # Remove trailing slashes
-     project_path = project_path.rstrip('/')
+     @staticmethod
+     def find_project_collections(
+         all_collections: List[str],
+         project_name: str
+     ) -> List[str]:
+         """Find collections belonging to a specific project."""
+         normalized = ProjectResolver.normalize_project_name(project_name)
+         project_hash = ProjectResolver.get_project_hash(project_name)
+
+         matching = []
+         for collection in all_collections:
+             # Check if collection matches project hash
+             if collection.startswith(f"conv_{project_hash}_"):
+                 matching.append(collection)
+             # Also check for project name in collection
+             elif normalized in collection.replace('-', '_'):
+                 matching.append(collection)
+
+         return matching

-     # Handle Claude logs format (starts with dash)
-     if project_path.startswith('-'):
-         # For paths like -Users-kyle-Code-claude-self-reflect
-         # We want to extract the actual project name which may contain dashes
-         # Strategy: Find common parent directories and extract what comes after
+     @staticmethod
+     def extract_project_from_collection(collection_name: str) -> str:
+         """Extract project name from collection name."""
+         # Remove conv_ prefix and suffixes
+         name = collection_name
+         if name.startswith('conv_'):
+             name = name[5:]
+
+         # Remove hash prefix if present
+         if '_' in name and len(name.split('_')[0]) == 8:
+             # Likely a hash, remove it
+             parts = name.split('_', 1)
+             if len(parts) > 1:
+                 name = parts[1]

-         # Remove leading dash and convert back to path-like format
-         path_str = project_path[1:].replace('-', '/')
-         path_parts = Path(path_str).parts
+         # Remove embedding type suffix
+         for suffix in ['_voyage', '_local']:
+             if name.endswith(suffix):
+                 name = name[:-len(suffix)]

-         # Look for common project parent directories
-         project_parents = {'projects', 'code', 'Code', 'repos', 'repositories',
-                            'dev', 'Development', 'work', 'src', 'github'}
+         return name
+
+ def parse_natural_language_time(time_str: str) -> Tuple[Optional[str], Optional[str]]:
+     """Parse natural language time strings into ISO timestamps."""
+     from datetime import datetime, timedelta, timezone
+
+     now = datetime.now(timezone.utc)
+     time_str_lower = time_str.lower().strip()
+
+     # Handle relative times
+     if 'yesterday' in time_str_lower:
+         start = (now - timedelta(days=1)).replace(hour=0, minute=0, second=0)
+         end = start + timedelta(days=1)
+         return start.isoformat(), end.isoformat()
+
+     if 'today' in time_str_lower:
+         start = now.replace(hour=0, minute=0, second=0)
+         return start.isoformat(), now.isoformat()
+
+     # Parse "last X" patterns
+     last_match = re.match(r'last (\d+) (hour|day|week|month)', time_str_lower)
+     if last_match:
+         amount = int(last_match.group(1))
+         unit = last_match.group(2)

-         # Find the project name after a known parent directory
-         for i, part in enumerate(path_parts):
-             if part.lower() in project_parents and i + 1 < len(path_parts):
-                 # Everything after the parent directory is the project name
-                 # Join remaining parts with dash if project name has multiple components
-                 remaining = path_parts[i + 1:]
-                 return '-'.join(remaining)
+         if unit == 'hour':
+             delta = timedelta(hours=amount)
+         elif unit == 'day':
+             delta = timedelta(days=amount)
+         elif unit == 'week':
+             delta = timedelta(weeks=amount)
+         elif unit == 'month':
+             delta = timedelta(days=amount * 30)
+         else:
+             delta = timedelta(days=7)

-         # Fallback: just use the last component
-         return path_parts[-1] if path_parts else project_path
+         start = now - delta
+         return start.isoformat(), now.isoformat()

-     # Check if this is a file path that contains a Claude logs directory
-     # Pattern: /path/to/-Users-...-projects-..../filename
-     path_obj = Path(project_path)
+     # Parse "past X" patterns
+     past_match = re.match(r'past (\d+) (hour|day|week|month)', time_str_lower)
+     if past_match:
+         amount = int(past_match.group(1))
+         unit = past_match.group(2)
+
+         if unit == 'hour':
+             delta = timedelta(hours=amount)
+         elif unit == 'day':
+             delta = timedelta(days=amount)
+         elif unit == 'week':
+             delta = timedelta(weeks=amount)
+         elif unit == 'month':
+             delta = timedelta(days=amount * 30)
+         else:
+             delta = timedelta(days=7)
+
+         start = now - delta
+         return start.isoformat(), now.isoformat()

-     # Check if this is a Docker mount path specifically
-     # e.g., /logs/-Users-ramakrishnanannaswamy-projects-claude-self-reflect
-     if str(path_obj).startswith("/logs/") and path_obj.name.startswith("-"):
-         # Process this directory name recursively (Docker case only)
-         return normalize_project_name(path_obj.name, _depth + 1)
+     # Default to last week
+     if 'week' in time_str_lower:
+         start = now - timedelta(days=7)
+         return start.isoformat(), now.isoformat()

-     # Look for a parent directory that starts with dash (Claude logs format)
-     for parent in path_obj.parents:
-         parent_name = parent.name
-         if parent_name.startswith("-"):
-             # Found a Claude logs directory, process it
-             return normalize_project_name(parent_name, _depth + 1)
+     # Default to last 24 hours
+     start = now - timedelta(days=1)
+     return start.isoformat(), now.isoformat()
+
+ def escape_xml(text: str, attr: bool = False) -> str:
+     """Escape text for XML output."""
+     from xml.sax.saxutils import escape

-     # Handle regular paths - if it's a file, get the parent directory
-     # Otherwise use the directory/project name itself
-     if path_obj.suffix: # It's a file (has an extension)
-         # Use the parent directory name
-         return path_obj.parent.name
-     else:
-         # Use the directory name itself
-         return path_obj.name
+     if attr:
+         # For attributes, also escape quotes
+         return escape(text, {'"': '&quot;'})
+     return escape(text)
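Note: the new resolver normalizes a project identifier to an underscore form, hashes it with MD5 (first 8 hex chars), and matches Qdrant collections whose names start with `conv_<hash>_`. A hedged sketch (collection names are invented for illustration; the import path is an assumption):

```python
# Illustrative only: the hashing/normalization logic is the class's own, the data is not.
from utils import ProjectResolver, parse_natural_language_time  # assumption: mcp-server/src is importable

print(ProjectResolver.normalize_project_name("claude-self-reflect"))   # claude_self_reflect

h = ProjectResolver.get_project_hash("claude-self-reflect")            # 8-char md5 prefix
collections = [f"conv_{h}_local", "conv_other_project_voyage"]
print(ProjectResolver.find_project_collections(collections, "claude-self-reflect"))
# only the conv_<hash>_local entry matches, via the hash prefix

print(ProjectResolver.extract_project_from_collection("conv_claude_self_reflect_voyage"))
# claude_self_reflect

start_iso, end_iso = parse_natural_language_time("last 2 days")
print(start_iso, end_iso)   # ISO timestamps spanning the past 48 hours
```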
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "claude-self-reflect",
-   "version": "3.2.4",
+   "version": "3.3.0",
    "description": "Give Claude perfect memory of all your conversations - Installation wizard for Python MCP server",
    "keywords": [
      "claude",
@@ -41,9 +41,18 @@
      "mcp-server/run-mcp-clean.sh",
      "mcp-server/run-mcp-docker.sh",
      "scripts/import-*.py",
+     "scripts/streaming-*.py",
+     "scripts/add-timestamp-indexes.py",
+     "scripts/debug-*.py",
+     "scripts/check-collections.py",
+     "scripts/status.py",
+     "scripts/utils.py",
      "scripts/importer/**/*.py",
+     "scripts/delta-metadata-update.py",
      "scripts/delta-metadata-update-safe.py",
      "scripts/force-metadata-recovery.py",
+     "scripts/precompact-hook.sh",
+     "scripts/import-latest.py",
      "shared/**/*.py",
      ".claude/agents/*.md",
      "config/qdrant-config.yaml",