get-claudia 1.42.2 → 1.42.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/index.js CHANGED
@@ -376,7 +376,7 @@ async function main() {
376
376
  stdio: 'inherit',
377
377
  env: {
378
378
  ...process.env,
379
- CLAUDIA_PROJECT_PATH: isUpgrade ? targetPath : '',
379
+ CLAUDIA_PROJECT_PATH: targetPath,
380
380
  CLAUDIA_NONINTERACTIVE: '1'
381
381
  }
382
382
  });
@@ -160,6 +160,31 @@ class HealthCheckHandler(BaseHTTPRequestHandler):
160
160
  else:
161
161
  self.send_error(404, "Not Found")
162
162
 
163
+ def do_POST(self):
164
+ """Handle POST requests"""
165
+ if self.path == "/backup":
166
+ self._send_backup_response()
167
+ else:
168
+ self.send_error(405, "Method Not Allowed")
169
+
170
+ def _send_backup_response(self):
171
+ """Trigger a database backup and return the path."""
172
+ try:
173
+ db = get_db()
174
+ path = db.backup()
175
+ self._send_json({"status": "ok", "path": str(path)})
176
+ except Exception as e:
177
+ logger.exception("Error triggering backup")
178
+ self._send_json({"status": "error", "message": str(e)}, code=500)
179
+
180
+ def _send_json(self, data: dict, code: int = 200):
181
+ """Helper to send a JSON response."""
182
+ body = json.dumps(data).encode()
183
+ self.send_response(code)
184
+ self.send_header("Content-Type", "application/json")
185
+ self.end_headers()
186
+ self.wfile.write(body)
187
+
163
188
  def _send_health_response(self):
164
189
  """Send basic health check response"""
165
190
  health = {
@@ -722,6 +722,19 @@ async def list_tools() -> ListToolsResult:
722
722
  "properties": {},
723
723
  },
724
724
  ),
725
+ Tool(
726
+ name="memory.backup",
727
+ title="Trigger Database Backup",
728
+ description=(
729
+ "Trigger an immediate backup of the memory database. Returns the path "
730
+ "of the newly created backup file. Backups use a timestamp suffix and "
731
+ "older backups are pruned automatically per the retention policy."
732
+ ),
733
+ inputSchema={
734
+ "type": "object",
735
+ "properties": {},
736
+ },
737
+ ),
725
738
  Tool(
726
739
  name="memory.project_health",
727
740
  title="Project Health Check",
@@ -2587,8 +2600,14 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> CallToolResult:
2587
2600
  )
2588
2601
 
2589
2602
  elif name == "memory.system_health":
2590
- from ..daemon.health import build_status_report
2591
- report = build_status_report()
2603
+ import urllib.request, urllib.error
2604
+ report = None
2605
+ try:
2606
+ with urllib.request.urlopen("http://localhost:3848/status", timeout=2) as resp:
2607
+ report = json.loads(resp.read().decode())
2608
+ except (urllib.error.URLError, OSError):
2609
+ from ..daemon.health import build_status_report
2610
+ report = build_status_report()
2592
2611
  embedding_svc = get_embedding_service()
2593
2612
  if hasattr(embedding_svc, '_model_mismatch') and embedding_svc._model_mismatch:
2594
2613
  if "components" not in report:
@@ -2603,6 +2622,28 @@ async def call_tool(name: str, arguments: Dict[str, Any]) -> CallToolResult:
2603
2622
  ]
2604
2623
  )
2605
2624
 
2625
+ elif name == "memory.backup":
2626
+ import urllib.request, urllib.error
2627
+ result = None
2628
+ try:
2629
+ req = urllib.request.Request(
2630
+ "http://localhost:3848/backup", method="POST", data=b""
2631
+ )
2632
+ with urllib.request.urlopen(req, timeout=10) as resp:
2633
+ result = json.loads(resp.read().decode())
2634
+ except (urllib.error.URLError, OSError):
2635
+ # Daemon not running — trigger backup directly
2636
+ backup_path = get_db().backup()
2637
+ result = {"status": "ok", "path": str(backup_path)}
2638
+ return CallToolResult(
2639
+ content=[
2640
+ TextContent(
2641
+ type="text",
2642
+ text=json.dumps(result, indent=2),
2643
+ )
2644
+ ]
2645
+ )
2646
+
2606
2647
  elif name == "memory.sync_vault":
2607
2648
  from ..config import _project_id
2608
2649
  from ..services.vault_sync import run_vault_sync
@@ -2010,6 +2010,49 @@ class ConsolidateService:
2010
2010
 
2011
2011
  logger.debug(f"Merged reflection {duplicate['id']} into {primary['id']}")
2012
2012
 
2013
    def close_stale_episodes(self) -> int:
        """Auto-close orphan episodes that have no end_session call.

        Single-turn sessions and interrupted sessions leave episodes with
        ``ended_at IS NULL``. After 24 hours these will never be closed
        naturally, so this pass marks them as summarized with a synthetic
        summary to prevent them from appearing as false positives in health
        reports.

        Returns the number of episodes closed.
        """
        # Naive UTC ISO-8601 cutoff; assumes started_at is stored in the same
        # naive-UTC ISO format so string comparison orders correctly — TODO confirm.
        cutoff: str = (datetime.utcnow() - timedelta(hours=24)).isoformat()
        try:
            # Find stale open episodes and close them. ended_at is set to the
            # timestamp of the latest buffered turn if one exists, otherwise to
            # started_at itself (single-turn / empty sessions).
            self.db.execute(
                """
                UPDATE episodes
                SET
                    ended_at = COALESCE(
                        (
                            SELECT MAX(created_at) FROM turn_buffer
                            WHERE turn_buffer.session_id = episodes.session_id
                        ),
                        started_at
                    ),
                    is_summarized = 1,
                    summary = 'Auto-closed: session ended without explicit end_session call'
                WHERE ended_at IS NULL
                  AND started_at < ?
                """,
                (cutoff,),
            )
            # NOTE(review): SQLite changes() reports the row count of the most
            # recent statement on the SAME connection — assumes self.db.execute
            # reuses one connection between these two calls; verify the wrapper.
            rows = self.db.execute("SELECT changes()", fetch=True)
            count = rows[0][0] if rows else 0
            if count:
                logger.info(f"Auto-closed {count} stale open episode(s)")
            return count
        except Exception as e:
            # Best-effort maintenance pass: log and report zero rather than let
            # a cleanup failure break the surrounding retention run.
            logger.warning(f"close_stale_episodes failed: {e}")
            return 0
2055
+
2013
2056
  def run_retention_cleanup(self) -> Dict[str, int]:
2014
2057
  """Clean up old data per retention policies.
2015
2058
 
@@ -2018,6 +2061,7 @@ class ConsolidateService:
2018
2061
  - Expired predictions past retention window
2019
2062
  - Archived turn_buffer from old episodes
2020
2063
  - Old metrics rows
2064
+ - Auto-closes stale open episodes (no end_session after 24 h)
2021
2065
  """
2022
2066
  results = {}
2023
2067
  now = datetime.utcnow()
@@ -2074,6 +2118,9 @@ class ConsolidateService:
2074
2118
  logger.warning(f"Metrics cleanup failed: {e}")
2075
2119
  results["metrics_deleted"] = 0
2076
2120
 
2121
+ # Auto-close orphan episodes (no end_session after 24 h)
2122
+ results["stale_episodes_closed"] = self.close_stale_episodes()
2123
+
2077
2124
  logger.info(f"Retention cleanup: {results}")
2078
2125
  return results
2079
2126
 
@@ -366,9 +366,13 @@ try {
366
366
  }
367
367
 
368
368
  # Create the scheduled task action
369
+ $taskArgs = "-m claudia_memory --standalone"
370
+ if ($env:CLAUDIA_PROJECT_PATH) {
371
+ $taskArgs += " --project-dir `"$($env:CLAUDIA_PROJECT_PATH)`""
372
+ }
369
373
  $action = New-ScheduledTaskAction `
370
374
  -Execute $VENV_PYTHON `
371
- -Argument "-m claudia_memory --standalone" `
375
+ -Argument $taskArgs `
372
376
  -WorkingDirectory $DAEMON_DIR
373
377
 
374
378
  # Trigger: at logon for current user
@@ -518,7 +518,7 @@ if [[ "$OSTYPE" == "darwin"* ]]; then
518
518
  <string>$VENV_DIR/bin/python</string>
519
519
  <string>-m</string>
520
520
  <string>claudia_memory</string>
521
- <string>--standalone</string>
521
+ <string>--standalone</string>$(if [ -n "$CLAUDIA_PROJECT_PATH" ]; then printf '\n <string>--project-dir</string>\n <string>%s</string>' "$CLAUDIA_PROJECT_PATH"; fi)
522
522
  </array>
523
523
  <key>WorkingDirectory</key>
524
524
  <string>$DAEMON_DIR</string>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "get-claudia",
3
- "version": "1.42.2",
3
+ "version": "1.42.3",
4
4
  "description": "An AI assistant who learns how you work.",
5
5
  "keywords": [
6
6
  "claudia",