meshcode 2.10.49__tar.gz → 2.10.51__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {meshcode-2.10.49 → meshcode-2.10.51}/PKG-INFO +1 -1
  2. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/__init__.py +1 -1
  3. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/backend.py +4 -2
  4. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/realtime.py +41 -5
  5. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/server.py +169 -81
  6. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode.egg-info/PKG-INFO +1 -1
  7. {meshcode-2.10.49 → meshcode-2.10.51}/pyproject.toml +1 -1
  8. {meshcode-2.10.49 → meshcode-2.10.51}/README.md +0 -0
  9. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/ascii_art.py +0 -0
  10. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/cli.py +0 -0
  11. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/comms_v4.py +0 -0
  12. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/invites.py +0 -0
  13. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/launcher.py +0 -0
  14. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/launcher_install.py +0 -0
  15. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/__init__.py +0 -0
  16. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/__main__.py +0 -0
  17. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/test_backend.py +0 -0
  18. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/test_realtime.py +0 -0
  19. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/meshcode_mcp/test_server_wrapper.py +0 -0
  20. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/preferences.py +0 -0
  21. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/protocol_v2.py +0 -0
  22. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/run_agent.py +0 -0
  23. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/secrets.py +0 -0
  24. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/self_update.py +0 -0
  25. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode/setup_clients.py +0 -0
  26. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode.egg-info/SOURCES.txt +0 -0
  27. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode.egg-info/dependency_links.txt +0 -0
  28. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode.egg-info/entry_points.txt +0 -0
  29. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode.egg-info/requires.txt +0 -0
  30. {meshcode-2.10.49 → meshcode-2.10.51}/meshcode.egg-info/top_level.txt +0 -0
  31. {meshcode-2.10.49 → meshcode-2.10.51}/setup.cfg +0 -0
  32. {meshcode-2.10.49 → meshcode-2.10.51}/tests/test_status_enum_coverage.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: meshcode
3
- Version: 2.10.49
3
+ Version: 2.10.51
4
4
  Summary: Real-time communication between AI agents — Supabase-backed CLI
5
5
  Author-email: MeshCode <hello@meshcode.io>
6
6
  License: MIT
@@ -1,2 +1,2 @@
1
1
  """MeshCode — Real-time communication between AI agents."""
2
- __version__ = "2.10.49"
2
+ __version__ = "2.10.51"
@@ -715,9 +715,10 @@ def read_inbox(project_id: str, agent: str, mark_read: bool = True, api_key: Opt
715
715
  # If RPC doesn't exist yet, fall through to direct query
716
716
 
717
717
  # Fallback: direct SELECT (tests/legacy — requires anon RLS bypass)
718
+ # Include broadcasts (to_agent='*') — matches RPC behavior.
718
719
  messages = sb_select(
719
720
  "mc_messages",
720
- f"project_id=eq.{project_id}&to_agent=eq.{quote(agent)}&read=eq.false",
721
+ f"project_id=eq.{project_id}&to_agent=in.({quote(agent)},*)&read=eq.false",
721
722
  order="created_at.asc",
722
723
  )
723
724
  # Auto-decrypt encrypted messages
@@ -792,9 +793,10 @@ def count_pending(project_id: str, agent: str, api_key: Optional[str] = None) ->
792
793
  # Fall through to direct query if RPC doesn't exist yet
793
794
 
794
795
  # Fallback: direct SELECT (tests/legacy)
796
+ # Include broadcasts (to_agent='*') — matches mc_read_inbox behavior.
795
797
  pending = sb_select(
796
798
  "mc_messages",
797
- f"project_id=eq.{project_id}&to_agent=eq.{quote(agent)}&read=eq.false&type=neq.ack",
799
+ f"project_id=eq.{project_id}&to_agent=in.({quote(agent)},*)&read=eq.false&type=neq.ack",
798
800
  limit=1000,
799
801
  )
800
802
  return len(pending)
@@ -173,6 +173,18 @@ class RealtimeListener:
173
173
  "schema": "meshcode",
174
174
  "table": "mc_messages",
175
175
  "filter": "to_agent=eq.*",
176
+ },
177
+ {
178
+ "event": "INSERT",
179
+ "schema": "meshcode",
180
+ "table": "mc_tasks",
181
+ "filter": f"assignee=eq.{url_quote(self.agent_name, safe='')}",
182
+ },
183
+ {
184
+ "event": "UPDATE",
185
+ "schema": "meshcode",
186
+ "table": "mc_tasks",
187
+ "filter": f"assignee=eq.{url_quote(self.agent_name, safe='')}",
176
188
  }
177
189
  ]
178
190
  }
@@ -249,7 +261,13 @@ class RealtimeListener:
249
261
  "ref": str(ref),
250
262
  }))
251
263
  except Exception as e:
252
- log.warning(f"Realtime heartbeat send failed: {e}")
264
+ log.warning(f"Realtime heartbeat send failed: {e} — closing WS to force reconnect")
265
+ # Close WS to trigger recv loop exit → outer _run() reconnects.
266
+ # Without this, half-dead WS stays open for 45-60s silent blackout.
267
+ try:
268
+ await ws.close()
269
+ except Exception:
270
+ pass
253
271
  return
254
272
 
255
273
  async def _handle_message(self, msg: Dict[str, Any]) -> None:
@@ -260,7 +278,22 @@ class RealtimeListener:
260
278
  # {"event": "postgres_changes", "payload": {"data": {"record": {...}, "type": "INSERT", ...}}}
261
279
  if event == "postgres_changes":
262
280
  data = payload.get("data") or {}
263
- if data.get("type") == "INSERT":
281
+ table = data.get("table", (data.get("record") or {}).get("_table", ""))
282
+ change_type = data.get("type")
283
+
284
+ # ── mc_tasks events: wake the agent when a task is assigned/updated ──
285
+ if table == "mc_tasks" and change_type in ("INSERT", "UPDATE"):
286
+ record = data.get("record") or {}
287
+ assignee = record.get("assignee", "")
288
+ if assignee == self.agent_name and record.get("status") in ("open", "in_progress"):
289
+ log.info(f"task event: {change_type} task '{record.get('title', '?')[:60]}' for {self.agent_name}")
290
+ try:
291
+ self.message_event.set()
292
+ except Exception:
293
+ pass
294
+ return
295
+
296
+ if change_type == "INSERT":
264
297
  record = data.get("record") or {}
265
298
  to = record.get("to_agent")
266
299
  from_agent = record.get("from_agent")
@@ -302,13 +335,16 @@ class RealtimeListener:
302
335
 
303
336
  def drain(self) -> list:
304
337
  """Pop and return all queued messages."""
305
- out = list(self.queue)
306
- self.queue.clear()
307
- # Queue is empty reset the wake event so the next wait blocks again.
338
+ # Clear event FIRST, then drain queue. If a message arrives between
339
+ # clear() and the drain, it re-sets the event and lands in queue —
340
+ # the next wait_for_message returns immediately. This avoids the race
341
+ # where clear() after drain() could eat a wake signal.
308
342
  try:
309
343
  self.message_event.clear()
310
344
  except Exception:
311
345
  pass
346
+ out = list(self.queue)
347
+ self.queue.clear()
312
348
  return out
313
349
 
314
350
  async def wait_for_message(self, timeout: Optional[float] = None) -> bool:
@@ -13,6 +13,7 @@ import logging
13
13
  import os
14
14
  import sys
15
15
  import hashlib as _hashlib
16
+ import threading as _threading
16
17
  import traceback as _traceback
17
18
  from collections import deque
18
19
  from contextlib import asynccontextmanager
@@ -103,13 +104,15 @@ def _mc_log(msg: str, level: str = "info") -> None:
103
104
  _SEEN_MSG_IDS: dict = {} # key -> timestamp (monotonic)
104
105
  _SEEN_MSG_ORDER: deque = deque()
105
106
  _SEEN_MSG_CAP = 2000
106
- _SEEN_TTL = 300.0 # 5 minutes
107
+ _SEEN_TTL = 1800.0 # 30 minutes — prevents duplicate delivery during long sessions
108
+ _SEEN_LOCK = _threading.Lock() # Guards _SEEN_MSG_IDS + _SEEN_MSG_ORDER
107
109
 
108
110
  # ============================================================
109
111
  # Auto-wake: when agent is NOT in meshcode_wait and a message
110
112
  # arrives, inject text into the terminal to wake the agent.
111
113
  # ============================================================
112
114
  _IN_WAIT = False # True while meshcode_wait is blocking
115
+ _STATE_LOCK = _threading.Lock() # Guards _IN_WAIT, _CURRENT_STATE, _last_tool_at
113
116
  # Default OFF — keystroke injection can corrupt stdin on some terminals.
114
117
  # The primary nudge path is now in comms_v4.nudge_agent() which uses
115
118
  # platform-specific window activation (SetForegroundWindow on Windows,
@@ -292,8 +295,8 @@ def _seen_key(msg: Dict[str, Any]) -> str:
292
295
  return f"{msg.get('from') or msg.get('from_agent')}|{msg.get('ts') or msg.get('created_at')}|{payload_str}"
293
296
 
294
297
 
295
- def _evict_expired() -> None:
296
- """Remove entries older than _SEEN_TTL from the dedup cache."""
298
+ def _evict_expired_unlocked() -> None:
299
+ """Remove entries older than _SEEN_TTL. Caller MUST hold _SEEN_LOCK."""
297
300
  now = _time.monotonic()
298
301
  while _SEEN_MSG_ORDER:
299
302
  oldest_key = _SEEN_MSG_ORDER[0]
@@ -305,31 +308,43 @@ def _evict_expired() -> None:
305
308
  break
306
309
 
307
310
 
311
+ def _evict_expired() -> None:
312
+ """Thread-safe wrapper for eviction."""
313
+ with _SEEN_LOCK:
314
+ _evict_expired_unlocked()
315
+
316
+
308
317
  def _mark_seen(key: str) -> None:
309
- now = _time.monotonic()
310
- if key in _SEEN_MSG_IDS:
311
- # Refresh timestamp on re-sight (extends TTL)
318
+ with _SEEN_LOCK:
319
+ now = _time.monotonic()
320
+ if key in _SEEN_MSG_IDS:
321
+ _SEEN_MSG_IDS[key] = now
322
+ return
312
323
  _SEEN_MSG_IDS[key] = now
313
- return
314
- _SEEN_MSG_IDS[key] = now
315
- _SEEN_MSG_ORDER.append(key)
316
- # Evict expired + cap enforcement
317
- _evict_expired()
318
- while len(_SEEN_MSG_ORDER) > _SEEN_MSG_CAP:
319
- old = _SEEN_MSG_ORDER.popleft()
320
- _SEEN_MSG_IDS.pop(old, None)
324
+ _SEEN_MSG_ORDER.append(key)
325
+ _evict_expired_unlocked()
326
+ while len(_SEEN_MSG_ORDER) > _SEEN_MSG_CAP:
327
+ old = _SEEN_MSG_ORDER.popleft()
328
+ _SEEN_MSG_IDS.pop(old, None)
321
329
 
322
330
 
323
331
  def _filter_and_mark(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
324
- """Drop already-seen messages; mark the rest as seen."""
325
- _evict_expired() # Clean stale entries before checking
326
- out = []
327
- for m in messages:
328
- k = _seen_key(m)
329
- if k in _SEEN_MSG_IDS:
330
- continue
331
- _mark_seen(k)
332
- out.append(m)
332
+ """Drop already-seen messages; mark the rest as seen. Thread-safe."""
333
+ with _SEEN_LOCK:
334
+ _evict_expired_unlocked()
335
+ out = []
336
+ for m in messages:
337
+ k = _seen_key(m)
338
+ if k in _SEEN_MSG_IDS:
339
+ continue
340
+ now = _time.monotonic()
341
+ _SEEN_MSG_IDS[k] = now
342
+ _SEEN_MSG_ORDER.append(k)
343
+ out.append(m)
344
+ # Cap enforcement
345
+ while len(_SEEN_MSG_ORDER) > _SEEN_MSG_CAP:
346
+ old = _SEEN_MSG_ORDER.popleft()
347
+ _SEEN_MSG_IDS.pop(old, None)
333
348
  return out
334
349
 
335
350
 
@@ -1330,11 +1345,12 @@ def _heartbeat_loop_inner():
1330
1345
  try:
1331
1346
  be.sb_rpc("mc_heartbeat", {"p_project_id": _PROJECT_ID, "p_agent_name": AGENT_NAME, "p_version": _SDK_VERSION})
1332
1347
 
1333
- # CPU-based status detection
1348
+ # CPU-based status detection — read shared state under lock
1334
1349
  parent_cpu = _get_parent_cpu()
1335
- cur_state = _current_state
1336
- in_wait = _IN_WAIT
1337
- idle_secs = _time.time() - _last_tool_at
1350
+ with _STATE_LOCK:
1351
+ cur_state = _current_state
1352
+ in_wait = _IN_WAIT
1353
+ idle_secs = _time.time() - _last_tool_at
1338
1354
 
1339
1355
  if _is_windows and lease_counter % 12 == 0:
1340
1356
  # Periodic Windows debug dump (every ~60s on pro, ~180s on free)
@@ -1454,13 +1470,15 @@ async def lifespan(_app):
1454
1470
  # Without this, the lifespan yields before the WS is ready, and Claude
1455
1471
  # Code's handshake can time out on slower network paths — one agent
1456
1472
  # fails while siblings on the same box succeed.
1457
- for _rt_check in range(10):
1458
- if getattr(_REALTIME, '_connected', False):
1459
- log.info(f"Realtime connected for {AGENT_NAME}")
1473
+ for _rt_check in range(20):
1474
+ if getattr(_REALTIME, '_subscription_ok', False):
1475
+ log.info(f"Realtime connected + subscribed for {AGENT_NAME}")
1460
1476
  break
1477
+ if _rt_check == 10 and getattr(_REALTIME, '_connected', False):
1478
+ log.info(f"Realtime connected (not yet subscribed) for {AGENT_NAME} — waiting for subscription...")
1461
1479
  await asyncio.sleep(0.5)
1462
1480
  else:
1463
- log.warning(f"Realtime not connected after 5s for {AGENT_NAME} — continuing with polling fallback")
1481
+ log.warning(f"Realtime not fully subscribed after 10s for {AGENT_NAME} — continuing with polling fallback")
1464
1482
 
1465
1483
  # IMMEDIATE: send first heartbeat + set online status BEFORE any tool calls.
1466
1484
  # Without this, the agent appears offline for up to 30s after boot.
@@ -1501,6 +1519,10 @@ async def lifespan(_app):
1501
1519
  _heartbeat_stop.set()
1502
1520
  except Exception:
1503
1521
  pass
1522
+ try:
1523
+ be.set_status(_PROJECT_ID, AGENT_NAME, "offline", "terminal closed", api_key=_get_api_key())
1524
+ except Exception:
1525
+ pass
1504
1526
  try:
1505
1527
  _remove_pid_lockfile()
1506
1528
  except Exception:
@@ -1867,6 +1889,13 @@ try:
1867
1889
  except Exception as _e:
1868
1890
  print(f"[meshcode] Could not restore last_seen: {_e}", file=sys.stderr)
1869
1891
 
1892
+ # Fallback: if last_seen is still None, default to now minus 5 minutes
1893
+ # to avoid flooding agent with ancient messages on cold boot.
1894
+ if _LAST_SEEN_TS is None:
1895
+ from datetime import datetime, timezone, timedelta
1896
+ _LAST_SEEN_TS = (datetime.now(timezone.utc) - timedelta(minutes=5)).isoformat()
1897
+ print(f"[meshcode] last_seen fallback: {_LAST_SEEN_TS} (now minus 5min)", file=sys.stderr)
1898
+
1870
1899
 
1871
1900
  def _get_pending_tasks_summary() -> Optional[List[Dict[str, str]]]:
1872
1901
  """Fetch tasks that THIS agent should work on. Returns compact list or None.
@@ -1949,19 +1978,17 @@ async def meshcode_wait(timeout_seconds: int = 20, include_acks: bool = False) -
1949
1978
  db_pending = be.count_pending(_PROJECT_ID, AGENT_NAME, api_key=_get_api_key())
1950
1979
  if db_pending and db_pending > 0:
1951
1980
  raw = be.read_inbox(_PROJECT_ID, AGENT_NAME, mark_read=True, api_key=_get_api_key())
1952
- msgs = [
1953
- {"from": m["from_agent"], "type": m.get("type", "msg"),
1954
- "ts": m.get("created_at"), "payload": m.get("payload", {}),
1955
- "id": m.get("id"), "parent_id": m.get("parent_msg_id")}
1956
- for m in raw
1957
- ]
1958
- # Dedup against already-seen messages (fixes race where
1959
- # background mark_read hasn't completed yet)
1960
- deduped = _filter_and_mark(msgs)
1961
- if not deduped:
1962
- pass # All messages already seen — fall through to wait loop
1963
- else:
1964
- split = _split_messages(deduped)
1981
+ if raw:
1982
+ msgs = [
1983
+ {"from": m["from_agent"], "type": m.get("type", "msg"),
1984
+ "ts": m.get("created_at"), "payload": m.get("payload", {}),
1985
+ "id": m.get("id"), "parent_id": m.get("parent_msg_id")}
1986
+ for m in raw
1987
+ ]
1988
+ # DB is source of truth: if unread in DB, deliver to agent.
1989
+ # Don't let in-memory dedup cache block delivery.
1990
+ _filter_and_mark(msgs) # mark as seen but don't filter
1991
+ split = _split_messages(msgs)
1965
1992
  # Only refuse for real messages — ack-only batches should not block wait
1966
1993
  if split["messages"] or split["done_signals"]:
1967
1994
  return {
@@ -1974,7 +2001,8 @@ async def meshcode_wait(timeout_seconds: int = 20, include_acks: bool = False) -
1974
2001
  except Exception:
1975
2002
  pass
1976
2003
 
1977
- _IN_WAIT = True
2004
+ with _STATE_LOCK:
2005
+ _IN_WAIT = True
1978
2006
  _set_state("waiting", "listening for messages")
1979
2007
  capped_timeout = min(max(1, int(timeout_seconds)), 20)
1980
2008
  try:
@@ -1996,6 +2024,13 @@ async def meshcode_wait(timeout_seconds: int = 20, include_acks: bool = False) -
1996
2024
  _CONSECUTIVE_IDLE_SECONDS = 0
1997
2025
  break
1998
2026
 
2027
+ # Tasks detected mid-wait (via Realtime mc_tasks subscription
2028
+ # or periodic sub-iteration polling) — return immediately.
2029
+ if result.get("pending_tasks"):
2030
+ _set_state("online", "")
2031
+ _CONSECUTIVE_IDLE_SECONDS = 0
2032
+ break
2033
+
1999
2034
  if result.get("timed_out"):
2000
2035
  _CONSECUTIVE_IDLE_SECONDS += capped_timeout
2001
2036
 
@@ -2047,7 +2082,8 @@ async def meshcode_wait(timeout_seconds: int = 20, include_acks: bool = False) -
2047
2082
  log.debug(f"last_seen memory persist failed: {e}")
2048
2083
  return result
2049
2084
  finally:
2050
- _IN_WAIT = False
2085
+ with _STATE_LOCK:
2086
+ _IN_WAIT = False
2051
2087
 
2052
2088
 
2053
2089
  def _mark_realtime_msgs_read_in_db(messages: List[Dict[str, Any]]) -> None:
@@ -2065,16 +2101,26 @@ def _mark_realtime_msgs_read_in_db(messages: List[Dict[str, Any]]) -> None:
2065
2101
  return
2066
2102
 
2067
2103
  def _do():
2068
- for mid in msg_ids:
2069
- try:
2070
- be.sb_rpc("mc_mark_message_read", {
2071
- "p_api_key": api_key,
2072
- "p_project_id": _PROJECT_ID,
2073
- "p_message_id": mid,
2074
- })
2075
- except Exception as e:
2076
- log.debug(f"mark_read failed for msg {mid}: {e}")
2077
- threading.Thread(target=_do, daemon=True).start()
2104
+ # Batch mark_read: single RPC instead of N individual calls
2105
+ try:
2106
+ be.sb_rpc("mc_mark_messages_read_batch", {
2107
+ "p_api_key": api_key,
2108
+ "p_project_id": _PROJECT_ID,
2109
+ "p_message_ids": msg_ids,
2110
+ })
2111
+ except Exception:
2112
+ # Fallback: individual mark_read if batch RPC not deployed yet
2113
+ for mid in msg_ids:
2114
+ try:
2115
+ be.sb_rpc("mc_mark_message_read", {
2116
+ "p_api_key": api_key,
2117
+ "p_project_id": _PROJECT_ID,
2118
+ "p_message_id": mid,
2119
+ })
2120
+ except Exception as e:
2121
+ log.debug(f"mark_read failed for msg {mid}: {e}")
2122
+ # Non-daemon: survives MCP shutdown so mark_read RPCs complete
2123
+ threading.Thread(target=_do, daemon=False, name="mark_read_bg").start()
2078
2124
 
2079
2125
 
2080
2126
  async def _meshcode_wait_inner(actual_timeout: int, include_acks: bool) -> Dict[str, Any]:
@@ -2111,6 +2157,9 @@ async def _meshcode_wait_inner(actual_timeout: int, include_acks: bool) -> Dict[
2111
2157
  "got_message": True,
2112
2158
  **split,
2113
2159
  }
2160
+ # Expose queue overflow metric so agents know messages were lost
2161
+ if _REALTIME and _REALTIME.dropped_count > 0:
2162
+ out["dropped_messages"] = _REALTIME.dropped_count
2114
2163
  done = _detect_global_done(deduped)
2115
2164
  if done:
2116
2165
  out["got_done"] = True
@@ -2130,10 +2179,10 @@ async def _meshcode_wait_inner(actual_timeout: int, include_acks: bool) -> Dict[
2130
2179
  _rt_live = _REALTIME and _REALTIME.is_subscribed
2131
2180
 
2132
2181
  if _rt_live:
2133
- # 2a) Real async wait zero CPU, zero Supabase calls.
2134
- # Split into 5s sub-waits so we can detect Realtime subscription
2135
- # drops mid-wait and fall through to DB poll immediately instead
2136
- # of waiting the full timeout with a dead connection.
2182
+ # 2a) Realtime wait with DB safety net.
2183
+ # Split into 5s sub-waits. Between each sub-wait, also check DB
2184
+ # as a safety net Realtime WS can die silently and is_subscribed
2185
+ # stays True, so we must not rely on it exclusively.
2137
2186
  _rt_sub_timeout = 5.0
2138
2187
  _rt_elapsed = 0.0
2139
2188
  woke = False
@@ -2153,6 +2202,34 @@ async def _meshcode_wait_inner(actual_timeout: int, include_acks: bool) -> Dict[
2153
2202
  if woke:
2154
2203
  break
2155
2204
  _rt_elapsed += _this_wait
2205
+ # Check for new tasks between sub-iterations
2206
+ _sub_tasks = _get_pending_tasks_summary()
2207
+ if _sub_tasks:
2208
+ return {"timed_out": False, "got_message": False, "pending_tasks": _sub_tasks, "reason": "task_detected_mid_wait"}
2209
+ # DB safety net: check for unread messages even when Realtime
2210
+ # claims to be alive. Realtime WS can die silently.
2211
+ try:
2212
+ _safety_key = _get_api_key()
2213
+ if _safety_key:
2214
+ _safety_cnt = be.count_pending(_PROJECT_ID, AGENT_NAME, api_key=_safety_key)
2215
+ if _safety_cnt and _safety_cnt > 0:
2216
+ log.info(f"[meshcode] DB safety net: {_safety_cnt} unread msgs despite Realtime — reading from DB")
2217
+ _safety_raw = be.read_inbox(_PROJECT_ID, AGENT_NAME, mark_read=True, api_key=_safety_key)
2218
+ if _safety_raw:
2219
+ _safety_msgs = [
2220
+ {"from": m["from_agent"], "type": m.get("type", "msg"),
2221
+ "ts": m.get("created_at"), "payload": m.get("payload", {}),
2222
+ "id": m.get("id"), "parent_id": m.get("parent_msg_id")}
2223
+ for m in _safety_raw
2224
+ ]
2225
+ _filter_and_mark(_safety_msgs)
2226
+ _safety_split = _split_messages(_safety_msgs)
2227
+ if not include_acks:
2228
+ _safety_split["acks"] = []
2229
+ if _safety_split["messages"] or _safety_split["done_signals"]:
2230
+ return {"got_message": True, **_safety_split}
2231
+ except Exception as _db_err:
2232
+ log.warning(f"[meshcode] DB safety net error: {_db_err}")
2156
2233
  # Health check: if subscription dropped, switch to DB poll
2157
2234
  if not (_REALTIME and _REALTIME.is_subscribed):
2158
2235
  log.info("[meshcode] Realtime subscription lost mid-wait — switching to DB poll")
@@ -2188,15 +2265,17 @@ async def _meshcode_wait_inner(actual_timeout: int, include_acks: bool) -> Dict[
2188
2265
  "id": m.get("id"), "parent_id": m.get("parent_msg_id")}
2189
2266
  for m in raw
2190
2267
  ]
2191
- deduped = _filter_and_mark(msgs)
2192
- if deduped:
2193
- split = _split_messages(deduped)
2194
- if not include_acks:
2195
- split["acks"] = []
2196
- if split["messages"] or split["done_signals"]:
2197
- return {"got_message": True, **split}
2268
+ # DB is source of truth: if read=false in DB, deliver
2269
+ # regardless of in-memory dedup cache (fixes stuck loop
2270
+ # where dedup blocks delivery but mark_read already fired).
2271
+ _filter_and_mark(msgs) # mark as seen but don't filter
2272
+ split = _split_messages(msgs)
2273
+ if not include_acks:
2274
+ split["acks"] = []
2275
+ if split["messages"] or split["done_signals"]:
2276
+ return {"got_message": True, **split}
2198
2277
  except Exception as e:
2199
- log.debug(f"DB poll fallback error: {e}")
2278
+ log.warning(f"DB poll fallback error: {e}")
2200
2279
 
2201
2280
  # Final fallback: one last DB check (covers realtime path missing msgs)
2202
2281
  try:
@@ -2212,15 +2291,15 @@ async def _meshcode_wait_inner(actual_timeout: int, include_acks: bool) -> Dict[
2212
2291
  "id": m.get("id"), "parent_id": m.get("parent_msg_id")}
2213
2292
  for m in raw
2214
2293
  ]
2215
- deduped = _filter_and_mark(msgs)
2216
- if deduped:
2217
- split = _split_messages(deduped)
2218
- if not include_acks:
2219
- split["acks"] = []
2220
- if split["messages"] or split["done_signals"]:
2221
- return {"got_message": True, **split}
2294
+ # DB is source of truth: deliver all unread messages.
2295
+ _filter_and_mark(msgs) # mark as seen but don't filter
2296
+ split = _split_messages(msgs)
2297
+ if not include_acks:
2298
+ split["acks"] = []
2299
+ if split["messages"] or split["done_signals"]:
2300
+ return {"got_message": True, **split}
2222
2301
  except Exception as e:
2223
- log.debug(f"final DB fallback error: {e}")
2302
+ log.warning(f"final DB fallback error: {e}")
2224
2303
 
2225
2304
  # Check if there's any pending work before returning timeout
2226
2305
  pending_tasks = _get_pending_tasks_summary()
@@ -2278,9 +2357,14 @@ def meshcode_check(include_acks: bool = False, since: Optional[str] = None, mark
2278
2357
  # Don't mark as seen — meshcode_check is a peek, not a consume
2279
2358
  deduped = [m for m in realtime_buffered if _seen_key(m) not in _SEEN_MSG_IDS]
2280
2359
 
2281
- # Fallback: if realtime buffer is empty but DB has pending messages,
2282
- # fetch them from the DB. mark_read controls whether we consume or peek.
2283
- if not deduped and pending > 0:
2360
+ # When mark_read=True and we have RT-buffered messages, also mark them
2361
+ # as read in DB so they don't inflate pending counts on restart.
2362
+ if mark_read and deduped:
2363
+ _mark_realtime_msgs_read_in_db(deduped)
2364
+
2365
+ # Fetch from DB when: (a) RT buffer empty and DB has pending, OR
2366
+ # (b) mark_read=True and DB has pending (must hit DB to actually mark read).
2367
+ if (not deduped and pending > 0) or (mark_read and pending > 0):
2284
2368
  raw = be.read_inbox(_PROJECT_ID, AGENT_NAME, mark_read=mark_read, api_key=_get_api_key())
2285
2369
  deduped = [
2286
2370
  {
@@ -2302,8 +2386,12 @@ def meshcode_check(include_acks: bool = False, since: Optional[str] = None, mark
2302
2386
 
2303
2387
  # When mark_read=True, update tracking state so messages aren't re-processed
2304
2388
  if mark_read and deduped:
2305
- for m in deduped:
2306
- _SEEN_MSG_IDS.add(_seen_key(m))
2389
+ with _SEEN_LOCK:
2390
+ _now = _time.monotonic()
2391
+ for m in deduped:
2392
+ k = _seen_key(m)
2393
+ _SEEN_MSG_IDS[k] = _now
2394
+ _SEEN_MSG_ORDER.append(k)
2307
2395
  latest_ts = max((str(m.get("ts", "")) for m in deduped), default=None)
2308
2396
  if latest_ts and (not _LAST_SEEN_TS or latest_ts > _LAST_SEEN_TS):
2309
2397
  _LAST_SEEN_TS = latest_ts
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: meshcode
3
- Version: 2.10.49
3
+ Version: 2.10.51
4
4
  Summary: Real-time communication between AI agents — Supabase-backed CLI
5
5
  Author-email: MeshCode <hello@meshcode.io>
6
6
  License: MIT
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "meshcode"
7
- version = "2.10.49"
7
+ version = "2.10.51"
8
8
  description = "Real-time communication between AI agents — Supabase-backed CLI"
9
9
  readme = "README.md"
10
10
  license = {text = "MIT"}
File without changes
File without changes
File without changes