aster-cli 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aster_cli/shell/app.py ADDED
@@ -0,0 +1,2390 @@
1
+ """
2
+ aster_cli.shell.app -- Shell REPL entry point and CLI wiring.
3
+
4
+ Usage::
5
+
6
+ aster shell <peer-addr> [--rcan <path>]
7
+
8
+ Connects to a peer and launches an interactive shell with filesystem-like
9
+ navigation, service discovery, dynamic RPC invocation, and smart autocomplete.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import argparse
15
+ import asyncio
16
+ import copy
17
+ import json
18
+ import os
19
+ import sys
20
+ import time
21
+ from types import SimpleNamespace
22
+ from pathlib import Path
23
+ from typing import Any
24
+
25
+ from prompt_toolkit import PromptSession
26
+ from prompt_toolkit.formatted_text import HTML
27
+ from prompt_toolkit.history import FileHistory
28
+ from prompt_toolkit.styles import Style
29
+ from rich.console import Console
30
+
31
+ from aster.json_codec import JsonProxyCodec
32
+
33
+ from aster_cli.shell.completer import ShellCompleter
34
+ from aster_cli.shell.display import Display
35
+ from aster_cli.shell.plugin import CommandContext, get_command
36
+ from aster_cli.shell.vfs import (
37
+ NodeKind,
38
+ build_root,
39
+ build_directory_root,
40
+ ensure_loaded,
41
+ resolve_path,
42
+ )
43
+
44
+ # Import commands to trigger @register decorators
45
+ import aster_cli.shell.commands # noqa: F401
46
+
47
+
48
+ # ── Prompt styling ────────────────────────────────────────────────────────────
49
+
50
# prompt_toolkit style sheet for the REPL prompt. The class names are the
# tags emitted by _make_prompt below (<peer>, <colon>, <path>, <dollar>).
SHELL_STYLE = Style.from_dict({
    "peer": "#78A6FF bold",  # Signal Blue -- peer display name
    "colon": "#666666",      # separator between peer and path
    "path": "#61D6C2",       # Relay Teal -- current VFS path
    "dollar": "#E6C06B",     # Trust Gold -- prompt sigil
    "": "#D4D4D4",           # default text
})
57
+
58
+
59
def _make_prompt(peer_name: str, cwd: str) -> HTML:
    """Render the shell prompt (``peer:path$ ``) as styled HTML."""
    pieces = (
        f"<peer>{peer_name}</peer>",
        "<colon>:</colon>",
        f"<path>{cwd}</path>",
        "<dollar>$ </dollar>",
    )
    return HTML("".join(pieces))
67
+
68
+
69
+ # ── Helpers ───────────────────────────────────────────────────────────────────
70
+
71
+
72
+ def _node_addr_to_b64(addr: Any) -> str:
73
+ """Serialize a NodeAddr to a base64 string for _coerce_node_addr."""
74
+ import base64
75
+ return base64.b64encode(addr.to_bytes()).decode("ascii")
76
+
77
+
78
def _load_root_secret_key(config: Any) -> bytes | None:
    """Load the root private key for the shell's node identity.

    Checks (in order):
      1. OS keyring (via active profile from ``~/.aster/config.toml``)
      2. ``config.root_pubkey_file`` (explicit config)
      3. ``~/.aster/root.key`` (default file location)

    Returns 32-byte secret key or None if unavailable.
    """
    import json as _json

    # 1) Try keyring -- scoped by active profile. Any failure here
    # (no keyring backend, no profile, bad hex) silently falls through
    # to the file-based locations below.
    try:
        from aster_cli.credentials import get_root_privkey, has_keyring
        if has_keyring():
            profile = _get_active_profile()
            if profile:
                hex_key = get_root_privkey(profile)
                if hex_key:
                    return bytes.fromhex(hex_key)
    except Exception:
        pass

    # 2) Try file-based locations; the explicit config path (if set)
    # takes precedence over the default ~/.aster/root.key.
    candidates = []
    if config.root_pubkey_file:
        candidates.append(config.root_pubkey_file)
    candidates.append("~/.aster/root.key")

    for path in candidates:
        expanded = os.path.expanduser(path)
        if not os.path.exists(expanded):
            continue
        try:
            with open(expanded) as f:
                content = f.read().strip()
            # Only a JSON object with a "private_key" hex field is accepted;
            # other file shapes are skipped, not treated as raw key bytes.
            if content.startswith("{"):
                d = _json.loads(content)
                if "private_key" in d:
                    return bytes.fromhex(d["private_key"])
        except (ValueError, KeyError, _json.JSONDecodeError, OSError):
            # Unreadable/corrupt candidate -- try the next location.
            continue
    return None
122
+
123
+
124
def _get_active_profile() -> str | None:
    """Read the active profile name from ``~/.aster/config.toml``."""
    config_path = os.path.expanduser("~/.aster/config.toml")
    if not os.path.exists(config_path):
        return None
    try:
        # tomllib landed in the stdlib in 3.11; older interpreters use
        # the API-compatible tomli backport.
        if sys.version_info >= (3, 11):
            import tomllib
        else:
            import tomli as tomllib  # type: ignore[no-redef]
        with open(config_path, "rb") as fh:
            parsed = tomllib.load(fh)
    except Exception:
        # Missing backport or malformed TOML -- behave as "no profile".
        return None
    return parsed.get("active_profile")
139
+
140
+
141
def _resolve_peer_arg(peer_arg: str) -> tuple[str, str]:
    """Resolve a peer argument to (endpoint_addr, friendly_name).

    If ``peer_arg`` matches a peer name in ``.aster-identity``, returns
    that peer's endpoint_id and the name. Otherwise returns the raw
    value as-is with a truncated display name.

    Raises ``SystemExit`` with a helpful message if it looks like a name
    but can't be resolved.
    """
    # Handle compact aster1... ticket format
    if peer_arg.startswith("aster1"):
        try:
            from aster import AsterTicket, NodeAddr
            ticket = AsterTicket.from_string(peer_arg)
            # Rebuild a NodeAddr from the ticket's parts and hand it on
            # as base64 so downstream _coerce_node_addr can parse it.
            na = NodeAddr(
                endpoint_id=ticket.endpoint_id,
                direct_addresses=ticket.direct_addrs,
            )
            addr_b64 = _node_addr_to_b64(na)
            display = f"{ticket.endpoint_id[:8]}... (ticket)"
            return addr_b64, display
        except Exception as exc:
            print(f"error: invalid aster ticket: {exc}", file=sys.stderr)
            sys.exit(1)

    # Check if it looks like a name (short, no base64/hex, no special chars).
    # NOTE(review): an all-hex string is assumed to be an EndpointId, so a
    # peer whose *name* is pure hex cannot be looked up by name.
    looks_like_name = (
        len(peer_arg) < 64
        and "=" not in peer_arg
        and "\n" not in peer_arg
        and not all(c in "0123456789abcdef" for c in peer_arg.lower())
    )

    if looks_like_name:
        # Try loading .aster-identity from the current working directory.
        identity_path = os.path.join(os.getcwd(), ".aster-identity")
        if os.path.exists(identity_path):
            try:
                from aster_cli.identity import load_identity
                identity = load_identity(identity_path)
                known_names = []
                for peer in identity.get("peers", []):
                    name = peer.get("name")
                    if name:
                        known_names.append(name)
                        if name == peer_arg:
                            eid = peer.get("endpoint_id")
                            if eid:
                                # Return raw endpoint ID -- AsterClient handles
                                # NodeAddr construction (avoids loading native module here)
                                return eid, peer_arg
                            print(
                                f"error: peer '{peer_arg}' found in .aster-identity "
                                f"but has no endpoint_id",
                                file=sys.stderr,
                            )
                            sys.exit(1)
                # Name not found -- show available names
                if known_names:
                    names_str = ", ".join(known_names)
                    print(
                        f"error: peer '{peer_arg}' not found in .aster-identity\n"
                        f" known peers: {names_str}\n"
                        f" or pass an aster1... ticket / base64 NodeAddr / hex EndpointId directly",
                        file=sys.stderr,
                    )
                else:
                    print(
                        f"error: peer '{peer_arg}' not found -- "
                        f".aster-identity has no peer entries\n"
                        f" pass a base64 NodeAddr / hex EndpointId directly",
                        file=sys.stderr,
                    )
                sys.exit(1)
            except SystemExit:
                # Re-raise our own exits; the broad handler below is only
                # for identity-file parse errors.
                raise
            except Exception:
                # Unreadable identity file -- fall through and treat the
                # argument as a raw address.
                pass
        else:
            # No identity file and it looks like a name
            print(
                f"error: '{peer_arg}' looks like a peer name but no "
                f".aster-identity file found in {os.getcwd()}\n"
                f" pass an aster1... ticket / base64 NodeAddr / hex EndpointId directly, or\n"
                f" run 'aster enroll node' to create an identity file",
                file=sys.stderr,
            )
            sys.exit(1)

    # Also try reverse lookup: even if it's a raw addr, find the name
    friendly = _lookup_peer_name(peer_arg)
    if friendly:
        return peer_arg, friendly

    # Fallback: truncated display
    display = peer_arg[:16] + "…" if len(peer_arg) > 16 else peer_arg
    return peer_arg, display
239
+
240
+
241
def _lookup_peer_name(addr: str) -> str | None:
    """Try to find a friendly name for an address from .aster-identity."""
    identity_path = os.path.join(os.getcwd(), ".aster-identity")
    if not os.path.exists(identity_path):
        return None
    try:
        from aster_cli.identity import load_identity
        identity = load_identity(identity_path)
        for entry in identity.get("peers", []):
            eid = entry.get("endpoint_id", "")
            # A NodeAddr string embeds the endpoint_id, so a substring
            # match is enough to recognize the peer.
            if eid and eid in addr:
                return entry.get("name")
    except Exception:
        # Best-effort lookup -- unreadable identity file means no name.
        pass
    return None
257
+
258
+
259
+ # ── Manifest cache ───────────────────────────────────────────────────────────
260
+
261
# On-disk manifest cache: one JSON file per contract, keyed by contract_id.
_MANIFEST_CACHE_DIR = Path("~/.aster/cache/manifests").expanduser()
# Cached manifests older than this are evicted on startup / on read.
_MANIFEST_CACHE_MAX_AGE_DAYS = 30
263
+
264
+
265
def _load_cached_manifests(services: list[Any]) -> dict[str, dict[str, Any]]:
    """Load cached manifests for services that have a contract_id.

    Returns a mapping of service name -> manifest dict for every service
    whose contract_id has a fresh cache file. Entries older than
    ``_MANIFEST_CACHE_MAX_AGE_DAYS`` are deleted and skipped.
    """
    result: dict[str, dict[str, Any]] = {}
    if not _MANIFEST_CACHE_DIR.exists():
        return result
    for svc in services:
        # getattr with a default replaces the hasattr/ternary pattern.
        cid = getattr(svc, "contract_id", "")
        if not cid:
            continue
        cache_file = _MANIFEST_CACHE_DIR / f"{cid}.json"
        if not cache_file.exists():
            continue
        try:
            age_days = (time.time() - cache_file.stat().st_mtime) / 86400
            if age_days > _MANIFEST_CACHE_MAX_AGE_DAYS:
                # Stale -- evict rather than serve an outdated schema.
                cache_file.unlink(missing_ok=True)
                continue
            result[svc.name] = json.loads(cache_file.read_text(encoding="utf-8"))
        except Exception:
            # Cache is best-effort; a corrupt entry just means a network fetch.
            pass
    return result
286
+
287
+
288
def _save_manifests_to_cache(
    manifests: dict[str, dict[str, Any]],
    services: list[Any],
) -> None:
    """Cache manifests keyed by contract_id."""
    if not manifests:
        return
    try:
        _MANIFEST_CACHE_DIR.mkdir(parents=True, exist_ok=True)
        # Manifests are keyed by service name, but cache files are keyed
        # by contract_id -- build the translation map first.
        cid_map = {
            s.name: s.contract_id
            for s in services
            if hasattr(s, "contract_id") and s.contract_id
        }
        for svc_name, manifest in manifests.items():
            contract_id = cid_map.get(svc_name, "")
            if not contract_id:
                continue
            target = _MANIFEST_CACHE_DIR / f"{contract_id}.json"
            target.write_text(
                json.dumps(manifest, indent=2, default=str),
                encoding="utf-8",
            )
    except Exception:
        pass  # caching is best-effort
314
+
315
+
316
def _prune_manifest_cache() -> None:
    """Remove cached manifests older than 30 days."""
    if not _MANIFEST_CACHE_DIR.exists():
        return
    oldest_allowed = time.time() - _MANIFEST_CACHE_MAX_AGE_DAYS * 86400
    try:
        for entry in _MANIFEST_CACHE_DIR.iterdir():
            if entry.suffix != ".json":
                continue
            if entry.stat().st_mtime < oldest_allowed:
                entry.unlink(missing_ok=True)
    except Exception:
        # Pruning is opportunistic -- ignore racing deletes / permissions.
        pass
327
+
328
+
329
+ # ── Connection adapter ────────────────────────────────────────────────────────
330
+
331
+ class PeerConnection:
332
+ """Adapter wrapping a real Aster peer connection for shell use.
333
+
334
+ Handles:
335
+ - Consumer admission handshake (trusted or open-gate)
336
+ - Service discovery from admission response
337
+ - Blob operations via BlobsClient
338
+ - RPC invocation via IrohTransport
339
+ - All four streaming patterns
340
+ """
341
+
342
    def __init__(self, peer_addr: str, rcan_path: str | None = None) -> None:
        """Record connection parameters; no network I/O until connect().

        Args:
            peer_addr: Peer address string (base64 NodeAddr, hex EndpointId,
                or value already resolved by ``_resolve_peer_arg``).
            rcan_path: Optional path to an RCAN enrollment credential file.
        """
        self._peer_addr = peer_addr
        self._rcan_path = rcan_path
        self._aster_client: Any = None  # AsterClient, created in connect()
        self._ep: Any = None
        self._blobs: Any = None
        self._services: list[Any] = []  # ServiceSummary objects
        self._manifests: dict[str, dict[str, Any]] = {}  # service name → manifest dict
        self._rpc_conns: dict[str, Any] = {}  # channel addr → IrohConnection
        self._transports: dict[str, Any] = {}  # service name → IrohTransport
        self._type_factory: Any = None  # DynamicTypeFactory for typeless invocation
        self._registry_doc: Any = None  # DocHandle for the registry doc
        self._registry_event_rx: Any = None  # DocEventReceiver for live events
        self._artifact_refs: dict[str, dict[str, Any]] = {}  # contract_id → ArtifactRef dict
357
    async def connect(self) -> None:
        """Connect to the peer via consumer admission, then fetch contract manifests."""
        from aster import AsterClient
        from aster.config import AsterConfig

        # Identity resolution for the shell's QUIC node:
        #
        # 1. If --rcan is provided, the user has their own identity.
        #    Load secret_key from the .aster-identity in CWD (which
        #    was created by `aster enroll node`). The RCAN credential's
        #    endpoint_id must match this key.
        #
        # 2. If no --rcan, use the root private key (if available).
        #    This gives the shell the root owner's identity -- free auth.
        #
        # 3. Fall back to ephemeral identity (dev mode).
        config = AsterConfig.from_env()
        config.storage_path = None

        if self._rcan_path:
            # Look for .aster-identity alongside the credential file.
            # `aster enroll node` creates them as a pair, so the directory
            # containing the .cred is the most reliable place to find the
            # matching identity. Fall back to CWD if not present there.
            cred_dir = os.path.dirname(os.path.abspath(self._rcan_path))
            paired = os.path.join(cred_dir, ".aster-identity")
            if not config.identity_file or not os.path.exists(config.identity_file):
                if os.path.exists(paired):
                    config.identity_file = paired
        else:
            # No credential -- use root key or ephemeral.
            # NOTE(review): the impossible path presumably makes AsterClient
            # skip identity-file loading so secret_key wins -- confirm.
            config.identity_file = "/dev/null/.aster-identity"
            root_secret = _load_root_secret_key(config)
            config.secret_key = root_secret

        self._aster_client = AsterClient(
            config=config,
            endpoint_addr=self._peer_addr,
            enrollment_credential_file=self._rcan_path,
        )
        await self._aster_client.connect()
        self._services = list(self._aster_client.services)

        # Fetch manifests in the background -- the shell is usable immediately
        # with basic service info from the admission response. Manifests add
        # rich metadata (field types, descriptions) and are merged in when ready.
        self._manifest_task = asyncio.create_task(self._fetch_manifests_background())
404
+
405
+ async def wait_for_manifests(self, timeout: float = 20.0) -> None:
406
+ """Wait for background manifest fetch to complete (for MCP / non-interactive use)."""
407
+ if hasattr(self, "_manifest_task") and not self._manifest_task.done():
408
+ try:
409
+ await asyncio.wait_for(self._manifest_task, timeout=timeout)
410
+ except asyncio.TimeoutError:
411
+ pass
412
+
413
+ async def _fetch_manifests_background(self) -> None:
414
+ """Fetch manifests and synthesize types, logging errors instead of raising.
415
+
416
+ Uses a local cache (~/.aster/cache/manifests/) keyed by contract_id.
417
+ Cached manifests older than 30 days are pruned on each run.
418
+ """
419
+ try:
420
+ # Prune stale cache entries
421
+ _prune_manifest_cache()
422
+
423
+ # Try loading from cache first
424
+ cached = _load_cached_manifests(self._services)
425
+ if cached:
426
+ self._manifests.update(cached)
427
+ import logging
428
+ logging.getLogger(__name__).debug(
429
+ "Loaded %d manifests from cache", len(cached),
430
+ )
431
+
432
+ # Fetch any missing manifests from the network
433
+ missing = [s for s in self._services if s.name not in self._manifests]
434
+ if missing:
435
+ await self._fetch_manifests()
436
+
437
+ # Cache newly fetched manifests
438
+ _save_manifests_to_cache(self._manifests, self._services)
439
+
440
+ self._synthesize_types()
441
+ except Exception:
442
+ import logging
443
+ logging.getLogger(__name__).debug(
444
+ "Background manifest fetch failed", exc_info=True,
445
+ )
446
+
447
    async def _fetch_manifests(self) -> None:
        """Fetch manifest.json for each service from the blob store.

        The registry doc (accessed via registry_namespace) maps contract_ids
        to ArtifactRefs which point to blob collections containing manifest.json.
        If registry is unavailable, manifests will be empty (basic service info only).
        """
        import json as _json
        import logging

        logger = logging.getLogger(__name__)
        namespace = self._aster_client.registry_namespace if self._aster_client else ""

        if not namespace or not self._aster_client._node:
            logger.debug("No registry namespace or node -- skipping manifest fetch")
            return

        try:
            from aster import blobs_client, docs_client
            from aster.contract.publication import fetch_from_collection
            from aster.registry.keys import contract_key

            bc = blobs_client(self._aster_client._node)
            dc = docs_client(self._aster_client._node)

            # Join the registry doc (read-only) and wait for initial sync
            remote_node_id = self._get_remote_node_id() or ""
            doc, event_receiver = await dc.join_and_subscribe_namespace(
                namespace, remote_node_id
            )
            # Keep handles alive: dropping them would stop doc sync/events.
            self._registry_doc = doc
            self._registry_event_rx = event_receiver

            # Wait for sync with retry -- the doc needs to pull entries from
            # the producer. We try up to 3 times with increasing timeouts.
            import asyncio as _asyncio

            sync_done = False
            for attempt in range(3):
                timeout = 3.0 * (attempt + 1)  # 3s, 6s, 9s
                try:
                    deadline = _asyncio.get_event_loop().time() + timeout
                    while _asyncio.get_event_loop().time() < deadline:
                        remaining = deadline - _asyncio.get_event_loop().time()
                        if remaining <= 0:
                            break
                        try:
                            # Poll events in <=1s slices so we can also probe
                            # the doc for entries between event timeouts.
                            event = await _asyncio.wait_for(
                                event_receiver.recv(), timeout=min(remaining, 1.0)
                            )
                            kind = event.kind if hasattr(event, "kind") else str(event)
                            logger.debug("Registry doc event: %s", kind)
                            if kind == "sync_finished":
                                # Small grace period for late entries.
                                await _asyncio.sleep(0.3)
                                sync_done = True
                                break
                        except _asyncio.TimeoutError:
                            # No event -- entries may still have landed.
                            test_entries = await doc.query_key_prefix(b"contracts/")
                            if test_entries:
                                logger.debug("Entries found before sync_finished")
                                sync_done = True
                                break
                except Exception as exc:
                    logger.debug("Sync wait attempt %d interrupted: %s", attempt + 1, exc)

                if sync_done:
                    break
                logger.debug("Sync attempt %d timed out, retrying...", attempt + 1)

            for svc in self._services:
                try:
                    # Fast path: read the manifest directly from the doc
                    # via the ``manifests/{contract_id}`` shortcut. Both the
                    # Python and TypeScript producers write this entry alongside
                    # the ArtifactRef -- it lets the shell get method schemas
                    # without a blob-collection round-trip and works even when
                    # the blob store sync is slow or broken (e.g. cross-language
                    # peers where doc sync is the only reliable transport).
                    manifest_key = f"manifests/{svc.contract_id}".encode()
                    shortcut_entries = await doc.query_key_exact(manifest_key)
                    if shortcut_entries:
                        try:
                            manifest_bytes = await doc.read_entry_content(
                                shortcut_entries[0].content_hash
                            )
                            manifest = _json.loads(manifest_bytes)
                            self._manifests[svc.name] = manifest
                            logger.debug(
                                "Fetched manifest shortcut for %s: %d methods",
                                svc.name,
                                len(manifest.get("methods", [])),
                            )
                            continue
                        except Exception as exc:
                            logger.debug(
                                "Manifest shortcut failed for %s: %s -- "
                                "falling back to blob collection",
                                svc.name,
                                exc,
                            )

                    # Slow path: read ArtifactRef and download the collection
                    key = contract_key(svc.contract_id)
                    entries = await doc.query_key_exact(key)
                    if not entries:
                        logger.debug("No ArtifactRef found for %s", svc.name)
                        continue

                    # Read the ArtifactRef JSON from the doc entry content
                    entry = entries[0]
                    content = await doc.read_entry_content(entry.content_hash)
                    artifact = _json.loads(content)
                    self._artifact_refs[svc.contract_id] = artifact
                    collection_hash = artifact.get("collection_hash", "")

                    if not collection_hash:
                        logger.debug("No collection_hash for %s", svc.name)
                        continue

                    # Download collection by hash from the remote peer.
                    # We already know the remote node_id from the connection.
                    remote_node_id = self._get_remote_node_id()
                    if remote_node_id:
                        try:
                            files = await bc.download_collection_hash(
                                collection_hash, remote_node_id
                            )
                            # Record the total artifact size for `ls` display.
                            total_size = sum(len(data) for _, data in files)
                            artifact["size"] = total_size
                            for name, data in files:
                                if name == "manifest.json":
                                    manifest = _json.loads(data)
                                    self._manifests[svc.name] = manifest
                                    logger.debug(
                                        "Fetched manifest for %s: %d methods",
                                        svc.name,
                                        len(manifest.get("methods", [])),
                                    )
                                    break
                        except Exception as dl_exc:
                            logger.debug("Collection download failed for %s: %s", svc.name, dl_exc)
                    else:
                        # Fallback: try reading from local store
                        manifest_bytes = await fetch_from_collection(
                            bc, collection_hash, "manifest.json"
                        )
                        if manifest_bytes:
                            manifest = _json.loads(manifest_bytes)
                            self._manifests[svc.name] = manifest
                except Exception as exc:
                    # Per-service failures are isolated -- keep fetching the rest.
                    logger.debug("Failed to fetch manifest for %s: %s", svc.name, exc)

        except Exception as exc:
            logger.debug("Registry manifest fetch failed: %s", exc)
601
+
602
+ def _get_remote_node_id(self) -> str | None:
603
+ """Get the remote peer's endpoint ID (node_id hex)."""
604
+ try:
605
+ from aster.runtime import _coerce_node_addr
606
+ addr = _coerce_node_addr(self._aster_client._endpoint_addr_in)
607
+ return addr.endpoint_id or None
608
+ except Exception:
609
+ return None
610
+
611
+ async def list_services(self) -> list[dict[str, Any]]:
612
+ """List services from admission, enriched with manifest data."""
613
+ results = []
614
+ for svc in self._services:
615
+ manifest = self._manifests.get(svc.name, {})
616
+ svc_dict: dict[str, Any] = {
617
+ "name": svc.name,
618
+ "version": svc.version,
619
+ "contract_id": svc.contract_id,
620
+ "scoped": getattr(svc, "pattern", None) or manifest.get("scoped", "shared"),
621
+ "channels": svc.channels if hasattr(svc, "channels") else {},
622
+ }
623
+
624
+ # Include methods from manifest if available
625
+ if "methods" in manifest:
626
+ svc_dict["methods"] = manifest["methods"]
627
+
628
+ results.append(svc_dict)
629
+ return results
630
+
631
+ async def get_contract(self, service_name: str) -> dict[str, Any] | None:
632
+ """Get contract details for a service.
633
+
634
+ Returns manifest data if available, otherwise basic admission info.
635
+ """
636
+ for svc in self._services:
637
+ if svc.name == service_name:
638
+ manifest = self._manifests.get(service_name, {})
639
+ return {
640
+ "name": svc.name,
641
+ "version": svc.version,
642
+ "contract_id": svc.contract_id,
643
+ "methods": manifest.get("methods", []),
644
+ "types": [
645
+ {"name": m.get("request_type", "?"), "hash": ""}
646
+ for m in manifest.get("methods", [])
647
+ if m.get("request_type")
648
+ ],
649
+ }
650
+ return None
651
+
652
+ def get_manifests(self) -> dict[str, dict[str, Any]]:
653
+ """Get all fetched manifests: service_name -> manifest dict."""
654
+ return dict(self._manifests)
655
+
656
+ def get_peer_display(self) -> str:
657
+ """Get the peer's display name (handle or endpoint_id prefix)."""
658
+ try:
659
+ from aster.runtime import _coerce_node_addr
660
+ addr = _coerce_node_addr(self._aster_client._endpoint_addr_in)
661
+ return addr.endpoint_id[:8] if addr.endpoint_id else "unknown"
662
+ except Exception:
663
+ return "unknown"
664
+
665
+ async def list_blobs(self) -> list[dict[str, Any]]:
666
+ """List blobs: tags from the local store + collection entries from manifests."""
667
+ results: list[dict[str, Any]] = []
668
+ if not self._aster_client or not self._aster_client._node:
669
+ return results
670
+
671
+ from aster import blobs_client
672
+ bc = blobs_client(self._aster_client._node)
673
+
674
+ # 1) Named tags (GC-protected blobs)
675
+ try:
676
+ tags = await bc.tag_list()
677
+ for t in tags:
678
+ results.append({
679
+ "hash": t.hash,
680
+ "size": 0,
681
+ "tag": t.name,
682
+ "source": "tag",
683
+ })
684
+ except Exception:
685
+ pass
686
+
687
+ # 2) Collection entries from artifact refs (contract blobs)
688
+ seen = {r["hash"] for r in results}
689
+ for contract_id, artifact in self._artifact_refs.items():
690
+ coll_hash = artifact.get("collection_hash", "")
691
+ if not coll_hash or coll_hash in seen:
692
+ continue
693
+ # Find service name + version for this contract
694
+ svc = next(
695
+ (s for s in self._services if s.contract_id == contract_id),
696
+ None,
697
+ )
698
+ svc_name = svc.name if svc else contract_id[:12]
699
+ svc_ver = svc.version if svc else 1
700
+ results.append({
701
+ "hash": coll_hash,
702
+ "size": artifact.get("size", 0),
703
+ "tag": f"{svc_name}.v{svc_ver}",
704
+ "source": "collection",
705
+ "is_collection": True,
706
+ })
707
+ seen.add(coll_hash)
708
+
709
+ return results
710
+
711
+ async def list_collection_entries(self, collection_hash: str) -> list[dict[str, Any]]:
712
+ """List entries inside a HashSeq collection."""
713
+ if not self._aster_client or not self._aster_client._node:
714
+ return []
715
+ from aster import blobs_client
716
+ bc = blobs_client(self._aster_client._node)
717
+ try:
718
+ entries = await bc.list_collection(collection_hash)
719
+ return [
720
+ {"name": name, "hash": h, "size": size}
721
+ for name, h, size in entries
722
+ ]
723
+ except Exception:
724
+ return []
725
+
726
+ async def read_blob(self, blob_hash: str) -> bytes:
727
+ """Read blob content by hash."""
728
+ if self._aster_client and self._aster_client._node:
729
+ from aster import blobs_client
730
+ bc = blobs_client(self._aster_client._node)
731
+ return await bc.read_to_bytes(blob_hash)
732
+ raise RuntimeError("blob reading not available")
733
+
734
+ def _synthesize_types(self) -> None:
735
+ """Create dynamic dataclasses from manifest method schemas.
736
+
737
+ These are wire-compatible with the producer's types -- same
738
+ @wire_type tag, same field names. Enables invocation without
739
+ having the producer's Python types locally installed.
740
+ """
741
+ from aster.dynamic import DynamicTypeFactory
742
+ import logging
743
+
744
+ logger = logging.getLogger(__name__)
745
+ self._type_factory = DynamicTypeFactory()
746
+
747
+ for svc_name, manifest in self._manifests.items():
748
+ methods = manifest.get("methods", [])
749
+ self._type_factory.register_from_manifest(methods)
750
+
751
+ if self._type_factory.type_count > 0:
752
+ logger.debug(
753
+ "Synthesized %d dynamic types from manifests",
754
+ self._type_factory.type_count,
755
+ )
756
+
757
    async def _get_transport(self, service_name: str) -> Any:
        """Get or create an IrohTransport for a service.

        Transports are cached per service; RPC connections are cached per
        channel address, so services sharing a channel share a connection.

        Raises:
            RuntimeError: if the service is unknown or has no RPC channel.
        """
        if service_name in self._transports:
            return self._transports[service_name]

        from aster import IrohTransport, ForyCodec
        from aster.rpc_types import SerializationMode

        # Find the service summary
        summary = None
        for svc in self._services:
            if svc.name == service_name:
                summary = svc
                break
        if summary is None:
            raise RuntimeError(f"service {service_name!r} not found on this peer")

        # Get RPC connection for the service's channel -- the first
        # advertised channel is used.
        channel_addr = None
        for _name, addr in summary.channels.items():
            channel_addr = addr
            break
        if channel_addr is None:
            raise RuntimeError(f"service {service_name!r} has no RPC channel")

        if channel_addr not in self._rpc_conns:
            conn = await self._aster_client._rpc_conn_for(channel_addr)
            self._rpc_conns[channel_addr] = conn

        conn = self._rpc_conns[channel_addr]

        # If the peer advertises JSON-only (e.g. the TypeScript binding,
        # whose Fory implementation is not yet XLANG-compliant), use the
        # JSON proxy codec -- otherwise the Fory client would send bytes
        # the server can't decode and the call would fail with the opaque
        # "Expected RpcStatus, got NoneType". Mirrors the auto-selection
        # in AsterClient._rpc_for_service.
        codec = None
        modes = list(getattr(summary, "serialization_modes", None) or [])
        if modes and "xlang" not in modes and "json" in modes:
            codec = JsonProxyCodec()
        elif self._type_factory and self._type_factory.type_count > 0:
            # Otherwise, when dynamic types were synthesized from manifests,
            # try a cross-language Fory codec built from those types.
            try:
                codec = ForyCodec(
                    mode=SerializationMode.XLANG,
                    types=self._type_factory.get_all_types(),
                )
            except Exception as exc:
                # Fall back to the transport's default codec (codec=None).
                import logging
                logging.getLogger(__name__).debug("Dynamic codec failed: %s", exc)

        transport = IrohTransport(conn, codec=codec)
        self._transports[service_name] = transport
        return transport
811
+
812
+ def _get_method_meta(self, service: str, method: str) -> dict[str, Any] | None:
813
+ """Look up method metadata from the manifest."""
814
+ manifest = self._manifests.get(service, {})
815
+ for m in manifest.get("methods", []):
816
+ if m["name"] == method:
817
+ return m
818
+ return None
819
+
820
+ def _check_session_scope(self, service: str, method: str) -> None:
821
+ """Raise a clear error if the service is session-scoped.
822
+
823
+ Session-scoped services need a persistent session stream -- the shell's
824
+ per-call invocation model can't drive them. Use a generated typed
825
+ client instead.
826
+ """
827
+ manifest = self._manifests.get(service, {})
828
+ scoped = manifest.get("scoped", "shared")
829
+ if scoped in ("session", "stream"):
830
+ raise RuntimeError(
831
+ f"{service}.{method}: {service!r} is session-scoped and cannot be "
832
+ f"invoked from the shell. Session-scoped services need a persistent "
833
+ f"session stream that holds per-connection state. Use a generated "
834
+ f"typed client instead:\n"
835
+ f" aster contract gen-client <address> --out ./clients --package my_app --lang python\n"
836
+ f" # then in Python:\n"
837
+ f" from my_app.services.{service.lower()}_v1 import {service}Client\n"
838
+ f" stub = await {service}Client.from_connection(client)"
839
+ )
840
+
841
+ async def open_session(self, service: str) -> Any:
842
+ """Open a persistent session against a session-scoped service.
843
+
844
+ Returns a ``SessionProxyClient`` that multiplexes calls over a
845
+ single bidi stream. The caller is responsible for closing it via
846
+ :meth:`close_session` (or the session's own ``close()`` method).
847
+
848
+ Used by the shell's ``session <ServiceName>`` subshell to route
849
+ method invocations through the same persistent stream so per-agent
850
+ state survives across calls.
851
+ """
852
+ if not self._aster_client:
853
+ raise RuntimeError("not connected")
854
+ return await self._aster_client.session(service)
855
+
856
+ async def close_session(self, session: Any) -> None:
857
+ """Close a session opened via :meth:`open_session`."""
858
+ if session is None:
859
+ return
860
+ try:
861
+ await session.close()
862
+ except Exception:
863
+ pass
864
+
865
+ def _build_typed_request(
866
+ self,
867
+ transport: Any,
868
+ service: str,
869
+ method: str,
870
+ payload: dict[str, Any],
871
+ ) -> Any:
872
+ """Convert a dict payload into a typed Fory dataclass when needed.
873
+
874
+ The Fory codec expects @wire_type-decorated dataclass instances,
875
+ not raw dicts. When the transport uses Fory (Python servers), we
876
+ synthesize the right typed instance from the manifest's field
877
+ descriptors via ``DynamicTypeFactory``. When the transport uses
878
+ ``JsonProxyCodec`` (TS servers), we leave the dict alone -- the
879
+ JSON codec sends keys verbatim.
880
+
881
+ This was originally only called from ``invoke()`` for unary
882
+ calls; the streaming methods (server_stream, client_stream,
883
+ bidi_stream) used to pass raw dicts unconditionally, which
884
+ worked for TS servers but tripped Fory's "expected a typed
885
+ object but received a dict" against Python servers. Now all
886
+ four invocation paths share this helper.
887
+ """
888
+ if not isinstance(payload, dict):
889
+ return payload # already typed (e.g. user passed a dataclass)
890
+ if not self._type_factory:
891
+ return payload
892
+ if isinstance(getattr(transport, "_codec", None), JsonProxyCodec):
893
+ return payload # JSON path takes raw dicts
894
+ meta = self._get_method_meta(service, method)
895
+ if not meta or not meta.get("request_wire_tag"):
896
+ return payload
897
+ try:
898
+ return self._type_factory.build_request(meta, payload)
899
+ except Exception:
900
+ return payload # fall back to dict; Fory will give the same error
901
+
902
+ async def invoke(
903
+ self, service: str, method: str, payload: dict[str, Any]
904
+ ) -> Any:
905
+ """Invoke a unary RPC.
906
+
907
+ If dynamic types are available (from manifest), builds a typed
908
+ request from the dict payload. Otherwise passes the dict directly.
909
+ """
910
+ self._check_session_scope(service, method)
911
+ transport = await self._get_transport(service)
912
+ request = self._build_typed_request(transport, service, method, payload)
913
+ return await transport.unary(service, method, request)
914
+
915
+ async def server_stream(
916
+ self, service: str, method: str, payload: dict[str, Any]
917
+ ) -> Any:
918
+ """Start a server-streaming RPC."""
919
+ self._check_session_scope(service, method)
920
+ transport = await self._get_transport(service)
921
+ request = self._build_typed_request(transport, service, method, payload)
922
+ return transport.server_stream(service, method, request)
923
+
924
+ async def client_stream(
925
+ self, service: str, method: str, values: list[Any]
926
+ ) -> Any:
927
+ """Send a client-streaming RPC.
928
+
929
+ Each input dict is converted to a typed Fory dataclass when the
930
+ transport uses Fory; JSON-codec transports pass dicts through.
931
+ """
932
+ self._check_session_scope(service, method)
933
+ transport = await self._get_transport(service)
934
+
935
+ typed_values = [
936
+ self._build_typed_request(transport, service, method, v)
937
+ for v in values
938
+ ]
939
+
940
+ async def _iter():
941
+ for v in typed_values:
942
+ yield v
943
+
944
+ return await transport.client_stream(service, method, _iter())
945
+
946
+ def bidi_stream(
947
+ self, service: str, method: str, values: Any
948
+ ) -> Any:
949
+ """Start a bidi-streaming RPC.
950
+
951
+ Note: bidi_stream returns a BidiChannel, not an async iterator.
952
+ The invoker handles the read/write loop and is responsible for
953
+ calling :meth:`build_typed_request_for_bidi` on each outgoing
954
+ value before sending it -- bidi can't pre-convert because the
955
+ values are produced lazily.
956
+ """
957
+ self._check_session_scope(service, method)
958
+
959
+ # We need async transport setup, so return a coroutine wrapper
960
+ async def _start():
961
+ transport = await self._get_transport(service)
962
+ return transport.bidi_stream(service, method)
963
+ return _start()
964
+
965
+ def build_typed_request_for_bidi(
966
+ self, service: str, method: str, value: Any
967
+ ) -> Any:
968
+ """Public helper for the bidi invoker loop.
969
+
970
+ Bidi streams produce values lazily on a separate task, so we
971
+ can't pre-convert them like server/client streams. The invoker
972
+ calls this on each outgoing value just before pushing it onto
973
+ the bidi channel.
974
+ """
975
+ transport = self._transports.get(service)
976
+ if transport is None:
977
+ return value # transport not yet built; the underlying call will fail anyway
978
+ return self._build_typed_request(transport, service, method, value)
979
+
980
+ async def list_doc_entries(self) -> list[dict[str, Any]]:
981
+ """List all entries in the registry doc."""
982
+ if not self._registry_doc:
983
+ return []
984
+ try:
985
+ entries = await self._registry_doc.query_key_prefix(b"")
986
+ results = []
987
+ for e in entries:
988
+ key_str = e.key.decode("utf-8", errors="replace")
989
+ results.append({
990
+ "key": key_str,
991
+ "author": e.author_id[:12] + "…",
992
+ "hash": e.content_hash[:16] + "…",
993
+ "size": e.content_len,
994
+ "timestamp": e.timestamp,
995
+ })
996
+ return results
997
+ except Exception:
998
+ return []
999
+
1000
+ async def read_doc_entry(self, key: str) -> bytes | None:
1001
+ """Read content of a registry doc entry by key."""
1002
+ if not self._registry_doc:
1003
+ return None
1004
+ try:
1005
+ entries = await self._registry_doc.query_key_exact(key.encode("utf-8"))
1006
+ if not entries:
1007
+ return None
1008
+ return await self._registry_doc.read_entry_content(entries[0].content_hash)
1009
+ except Exception:
1010
+ return None
1011
+
1012
+ async def subscribe_gossip(self) -> Any:
1013
+ """Subscribe to the producer mesh gossip topic.
1014
+
1015
+ The gossip topic is returned by the producer during consumer
1016
+ admission -- but only when the connecting node is the root key
1017
+ holder. Returns a GossipTopicHandle or raises.
1018
+ """
1019
+ if not self._aster_client or not self._aster_client._node:
1020
+ raise RuntimeError("not connected")
1021
+
1022
+ topic_hex = self._aster_client.gossip_topic
1023
+ if not topic_hex:
1024
+ raise RuntimeError(
1025
+ "gossip topic not available -- the producer only shares it "
1026
+ "with the root node. Connect with the root key to access gossip."
1027
+ )
1028
+
1029
+ topic_bytes = bytes.fromhex(topic_hex)
1030
+
1031
+ from aster import gossip_client
1032
+
1033
+ # Need bootstrap peers -- the producer we connected to
1034
+ bootstrap = []
1035
+ if self._aster_client._endpoint_addr_in:
1036
+ from aster.runtime import _coerce_node_addr
1037
+ addr = _coerce_node_addr(self._aster_client._endpoint_addr_in)
1038
+ if addr.endpoint_id:
1039
+ bootstrap.append(addr.endpoint_id)
1040
+
1041
+ gc = gossip_client(self._aster_client._node)
1042
+ return await gc.subscribe(list(topic_bytes), bootstrap)
1043
+
1044
+ async def close(self) -> None:
1045
+ """Close the connection and clean up.
1046
+
1047
+ Fire-and-forget -- the shell doesn't need to wait for graceful
1048
+ QUIC teardown. The OS reclaims sockets on process exit anyway.
1049
+ """
1050
+ # Cancel background manifest fetch if still running
1051
+ if hasattr(self, "_manifest_task") and not self._manifest_task.done():
1052
+ self._manifest_task.cancel()
1053
+
1054
+ self._transports.clear()
1055
+ self._rpc_conns.clear()
1056
+
1057
+ # Don't await graceful shutdown -- just let the process exit.
1058
+ # AsterClient.close() does node.shutdown() which waits for
1059
+ # iroh protocols to drain, but the shell doesn't need that.
1060
+
1061
+
1062
+ # ── Offline / demo mode ──────────────────────────────────────────────────────
1063
+
1064
+ class DemoConnection:
1065
+ """Offline demo connection for testing the shell without a live peer.
1066
+
1067
+ Provides sample services and blobs for exploring the shell UX.
1068
+ """
1069
+
1070
+ async def connect(self) -> None:
1071
+ pass
1072
+
1073
+ async def list_services(self) -> list[dict[str, Any]]:
1074
+ return [
1075
+ {
1076
+ "name": "HelloWorld",
1077
+ "version": 1,
1078
+ "scoped": "shared",
1079
+ "methods": [
1080
+ {
1081
+ "name": "sayHello",
1082
+ "pattern": "unary",
1083
+ "request_type": "HelloRequest",
1084
+ "response_type": "HelloResponse",
1085
+ "timeout": 30.0,
1086
+ "fields": [
1087
+ {"name": "name", "type": "str", "required": True},
1088
+ {"name": "greeting", "type": "str", "required": False, "default": "Hello"},
1089
+ ],
1090
+ },
1091
+ {
1092
+ "name": "streamGreetings",
1093
+ "pattern": "server_stream",
1094
+ "request_type": "StreamRequest",
1095
+ "response_type": "HelloResponse",
1096
+ "timeout": None,
1097
+ "fields": [
1098
+ {"name": "names", "type": "list[str]", "required": True},
1099
+ ],
1100
+ },
1101
+ ],
1102
+ },
1103
+ {
1104
+ "name": "FileStore",
1105
+ "version": 2,
1106
+ "scoped": "shared",
1107
+ "methods": [
1108
+ {"name": "get", "pattern": "unary", "request_type": "GetRequest", "response_type": "FileData"},
1109
+ {"name": "put", "pattern": "unary", "request_type": "PutRequest", "response_type": "PutResponse"},
1110
+ {"name": "list", "pattern": "server_stream", "request_type": "ListRequest", "response_type": "FileInfo"},
1111
+ {"name": "upload", "pattern": "client_stream", "request_type": "Chunk", "response_type": "UploadResult"},
1112
+ {"name": "sync", "pattern": "bidi_stream", "request_type": "SyncMessage", "response_type": "SyncMessage"},
1113
+ ],
1114
+ },
1115
+ {
1116
+ "name": "Analytics",
1117
+ "version": 1,
1118
+ "scoped": "session",
1119
+ "methods": [
1120
+ {"name": "getMetrics", "pattern": "unary", "request_type": "MetricsQuery", "response_type": "MetricsResult"},
1121
+ {"name": "watchMetrics", "pattern": "server_stream", "request_type": "WatchRequest", "response_type": "MetricEvent"},
1122
+ {"name": "ingest", "pattern": "client_stream", "request_type": "DataPoint", "response_type": "IngestSummary"},
1123
+ ],
1124
+ },
1125
+ ]
1126
+
1127
+ async def get_contract(self, service_name: str) -> dict[str, Any] | None:
1128
+ services = await self.list_services()
1129
+ for svc in services:
1130
+ if svc["name"] == service_name:
1131
+ return {
1132
+ "name": svc["name"],
1133
+ "version": svc["version"],
1134
+ "contract_id": "a1b2c3d4e5f6…",
1135
+ "methods": svc["methods"],
1136
+ "types": [
1137
+ {"name": m.get("request_type", "?"), "hash": "aabbccdd…"}
1138
+ for m in svc["methods"]
1139
+ if m.get("request_type")
1140
+ ],
1141
+ }
1142
+ return None
1143
+
1144
+ async def list_blobs(self) -> list[dict[str, Any]]:
1145
+ return [
1146
+ {"hash": "abc123def456789012345678", "size": 0, "tag": "HelloWorld.v1", "source": "collection", "is_collection": True},
1147
+ ]
1148
+
1149
+ async def list_collection_entries(self, collection_hash: str) -> list[dict[str, Any]]:
1150
+ return [
1151
+ {"name": "manifest.json", "hash": "deadbeef0123456789abcdef", "size": 340},
1152
+ {"name": "contract.bin", "hash": "cafebabe9876543210fedcba", "size": 1258291},
1153
+ {"name": "types/HelloRequest.bin", "hash": "1234567890abcdef12345678", "size": 128},
1154
+ ]
1155
+
1156
+ async def list_doc_entries(self) -> list[dict[str, Any]]:
1157
+ return [
1158
+ {"key": "contracts/a1b2c3d4…", "author": "7f3e9a1b2c…", "hash": "abc123def456…", "size": 256, "timestamp": 1712567890000},
1159
+ {"key": "manifests/a1b2c3d4…", "author": "7f3e9a1b2c…", "hash": "deadbeef0123…", "size": 340, "timestamp": 1712567890000},
1160
+ {"key": "versions/HelloWorld/v1", "author": "7f3e9a1b2c…", "hash": "cafebabe9876…", "size": 64, "timestamp": 1712567890000},
1161
+ ]
1162
+
1163
+ async def read_doc_entry(self, key: str) -> bytes | None:
1164
+ if "manifest" in key:
1165
+ import json as _json
1166
+ return _json.dumps({"service": "HelloWorld", "version": 1, "methods": [{"name": "sayHello"}]}, indent=2).encode()
1167
+ if "versions" in key:
1168
+ return b"a1b2c3d4e5f6"
1169
+ return b'{"contract_id":"a1b2c3d4...","collection_hash":"abc123..."}'
1170
+
1171
+ async def read_blob(self, blob_hash: str) -> bytes:
1172
+ if "deadbeef" in blob_hash:
1173
+ return b"Hello from the mesh!\n\nThis is sample blob content."
1174
+ return b"(binary data placeholder -- connect to a real peer for actual content)"
1175
+
1176
+ async def invoke(
1177
+ self, service: str, method: str, payload: dict[str, Any]
1178
+ ) -> Any:
1179
+ import time
1180
+ # Simulate latency
1181
+ await asyncio.sleep(0.05)
1182
+
1183
+ if service == "HelloWorld" and method == "sayHello":
1184
+ name = payload.get("name", payload.get("_positional", "World"))
1185
+ greeting = payload.get("greeting", "Hello")
1186
+ return {
1187
+ "message": f"{greeting}, {name}!",
1188
+ "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
1189
+ }
1190
+
1191
+ return {"status": "ok", "service": service, "method": method, "args": payload}
1192
+
1193
+ async def server_stream(
1194
+ self, service: str, method: str, payload: dict[str, Any]
1195
+ ) -> Any:
1196
+ async def _gen():
1197
+ import time
1198
+ for i in range(5):
1199
+ await asyncio.sleep(0.1)
1200
+ yield {"index": i, "timestamp": time.strftime("%H:%M:%S")}
1201
+ return _gen()
1202
+
1203
+ async def client_stream(
1204
+ self, service: str, method: str, values: list[Any]
1205
+ ) -> Any:
1206
+ return {"received": len(values), "status": "ok"}
1207
+
1208
+ def bidi_stream(
1209
+ self, service: str, method: str, values: Any
1210
+ ) -> Any:
1211
+ async def _echo():
1212
+ async for v in values:
1213
+ yield {"echo": v}
1214
+ return _echo()
1215
+
1216
+ async def close(self) -> None:
1217
+ pass
1218
+
1219
+
1220
+ # ── Directory demo mode (aster.site) ────────────────────────────────────────
1221
+
1222
+
1223
class DirectoryConnection:
    """Live directory connection backed by the Day 0 @aster service.

    Talks to the publication service at *address* to browse registered
    handles and their published services; RPC invocation itself is not
    supported in directory mode (methods return stub responses).
    """

    def __init__(self, address: str) -> None:
        from aster_cli.join import get_local_identity_state

        self._address = address
        self._runtime = None
        self._publication_client = None
        self._types_mod = None
        # Caches keyed by normalized handle / "handle/service" or bare service name.
        self._handle_cache: dict[str, dict[str, Any]] = {}
        self._contract_cache: dict[str, dict[str, Any]] = {}
        state = get_local_identity_state()
        self.my_handle = state["handle"] or state["display_handle"]
        self._state = state

    async def connect(self) -> None:
        """Open the runtime and resolve the generated types module.

        The types module lives next to the generated client package; its
        path is derived by swapping ``.services.`` for ``.types.`` in the
        client's module path.
        """
        from aster_cli.aster_service import open_aster_service

        self._runtime = await open_aster_service(self._address)
        self._publication_client = await self._runtime.publication_client()
        self._types_mod = __import__(
            self._publication_client.__module__.replace(".services.", ".types."),
            fromlist=["*"],
        )

    async def close(self) -> None:
        """Shut down the runtime if one was opened."""
        if self._runtime is not None:
            await self._runtime.close()

    async def list_handles(self) -> list[dict[str, Any]]:
        """List directory handles, with the current user always first."""
        handles: list[dict[str, Any]] = []
        current_name = self.my_handle if str(self.my_handle).startswith("@") else f"@{self.my_handle}"
        handles.append(
            {
                "handle": current_name,
                "pubkey_hash": str(self._state.get("root_pubkey", ""))[:12],
                "registered": bool(self._state.get("handle")),
                "service_count": len(self._local_manifest_services()),
                "description": "current user",
            }
        )

        if self._publication_client is None or self._types_mod is None:
            return handles

        result = await self._publication_client.list_directory_handles(
            self._types_mod.ListDirectoryHandlesRequest(limit=20)
        )
        for entry in getattr(result, "handles", []):
            # rstrip("@") collapses an empty handle ("@") to "" so it is skipped.
            handle = f"@{getattr(entry, 'handle', '')}".rstrip("@")
            if not handle or handle == current_name:
                continue
            handles.append(
                {
                    "handle": handle,
                    "pubkey_hash": "",
                    "registered": True,
                    "service_count": getattr(entry, "service_count", 0),
                    "description": getattr(entry, "display_name", None) or "",
                    "registered_at": getattr(entry, "registered_at", ""),
                    "last_published_at": getattr(entry, "last_published_at", None),
                }
            )
        return handles

    async def get_handle_info(self, handle: str) -> dict[str, Any]:
        """Fetch (and cache) the services published under *handle*.

        For the current user's own handle, services from the local
        ``.aster/manifest.json`` that are not yet published are merged in.
        Contracts are cached under both "handle/service" and bare service
        name for later :meth:`get_contract` lookups.
        """
        normalized = handle.lstrip("@")
        if normalized in self._handle_cache:
            return self._handle_cache[normalized]

        services: list[dict[str, Any]] = []
        if self._publication_client is not None and self._types_mod is not None:
            try:
                result = await self._publication_client.list_services(
                    self._types_mod.ListServicesRequest(handle=normalized)
                )
            except Exception:
                result = None
            for service in getattr(result, "services", []) if result is not None else []:
                service_name = getattr(service, "service_name", "")
                try:
                    manifest_result = await self._publication_client.get_manifest(
                        self._types_mod.GetManifestRequest(
                            handle=normalized,
                            service_name=service_name,
                        )
                    )
                    manifest = json.loads(getattr(manifest_result, "manifest_json", "{}") or "{}")
                except Exception:
                    manifest = {}
                contract = {
                    "name": service_name,
                    "version": getattr(service, "version", manifest.get("version", 1)),
                    "contract_id": getattr(service, "contract_id", manifest.get("contract_id", "")),
                    "methods": manifest.get("methods", []),
                    "types": [],
                }
                svc_record = {
                    "name": service_name,
                    "published": True,
                    "version": getattr(service, "version", manifest.get("version", 1)),
                    "scoped": manifest.get("scoped", "shared"),
                    "description": getattr(service, "description", ""),
                    "endpoints": getattr(service, "endpoint_count", 0),
                    "contract_hash": getattr(service, "contract_id", manifest.get("contract_id", "")),
                    "status": getattr(service, "status", ""),
                    "visibility": getattr(service, "visibility", ""),
                    "delegation_mode": getattr(service, "delegation_mode", ""),
                    "published_at": getattr(service, "published_at", ""),
                    "methods": manifest.get("methods", []),
                    "contract": contract,
                }
                self._contract_cache[f"{normalized}/{service_name}"] = contract
                self._contract_cache[service_name] = contract
                services.append(svc_record)

        if normalized == str(self._state.get("handle", "")).strip():
            published_names = {svc["name"] for svc in services}
            for manifest in self._local_manifest_services():
                if manifest["name"] in published_names:
                    continue
                services.append(manifest)
                self._contract_cache[f"{normalized}/{manifest['name']}"] = manifest["contract"]
                self._contract_cache[manifest["name"]] = manifest["contract"]

        info = {"readme": "", "services": services}
        self._handle_cache[normalized] = info
        return info

    async def get_contract(self, service_name: str) -> dict[str, Any] | None:
        """Return a cached contract by "handle/service" or bare name."""
        return self._contract_cache.get(service_name)

    async def list_services(self) -> list[dict[str, Any]]:
        """Directory mode has no flat service list; browse via handles."""
        return []

    async def list_blobs(self) -> list[dict[str, Any]]:
        """Directory mode exposes no blobs."""
        return []

    async def read_blob(self, blob_hash: str) -> bytes:
        """Directory mode exposes no blob content."""
        return b"(directory mode -- no blob content)"

    async def invoke(self, service: str, method: str, payload: dict[str, Any]) -> Any:
        """Stub: directory mode cannot invoke RPCs."""
        return {"status": "not_implemented_in_directory_mode", "service": service, "method": method, "args": payload}

    async def server_stream(self, service: str, method: str, payload: dict[str, Any]) -> Any:
        """Stub: return an immediately-exhausted stream."""
        async def _gen():
            if False:
                yield None
        return _gen()

    async def client_stream(self, service: str, method: str, values: list[Any]) -> Any:
        """Stub: directory mode cannot invoke RPCs.

        Present for interface parity with the other connection classes so
        the shell's client-stream path doesn't hit an AttributeError.
        """
        return {"status": "not_implemented_in_directory_mode", "service": service, "method": method, "received": len(values)}

    def bidi_stream(self, service: str, method: str, values: Any) -> Any:
        """Stub: return an immediately-exhausted stream."""
        async def _gen():
            if False:
                yield None
        return _gen()

    def _local_manifest_services(self) -> list[dict[str, Any]]:
        """Read service records from the local ``.aster/manifest.json``.

        Returns an empty list when the manifest is missing or unreadable.
        Each record carries a synthesized ``contract`` dict so drill-down
        works for unpublished local services too.
        """
        from aster_cli.profile import get_published_services

        local_services = []
        published_names = set(get_published_services(self._state["profile"]))
        manifest_path = Path(".aster/manifest.json")
        if not manifest_path.exists():
            return local_services

        try:
            payload = json.loads(manifest_path.read_text())
            manifests = payload if isinstance(payload, list) else [payload]
            for manifest in manifests:
                service_name = manifest.get("service", "UnknownService")
                contract = {
                    "name": service_name,
                    "version": manifest.get("version", 1),
                    "contract_id": manifest.get("contract_id", ""),
                    "methods": manifest.get("methods", []),
                    "types": [],
                }
                local_services.append(
                    {
                        "name": service_name,
                        "published": service_name in published_names,
                        "version": manifest.get("version", 1),
                        "scoped": manifest.get("scoped", "shared"),
                        "description": "Local manifest",
                        "endpoints": 1 if service_name in published_names else 0,
                        "contract_hash": manifest.get("contract_id", ""),
                        "methods": manifest.get("methods", []),
                        "contract": contract,
                    }
                )
        except Exception:
            return []
        return local_services
1417
+
1418
+
1419
+ class DirectoryDemoConnection:
1420
+ """Offline demo simulating the aster.site directory experience.
1421
+
1422
+ Shows a browsable hierarchy of handles and published services.
1423
+ """
1424
+
1425
+ # Simulated current user
1426
+ MY_HANDLE = "emrul"
1427
+ MY_PUBKEY_HASH = "a1b2c3d4e5f6"
1428
+
1429
+ # Simulated directory data
1430
+ _HANDLES = [
1431
+ {"handle": "emrul", "pubkey_hash": "a1b2c3d4e5f6", "registered": True},
1432
+ {"handle": "acme-corp", "pubkey_hash": "f7e8d9c0b1a2", "registered": True},
1433
+ {"handle": "alice-dev", "pubkey_hash": "1234abcd5678", "registered": True},
1434
+ {"handle": None, "pubkey_hash": "7f3a2bc9de01", "registered": False},
1435
+ {"handle": None, "pubkey_hash": "9e8d7c6b5a43", "registered": False},
1436
+ ]
1437
+
1438
+ _HANDLE_INFO: dict[str, dict[str, Any]] = {
1439
+ "emrul": {
1440
+ "readme": (
1441
+ "# emrul\n\n"
1442
+ "Building distributed systems with Aster.\n\n"
1443
+ "Services:\n"
1444
+ "- TaskManager -- async task queue for AI agent workflows\n"
1445
+ "- InvoiceService -- invoice lifecycle management\n"
1446
+ ),
1447
+ "services": [
1448
+ {
1449
+ "name": "TaskManager",
1450
+ "published": True,
1451
+ "version": 3,
1452
+ "scoped": "session",
1453
+ "description": "Async task queue for AI agent workflows",
1454
+ "endpoints": 2,
1455
+ "methods": [
1456
+ {
1457
+ "name": "submitTask",
1458
+ "pattern": "unary",
1459
+ "request_type": "TaskRequest",
1460
+ "response_type": "TaskHandle",
1461
+ "timeout": 30.0,
1462
+ "fields": [
1463
+ {"name": "prompt", "type": "str", "required": True},
1464
+ {"name": "priority", "type": "int", "required": False, "default": 0},
1465
+ {"name": "tags", "type": "list[str]", "required": False},
1466
+ ],
1467
+ },
1468
+ {
1469
+ "name": "watchProgress",
1470
+ "pattern": "server_stream",
1471
+ "request_type": "TaskHandle",
1472
+ "response_type": "ProgressEvent",
1473
+ "timeout": None,
1474
+ "fields": [
1475
+ {"name": "task_id", "type": "str", "required": True},
1476
+ ],
1477
+ },
1478
+ {
1479
+ "name": "cancelTask",
1480
+ "pattern": "unary",
1481
+ "request_type": "CancelRequest",
1482
+ "response_type": "CancelResult",
1483
+ "timeout": 10.0,
1484
+ "fields": [
1485
+ {"name": "task_id", "type": "str", "required": True},
1486
+ {"name": "reason", "type": "str", "required": False},
1487
+ ],
1488
+ },
1489
+ ],
1490
+ },
1491
+ {
1492
+ "name": "InvoiceService",
1493
+ "published": False,
1494
+ "version": 1,
1495
+ "scoped": "shared",
1496
+ "contract_hash": "b7c8d9e0f1a2b3c4d5e6f7a8b9c0d1e2",
1497
+ "description": "Invoice lifecycle management",
1498
+ "endpoints": 0,
1499
+ "methods": [
1500
+ {
1501
+ "name": "create",
1502
+ "pattern": "unary",
1503
+ "request_type": "InvoiceRequest",
1504
+ "response_type": "Invoice",
1505
+ },
1506
+ {
1507
+ "name": "list",
1508
+ "pattern": "server_stream",
1509
+ "request_type": "ListFilter",
1510
+ "response_type": "Invoice",
1511
+ },
1512
+ ],
1513
+ },
1514
+ ],
1515
+ },
1516
+ "acme-corp": {
1517
+ "readme": (
1518
+ "# acme-corp\n\n"
1519
+ "Enterprise payment infrastructure.\n"
1520
+ ),
1521
+ "services": [
1522
+ {
1523
+ "name": "PaymentGateway",
1524
+ "published": True,
1525
+ "version": 7,
1526
+ "scoped": "session",
1527
+ "description": "Process payments across multiple providers",
1528
+ "endpoints": 5,
1529
+ "methods": [
1530
+ {
1531
+ "name": "charge",
1532
+ "pattern": "unary",
1533
+ "request_type": "ChargeRequest",
1534
+ "response_type": "ChargeResult",
1535
+ "timeout": 30.0,
1536
+ "fields": [
1537
+ {"name": "amount_cents", "type": "int", "required": True},
1538
+ {"name": "currency", "type": "str", "required": True},
1539
+ {"name": "source_token", "type": "str", "required": True},
1540
+ ],
1541
+ },
1542
+ {
1543
+ "name": "refund",
1544
+ "pattern": "unary",
1545
+ "request_type": "RefundRequest",
1546
+ "response_type": "RefundResult",
1547
+ "timeout": 30.0,
1548
+ },
1549
+ {
1550
+ "name": "watchSettlements",
1551
+ "pattern": "server_stream",
1552
+ "request_type": "SettlementFilter",
1553
+ "response_type": "SettlementEvent",
1554
+ "timeout": None,
1555
+ },
1556
+ ],
1557
+ },
1558
+ {
1559
+ "name": "FraudDetector",
1560
+ "published": True,
1561
+ "version": 2,
1562
+ "scoped": "shared",
1563
+ "description": "Real-time transaction fraud scoring",
1564
+ "endpoints": 3,
1565
+ "methods": [
1566
+ {
1567
+ "name": "score",
1568
+ "pattern": "unary",
1569
+ "request_type": "Transaction",
1570
+ "response_type": "FraudScore",
1571
+ "timeout": 5.0,
1572
+ },
1573
+ {
1574
+ "name": "trainModel",
1575
+ "pattern": "client_stream",
1576
+ "request_type": "TrainingExample",
1577
+ "response_type": "TrainingResult",
1578
+ "timeout": 300.0,
1579
+ },
1580
+ ],
1581
+ },
1582
+ ],
1583
+ },
1584
+ "alice-dev": {
1585
+ "readme": (
1586
+ "# alice-dev\n\n"
1587
+ "AI research tools and agent services.\n"
1588
+ ),
1589
+ "services": [
1590
+ {
1591
+ "name": "DocumentSummarizer",
1592
+ "published": True,
1593
+ "version": 1,
1594
+ "scoped": "session",
1595
+ "description": "Summarize documents using LLM pipelines",
1596
+ "endpoints": 1,
1597
+ "methods": [
1598
+ {
1599
+ "name": "summarize",
1600
+ "pattern": "unary",
1601
+ "request_type": "SummarizeRequest",
1602
+ "response_type": "Summary",
1603
+ "timeout": 60.0,
1604
+ "fields": [
1605
+ {"name": "document", "type": "str", "required": True},
1606
+ {"name": "max_length", "type": "int", "required": False, "default": 500},
1607
+ ],
1608
+ },
1609
+ {
1610
+ "name": "streamSummary",
1611
+ "pattern": "server_stream",
1612
+ "request_type": "SummarizeRequest",
1613
+ "response_type": "SummaryChunk",
1614
+ "timeout": 120.0,
1615
+ },
1616
+ ],
1617
+ },
1618
+ ],
1619
+ },
1620
+ "7f3a2bc9de01": {
1621
+ "readme": "",
1622
+ "services": [
1623
+ {
1624
+ "name": "WeatherAPI",
1625
+ "published": True,
1626
+ "version": 1,
1627
+ "scoped": "shared",
1628
+ "description": "Global weather data service",
1629
+ "endpoints": 1,
1630
+ "methods": [
1631
+ {
1632
+ "name": "getCurrent",
1633
+ "pattern": "unary",
1634
+ "request_type": "LocationQuery",
1635
+ "response_type": "WeatherData",
1636
+ "timeout": 10.0,
1637
+ },
1638
+ ],
1639
+ },
1640
+ ],
1641
+ },
1642
+ "9e8d7c6b5a43": {
1643
+ "readme": "",
1644
+ "services": [
1645
+ {
1646
+ "name": "EchoService",
1647
+ "published": True,
1648
+ "version": 1,
1649
+ "scoped": "shared",
1650
+ "description": "Simple echo for testing connectivity",
1651
+ "endpoints": 1,
1652
+ "methods": [
1653
+ {
1654
+ "name": "echo",
1655
+ "pattern": "unary",
1656
+ "request_type": "EchoRequest",
1657
+ "response_type": "EchoResponse",
1658
+ "timeout": 5.0,
1659
+ },
1660
+ ],
1661
+ },
1662
+ ],
1663
+ },
1664
+ }
1665
+
1666
+ def __init__(self) -> None:
1667
+ from aster_cli.join import get_local_identity_state
1668
+ from aster_cli.profile import get_published_services
1669
+
1670
+ state = get_local_identity_state()
1671
+ self.my_handle = state["handle"] or state["display_handle"].lstrip("@")
1672
+ self.my_pubkey_hash = (state["root_pubkey"] or self.MY_PUBKEY_HASH)[:12]
1673
+ self._handles = copy.deepcopy(self._HANDLES)
1674
+ self._handle_info = copy.deepcopy(self._HANDLE_INFO)
1675
+
1676
+ local_services = []
1677
+ published_names = set(get_published_services(state["profile"]))
1678
+ manifest_path = Path(".aster/manifest.json")
1679
+ if manifest_path.exists():
1680
+ try:
1681
+ import json as _json
1682
+
1683
+ payload = _json.loads(manifest_path.read_text())
1684
+ manifests = payload if isinstance(payload, list) else [payload]
1685
+ for manifest in manifests:
1686
+ methods = []
1687
+ for method in manifest.get("methods", []):
1688
+ methods.append({
1689
+ "name": method.get("name", "?"),
1690
+ "pattern": method.get("pattern", "unary"),
1691
+ "request_type": method.get("request_type", "?"),
1692
+ "response_type": method.get("response_type", "?"),
1693
+ "timeout": method.get("timeout"),
1694
+ "fields": method.get("fields", []),
1695
+ })
1696
+ service_name = manifest.get("service", "UnknownService")
1697
+ local_services.append({
1698
+ "name": service_name,
1699
+ "published": service_name in published_names,
1700
+ "version": manifest.get("version", 1),
1701
+ "scoped": manifest.get("scoped", "shared"),
1702
+ "description": "Local manifest",
1703
+ "endpoints": 1 if service_name in published_names else 0,
1704
+ "contract_hash": manifest.get("contract_id", ""),
1705
+ "methods": methods,
1706
+ })
1707
+ except Exception:
1708
+ local_services = []
1709
+
1710
+ info = self._handle_info.setdefault(self.my_handle, {"readme": "", "services": []})
1711
+ info["services"] = local_services + info.get("services", [])
1712
+ if not any((h.get("handle") or h.get("pubkey_hash")) == self.my_handle for h in self._handles):
1713
+ self._handles.insert(0, {
1714
+ "handle": self.my_handle,
1715
+ "pubkey_hash": self.my_pubkey_hash,
1716
+ "registered": bool(state["handle"]) and state["handle_status"] in {"pending", "verified"},
1717
+ })
1718
+
1719
+ async def connect(self) -> None:
1720
+ pass
1721
+
1722
+ async def list_handles(self) -> list[dict[str, Any]]:
1723
+ return [
1724
+ {
1725
+ "handle": h["handle"] or h["pubkey_hash"],
1726
+ "pubkey_hash": h["pubkey_hash"],
1727
+ "registered": h["registered"],
1728
+ }
1729
+ for h in self._handles
1730
+ ]
1731
+
1732
+ async def get_handle_info(self, handle: str) -> dict[str, Any]:
1733
+ return self._handle_info.get(handle, {"readme": "", "services": []})
1734
+
1735
+ # -- These support drill-down into services (reuses demo logic) --
1736
+
1737
+ async def list_services(self) -> list[dict[str, Any]]:
1738
+ return []
1739
+
1740
+ async def get_contract(self, service_name: str) -> dict[str, Any] | None:
1741
+ # Search all handles for the service
1742
+ for info in self._HANDLE_INFO.values():
1743
+ for svc in info.get("services", []):
1744
+ if svc["name"] == service_name:
1745
+ return {
1746
+ "name": svc["name"],
1747
+ "version": svc.get("version", 1),
1748
+ "contract_id": svc.get("contract_hash", "demo-hash")[:16] + "...",
1749
+ "methods": svc.get("methods", []),
1750
+ "types": [
1751
+ {"name": m.get("request_type", "?"), "hash": "aabbccdd..."}
1752
+ for m in svc.get("methods", [])
1753
+ if m.get("request_type")
1754
+ ],
1755
+ }
1756
+ return None
1757
+
1758
+ async def list_blobs(self) -> list[dict[str, Any]]:
1759
+ return []
1760
+
1761
+ async def read_blob(self, blob_hash: str) -> bytes:
1762
+ return b"(directory demo -- no blob content)"
1763
+
1764
+ async def invoke(
1765
+ self, service: str, method: str, payload: dict[str, Any]
1766
+ ) -> Any:
1767
+ await asyncio.sleep(0.05)
1768
+ if service == "TaskManager" and method == "submitTask":
1769
+ return {
1770
+ "task_id": "tsk_demo_001",
1771
+ "status": "queued",
1772
+ "prompt": payload.get("prompt", ""),
1773
+ "priority": payload.get("priority", 0),
1774
+ }
1775
+ return {"status": "ok", "service": service, "method": method, "args": payload}
1776
+
1777
+ async def server_stream(
1778
+ self, service: str, method: str, payload: dict[str, Any]
1779
+ ) -> Any:
1780
+ async def _gen():
1781
+ import time
1782
+ for i in range(5):
1783
+ await asyncio.sleep(0.1)
1784
+ yield {"index": i, "timestamp": time.strftime("%H:%M:%S")}
1785
+ return _gen()
1786
+
1787
async def client_stream(
    self, service: str, method: str, values: list[Any]
) -> Any:
    """Simulate a client-streaming RPC: acknowledge how many values arrived."""
    count = len(values)
    return {"status": "ok", "received": count}
1791
+
1792
def bidi_stream(self, service: str, method: str, values: Any) -> Any:
    """Simulate a bidirectional stream: echo each incoming value back.

    *values* is an async iterable; the returned async generator wraps each
    item as ``{"echo": item}``.
    """
    async def _pump():
        async for item in values:
            yield {"echo": item}
    return _pump()
1797
+
1798
async def close(self) -> None:
    """Release resources -- a no-op for the demo connection."""
    return None
1800
+
1801
+
1802
+ # ── VFS population from connection ──────────────────────────────────────────
1803
+
1804
async def _populate_from_connection(root, connection) -> tuple[int, int]:
    """Pre-populate the VFS from the connection.

    Builds ``/services/<name>[/<method>]`` nodes from the peer's service
    summaries and ``/blobs/<short-hash>`` nodes from its blob listing.
    Population is best-effort: listing failures still mark the parent node
    loaded so the shell doesn't retry in a tight loop.

    Args:
        root: VFS root node (as built by ``build_root``).
        connection: Connection exposing ``list_services``/``list_blobs``.

    Returns:
        ``(service_count, blob_count)`` -- the number of nodes created.
    """
    # Deferred to avoid a module-level import cycle with the vfs module,
    # but hoisted out of the loops: it only needs to run once per call.
    # (NodeKind is already imported at module level.)
    from aster_cli.shell.vfs import VfsNode

    services_node = root.child("services")
    blobs_node = root.child("blobs")

    svc_count = 0
    blob_count = 0

    # Populate services
    if services_node:
        try:
            summaries = await connection.list_services()
            for svc in summaries:
                name = svc["name"] if isinstance(svc, dict) else str(svc)
                svc_node = VfsNode(
                    name=name,
                    kind=NodeKind.SERVICE,
                    path=f"/services/{name}",
                    metadata=svc if isinstance(svc, dict) else {"name": name},
                )

                # Populate methods if available in summary
                methods = svc.get("methods", []) if isinstance(svc, dict) else []
                for m in methods:
                    m_name = m.get("name", str(m)) if isinstance(m, dict) else str(m)
                    m_node = VfsNode(
                        name=m_name,
                        kind=NodeKind.METHOD,
                        path=f"/services/{name}/{m_name}",
                        metadata=m if isinstance(m, dict) else {"name": m_name},
                        loaded=True,
                    )
                    svc_node.add_child(m_node)
                # Only mark loaded if methods were populated;
                # otherwise let ensure_loaded retry after manifest fetch
                if methods:
                    svc_node.loaded = True

                services_node.add_child(svc_node)
                svc_count += 1
            services_node.loaded = True
        except Exception:
            # Best-effort: a failed listing still marks the node loaded.
            services_node.loaded = True

    # Populate blobs
    if blobs_node:
        try:
            blobs = await connection.list_blobs()
            for blob in blobs:
                hash_str = blob.get("hash", str(blob)) if isinstance(blob, dict) else str(blob)
                short = hash_str[:12] + "…" if len(hash_str) > 12 else hash_str
                blob_node = VfsNode(
                    name=short,
                    kind=NodeKind.BLOB,
                    path=f"/blobs/{short}",
                    metadata=blob if isinstance(blob, dict) else {"hash": hash_str},
                    loaded=True,
                )
                blobs_node.add_child(blob_node)
                blob_count += 1
            # Only mark loaded if we found blobs; otherwise let
            # ensure_loaded retry after manifest fetch populates artifact_refs
            if blob_count > 0:
                blobs_node.loaded = True
        except Exception:
            blobs_node.loaded = True

    return svc_count, blob_count
1874
+
1875
+
1876
+ # ── Directory VFS population ─────────────────────────────────────────────────
1877
+
1878
+
1879
async def _populate_directory(root, connection) -> int:
    """Pre-populate the ``/aster/`` directory VFS.

    Creates one HANDLE node per directory entry; each handle gets an
    optional README node plus one SERVICE node (with METHOD children) per
    service it advertises. Population is best-effort: on failure the
    ``/aster`` node is still marked loaded so the shell doesn't re-fetch.

    Args:
        root: VFS root node (as built by ``build_directory_root``).
        connection: Connection exposing ``list_handles``/``get_handle_info``.

    Returns:
        The number of handle nodes created.
    """
    aster_node = root.child("aster")
    if not aster_node:
        return 0

    handle_count = 0
    try:
        # Deferred to avoid a module-level import cycle with the vfs module.
        # (NodeKind is already imported at module level.)
        from aster_cli.shell.vfs import VfsNode

        handles = await connection.list_handles()
        for h in handles:
            name = h.get("handle") or h.get("pubkey_hash", "???")
            handle_node = VfsNode(
                name=name,
                kind=NodeKind.HANDLE,
                path=f"/aster/{name}",
                metadata=h,
            )
            aster_node.add_child(handle_node)
            handle_count += 1

            # Pre-populate services for each handle
            try:
                info = await connection.get_handle_info(name)
            except Exception:
                info = {"readme": "", "services": []}

            readme_text = info.get("readme", "")
            if readme_text:
                readme_node = VfsNode(
                    name="README.md",
                    kind=NodeKind.README,
                    path=f"/aster/{name}/README.md",
                    metadata={"content": readme_text},
                    loaded=True,
                )
                handle_node.add_child(readme_node)

            for svc in info.get("services", []):
                svc_name = svc.get("name", "???")
                published = svc.get("published", False)

                # Unpublished services display as a contract-hash prefix so
                # users can still address them unambiguously.
                if published:
                    display_name = svc_name
                else:
                    short_hash = svc.get("contract_hash", "??????")[:10]
                    display_name = f"{short_hash}... ({svc_name})"

                svc_node = VfsNode(
                    name=display_name,
                    kind=NodeKind.SERVICE,
                    path=f"/aster/{name}/{display_name}",
                    metadata=svc,
                )
                for m in svc.get("methods", []):
                    m_name = m.get("name", str(m)) if isinstance(m, dict) else str(m)
                    m_node = VfsNode(
                        name=m_name,
                        kind=NodeKind.METHOD,
                        path=f"/aster/{name}/{display_name}/{m_name}",
                        metadata=m if isinstance(m, dict) else {"name": m_name},
                        loaded=True,
                    )
                    svc_node.add_child(m_node)
                svc_node.loaded = True
                handle_node.add_child(svc_node)

            handle_node.loaded = True

        aster_node.loaded = True
    except Exception:
        aster_node.loaded = True

    return handle_count
1954
+
1955
+
1956
+ # ── Shell REPL ────────────────────────────────────────────────────────────────
1957
+
1958
async def _run_shell(
    connection: Any,
    peer_name: str,
    raw: bool = False,
    directory_mode: bool = False,
    air_gapped: bool = False,
) -> None:
    """Run the interactive shell REPL.

    Args:
        connection: Peer/demo connection object used for RPC and listings.
        peer_name: Display name of the connected peer (shown in the prompt).
        raw: Emit raw output (suppresses banners and rich rendering).
        directory_mode: Browse the /aster/ handle directory instead of a
            single peer's /services and /blobs tree.
        air_gapped: Disable remote @aster identity lookups for the session.
    """
    console = Console()
    display = Display(console=console, raw=raw)
    from rich.panel import Panel
    from aster_cli.join import (
        STATUS_REMOTE_TIMEOUT_SECONDS,
        _fetch_remote_status,
        apply_remote_identity_state,
        get_local_identity_state,
    )

    # Resolve the local identity, then (unless air-gapped) refresh it from
    # the remote @aster service, bounded by a timeout so startup stays fast.
    state = get_local_identity_state()
    remote_error: str | None = None
    if state["root_pubkey"] and not air_gapped:
        aster_addr = getattr(connection, "_address", None) or getattr(connection, "_peer_addr", None)
        try:
            remote = await asyncio.wait_for(
                _fetch_remote_status(
                    SimpleNamespace(aster=aster_addr, root_key=None)
                ),
                timeout=STATUS_REMOTE_TIMEOUT_SECONDS,
            )
        except Exception as exc:
            remote = None
            remote_error = str(exc)
        if remote is not None:
            apply_remote_identity_state(state, remote)

    if not raw:
        open_gate = bool(
            getattr(connection, "_aster_client", None)
            and getattr(connection._aster_client, "open_gate", False)
        )
        banner_lines = _build_identity_banner_lines(
            state,
            remote_error=remote_error,
            air_gapped=air_gapped,
            open_gate=open_gate,
        )
        if banner_lines:
            display.console.print()
            display.console.print(
                Panel("\n".join(banner_lines), border_style="blue", padding=(0, 2))
            )

    if directory_mode:
        root = build_directory_root()
        handle_count = await _populate_directory(root, connection)
        display.directory_welcome(peer_name, handle_count)
    else:
        # Build VFS
        root = build_root()

        # Pre-populate from connection
        svc_count, blob_count = await _populate_from_connection(root, connection)

        # Welcome banner -- include our own short node id so users can
        # see exactly which endpoint their credential must match.
        our_node_id: str | None = None
        try:
            ac = getattr(connection, "_aster_client", None)
            if ac is not None and getattr(ac, "_node", None) is not None:
                our_node_id = ac._node.node_addr_info().endpoint_id
        except Exception:
            our_node_id = None
        display.welcome(peer_name, svc_count, blob_count, our_node_id=our_node_id)

    # ── Guided tour ───────────────────────────────────────────────────────
    from aster_cli.shell.guide import (
        DEFAULT_TOUR,
        GuideManager,
        Tour,
        is_first_time,
        mark_tour_complete,
    )

    if directory_mode:
        # Directory mode has its own UX flow -- skip the peer tour
        guide = GuideManager(display)
        guide.disable()
    elif is_first_time() and not raw:
        guide = GuideManager(display, tour=DEFAULT_TOUR)
        guide.fire("connected")
    else:
        guide = GuideManager(display)  # empty tour, no-op
        guide.disable()

    # Shell state
    if directory_mode:
        cwd = f"/aster/{peer_name}"
    else:
        cwd = "/"
    _last_ctrl_c = 0.0  # timestamp of last Ctrl+C for double-tap exit

    # Detect non-interactive (piped) mode and switch to a plain-stdin
    # reader. prompt_toolkit's PromptSession assumes a real terminal: when
    # stdin/stdout are pipes it falls back to a degraded renderer that
    # echoes prompts twice and emits cursor-positioning ANSI escapes that
    # mangle the output. This is the duplicated/garbled "non-interactive
    # mode" failure mode QA agents kept hitting.
    is_interactive = sys.stdin.isatty() and sys.stdout.isatty()

    # Command context (mutable -- cwd updates)
    ctx = CommandContext(
        vfs_cwd=cwd,
        vfs_root=root,
        connection=connection,
        display=display,
        peer_name=peer_name,
        interactive=is_interactive,
        raw_output=raw,
        guide=guide,
    )

    # History (only used in interactive mode -- non-interactive runs are
    # one-shot and don't benefit from a shared command history)
    history_dir = Path.home() / ".aster"
    history_dir.mkdir(exist_ok=True)

    # Read-line abstraction: pick the implementation up-front so the REPL
    # body stays the same shape in both modes.
    read_line: "callable[[], asyncio.Future[str]]"
    if is_interactive:
        history = FileHistory(str(history_dir / "shell_history"))
        completer = ShellCompleter(get_context=lambda: ctx)
        session: PromptSession = PromptSession(
            history=history,
            completer=completer,
            style=SHELL_STYLE,
            complete_while_typing=True,
        )

        async def read_line() -> str:  # type: ignore[no-redef]
            prompt = _make_prompt(peer_name, ctx.vfs_cwd)
            return await session.prompt_async(prompt)
    else:
        # Plain stdin reader. No prompt rendering, no completer, no
        # history. asyncio.to_thread keeps the event loop responsive
        # while we're blocked on stdin so background tasks (manifest
        # fetch, etc.) keep running.
        async def read_line() -> str:  # type: ignore[no-redef]
            line = await asyncio.to_thread(sys.stdin.readline)
            if not line:
                raise EOFError()
            return line.rstrip("\r\n")

    # REPL loop
    while True:
        try:
            text = await read_line()
            text = text.strip()
            _last_ctrl_c = 0.0  # reset on successful input

            if not text:
                continue

            # Parse command
            parts = _tokenize(text)
            cmd_name = parts[0]
            cmd_args = parts[1:]

            # Handle ./methodName syntax
            if cmd_name.startswith("./"):
                method_name = cmd_name[2:]
                cmd = get_command("./")
                if cmd:
                    await cmd.execute([method_name] + cmd_args, ctx)
                else:
                    display.error(f"unknown command: {cmd_name}")
                continue

            # Handle quit/exit aliases
            if cmd_name in ("quit", "q"):
                cmd_name = "exit"

            # Look up command
            cmd = get_command(cmd_name)
            if cmd is None:
                # Try as a direct method invocation if in a service dir
                node, _ = resolve_path(root, ctx.vfs_cwd, ".")
                if node and node.kind == NodeKind.SERVICE and node.child(cmd_name):
                    # Treat as ./methodName
                    invoke_cmd = get_command("./")
                    if invoke_cmd:
                        await invoke_cmd.execute([cmd_name] + cmd_args, ctx)
                    continue

                display.error(f"unknown command: {cmd_name} (try 'help')")
                continue

            # Check if command is valid at current path
            if not cmd.is_valid_at(ctx.vfs_cwd):
                hint = ""
                if cmd.contexts:
                    valid = cmd.contexts[0].rstrip("/*").rstrip("/") or "/"
                    hint = f"\n Try: cd {valid} && {cmd_name} {' '.join(cmd_args)}".rstrip()
                display.error(
                    f"'{cmd_name}' is not available at {ctx.vfs_cwd}{hint}"
                )
                continue

            # Execute
            old_cwd = ctx.vfs_cwd
            await cmd.execute(cmd_args, ctx)

            # Fire guide events
            if guide.is_active:
                guide.fire("command", cmd_name)
                if ctx.vfs_cwd != old_cwd:
                    guide.fire("cd", ctx.vfs_cwd)

        except KeyboardInterrupt:
            import time as _time
            now = _time.monotonic()
            if now - _last_ctrl_c < 1.5:
                # Double Ctrl+C → clean exit
                display.print()
                display.info("Disconnecting...")
                break
            _last_ctrl_c = now
            display.print()
            display.info("Press Ctrl+C again to exit")
            continue
        except EOFError:
            display.print()
            display.info("Disconnecting...")
            break
        except SystemExit:
            break
        except Exception as e:
            display.error(str(e))

    # Mark tour complete if it was active
    if guide.tour and not guide.tour.is_complete:
        pass  # partial tour, don't mark complete
    elif guide.tour and guide.tour.is_complete:
        try:
            mark_tour_complete()
        except Exception:
            pass  # non-critical
2205
+
2206
+
2207
+ def _build_identity_banner_lines(
2208
+ state: dict[str, Any],
2209
+ *,
2210
+ remote_error: str | None,
2211
+ air_gapped: bool,
2212
+ open_gate: bool = False,
2213
+ ) -> list[str]:
2214
+ """Build shell startup identity banner lines.
2215
+
2216
+ On an open-gate server (``open_gate=True``) the "Identity not configured"
2217
+ prompt is suppressed entirely: the server doesn't care about the user's
2218
+ local @aster identity, so nagging the user to set one up is pure noise.
2219
+ """
2220
+ lines: list[str] = []
2221
+ if air_gapped:
2222
+ lines.append("[bold]Air-gapped[/bold] [dim]@aster lookups disabled for this session[/dim]")
2223
+
2224
+ if not state["root_pubkey"]:
2225
+ if open_gate:
2226
+ # Server is open-gate, local identity is irrelevant; stay silent.
2227
+ pass
2228
+ else:
2229
+ lines.append("[bold]Identity not configured[/bold]")
2230
+ lines.append("[dim]Run `aster keygen root` or `join <handle> <email>` to get started.[/dim]")
2231
+ elif state["handle_status"] == "pending":
2232
+ lines.append(f"[bold cyan]{state['display_handle']}[/bold cyan] [yellow]pending verification[/yellow]")
2233
+ lines.append("[dim]Run `verify <code>` or `status` to check for auto-verification.[/dim]")
2234
+ elif state["handle_status"] == "verified":
2235
+ lines.append(f"[bold cyan]{state['display_handle']}[/bold cyan] [green]verified[/green]")
2236
+ else:
2237
+ lines.append(f"[bold cyan]{state['display_handle']}[/bold cyan] [yellow]not registered[/yellow]")
2238
+ lines.append("[dim]Run `join <handle> <email>` to register this identity.[/dim]")
2239
+
2240
+ if air_gapped:
2241
+ lines.append("[dim]Remote: disabled[/dim]")
2242
+ elif remote_error:
2243
+ lines.append(f"[yellow]Remote offline[/yellow] [dim]{remote_error}[/dim]")
2244
+ elif state.get("remote"):
2245
+ lines.append("[green]Remote connected[/green] [dim]identity synced with @aster[/dim]")
2246
+
2247
+ return lines
2248
+
2249
+
2250
+ def _tokenize(text: str) -> list[str]:
2251
+ """Simple shell-like tokenization respecting quotes."""
2252
+ import shlex
2253
+ try:
2254
+ return shlex.split(text)
2255
+ except ValueError:
2256
+ return text.split()
2257
+
2258
+
2259
+ # ── Public API ────────────────────────────────────────────────────────────────
2260
+
2261
async def launch_shell(
    peer_addr: str | None = None,
    rcan_path: str | None = None,
    demo: bool = False,
    demo2: bool = False,
    air_gapped: bool = False,
    raw: bool = False,
) -> None:
    """Launch the interactive shell.

    Args:
        peer_addr: Address of the peer to connect to. May be a base64
                   NodeAddr string, an EndpointId hex, or a peer name
                   from ``.aster-identity``.
        rcan_path: Path to RCAN credential file.
        demo: If True, use demo connection (no real peer).
        demo2: If True, use directory demo (aster.site browsing).
        air_gapped: If True, disable remote @aster lookups for the session.
        raw: If True, emit raw output suitable for piping.
    """
    if demo2:
        connection = DirectoryConnection(peer_addr) if peer_addr else DirectoryDemoConnection()
        peer_name = connection.my_handle
        await connection.connect()
        try:
            await _run_shell(connection, peer_name, raw=raw, directory_mode=True, air_gapped=air_gapped)
        finally:
            await connection.close()
        return

    if demo or peer_addr is None:
        connection = DemoConnection()
        peer_name = "demo"
        await connection.connect()
    else:
        # Resolve peer name before heavy imports (instant -- just reads .aster-identity)
        resolved_addr, friendly_name = _resolve_peer_arg(peer_addr)
        peer_name = friendly_name

        # Animate a spinner during connect so the CLI feels responsive
        connection = PeerConnection(peer_addr=resolved_addr, rcan_path=rcan_path)
        connect_task = asyncio.create_task(connection.connect())

        frames = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
        i = 0
        while not connect_task.done():
            sys.stderr.write(f"\r {frames[i % len(frames)]} connecting to {peer_name}")
            sys.stderr.flush()
            i += 1
            try:
                # shield() so the 80ms spinner timeout doesn't cancel the
                # underlying connect attempt.
                await asyncio.wait_for(asyncio.shield(connect_task), timeout=0.08)
                break
            except asyncio.TimeoutError:
                continue
        sys.stderr.write("\r\033[K")

        # Re-raise if connect failed
        await connect_task

    try:
        await _run_shell(connection, peer_name, raw=raw, air_gapped=air_gapped)
    finally:
        await connection.close()
2322
+
2323
+
2324
def register_shell_subparser(subparsers: argparse._SubParsersAction) -> None:
    """Register the ``aster shell`` subcommand and its options."""
    parser = subparsers.add_parser(
        "shell",
        help="Interactive shell for exploring a peer",
    )

    # Positional: the peer to connect to (optional -- demo mode without it).
    parser.add_argument(
        "peer",
        nargs="?",
        default=None,
        help="Peer address to connect to (omit for demo mode)",
    )
    parser.add_argument(
        "--rcan",
        default=None,
        metavar="PATH",
        help="Path to RCAN credential file",
    )

    # Boolean flags, kept in this order so --help output is unchanged.
    parser.add_argument(
        "--demo",
        action="store_true",
        help="Launch in demo mode with sample data",
    )
    parser.add_argument(
        "--demo2",
        action="store_true",
        help=argparse.SUPPRESS,  # undocumented
    )
    parser.add_argument(
        "--air-gapped",
        action="store_true",
        help="Disable @aster service features for this shell session",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        dest="raw_json",
        help="Output raw JSON (for piping)",
    )
2363
+
2364
+
2365
def run_shell_command(args: argparse.Namespace) -> int:
    """Execute the ``aster shell`` command.

    Returns a process exit code: 1 on admission denial, 0 otherwise
    (Ctrl+C and in-shell `exit` both count as a clean exit).
    """
    demo2 = getattr(args, "demo2", False)
    # Plain `aster shell` with no peer drops into demo mode.
    demo = args.demo or (args.peer is None and not demo2)

    try:
        shell_coro = launch_shell(
            peer_addr=args.peer,
            rcan_path=args.rcan,
            demo=demo,
            demo2=demo2,
            air_gapped=args.air_gapped,
            raw=args.raw_json,
        )
        asyncio.run(shell_coro)
    except (KeyboardInterrupt, SystemExit):
        pass
    except Exception as exc:
        from aster.runtime import AdmissionDeniedError
        if isinstance(exc, AdmissionDeniedError):
            print(f"Error: {exc}", file=sys.stderr)
            return 1
        raise

    return 0