@pentatonic-ai/ai-agent-sdk 0.7.13 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/package.json +1 -1
  2. package/packages/memory/openclaw-plugin/index.js +7 -0
  3. package/packages/memory/openclaw-plugin/openclaw.plugin.json +9 -1
  4. package/packages/memory/openclaw-plugin/package.json +1 -1
  5. package/packages/memory/src/__tests__/engine.test.js +142 -0
  6. package/packages/memory/src/engine.js +65 -0
  7. package/packages/memory-engine/compat/server.py +90 -5
  8. package/packages/memory-engine/docker-compose.yml +18 -8
  9. package/packages/memory-engine/engine/services/_shared/__init__.py +1 -0
  10. package/packages/memory-engine/engine/services/_shared/embed_provider.py +431 -0
  11. package/packages/memory-engine/engine/services/l2/Dockerfile +4 -2
  12. package/packages/memory-engine/engine/services/l2/l2-hybridrag-proxy.py +640 -81
  13. package/packages/memory-engine/engine/services/l4/Dockerfile +5 -1
  14. package/packages/memory-engine/engine/services/l4/server.py +19 -57
  15. package/packages/memory-engine/engine/services/l5/Dockerfile +3 -1
  16. package/packages/memory-engine/engine/services/l5/l5-comms-layer.py +24 -32
  17. package/packages/memory-engine/engine/services/l6/Dockerfile +3 -1
  18. package/packages/memory-engine/engine/services/l6/l6-document-store.py +24 -29
  19. package/packages/memory-engine/scripts/wipe-legacy-l3-entities.py +128 -0
  20. package/packages/memory-engine/tests/e2e_arena.sh +28 -4
  21. package/packages/memory-engine/tests/test_aggregate.py +333 -0
  22. package/packages/memory-engine/tests/test_arena_safety.py +232 -0
  23. package/packages/memory-engine/tests/test_channel_stat_reader.py +437 -0
  24. package/packages/memory-engine/tests/test_channel_stat_rollups.py +308 -0
  25. package/packages/memory-engine/tests/test_embed_provider.py +354 -0
  26. package/packages/memory-engine/tests/test_l3_arena_isolation.py +412 -0
@@ -4,7 +4,11 @@ WORKDIR /app
4
4
 
5
5
  RUN pip install --no-cache-dir fastapi 'uvicorn[standard]' httpx pydantic
6
6
 
7
- COPY server.py /app/server.py
7
+ # Build context is engine/services so the shared embed_provider module is
8
+ # COPYable. server.py adds engine/services to sys.path at startup, then
9
+ # imports from `_shared.embed_provider`.
10
+ COPY _shared /app/_shared
11
+ COPY l4/server.py /app/server.py
8
12
 
9
13
  RUN mkdir -p /data
10
14
  ENV L4_DB_PATH=/data/vec.db
@@ -23,27 +23,25 @@ import hashlib
23
23
  import os
24
24
  import sqlite3
25
25
  import struct
26
+ import sys
26
27
  import time
27
28
  from pathlib import Path
28
29
  from typing import Any
29
30
 
30
- import httpx
31
31
  from fastapi import FastAPI, HTTPException
32
32
  from pydantic import BaseModel
33
33
 
34
+ # Shared embedding client lives at engine/services/_shared/. Add the parent of
35
+ # the service dir to sys.path so `from _shared.embed_provider import ...` works
36
+ # regardless of how the service is launched (uvicorn, python server.py, etc.).
37
+ sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
38
+ from _shared.embed_provider import EmbedClient # noqa: E402
39
+
34
40
  # ----------------------------------------------------------------------
35
41
  # Config
36
42
  # ----------------------------------------------------------------------
37
43
 
38
44
  DB_PATH = os.environ.get("L4_DB_PATH", "/data/vec.db")
39
- NV_EMBED_URL = os.environ.get("L4_NV_EMBED_URL", "http://nv-embed:8041/v1/embeddings")
40
- # Embedding model name sent in /v1/embeddings request body. Defaults to
41
- # the production NV-Embed-v2 name; override via env when pointing at a
42
- # different OpenAI-compat endpoint (e.g. Ollama with nomic-embed-text).
43
- EMBED_MODEL_NAME = os.environ.get("L4_EMBED_MODEL", "nv-embed-v2")
44
- # Optional Authorization: Bearer <key> for the embedding endpoint.
45
- # Set when calling a hosted gateway (e.g. pentatonic-ai-gateway). Empty = no auth.
46
- EMBED_API_KEY = os.environ.get("L4_EMBED_API_KEY", "")
47
45
  EMBED_DIM = int(os.environ.get("L4_EMBED_DIM", "4096"))
48
46
 
49
47
 
@@ -96,59 +94,23 @@ def _get_db() -> sqlite3.Connection:
96
94
  # Embedding client
97
95
  # ----------------------------------------------------------------------
98
96
 
99
- _http: httpx.AsyncClient | None = None
97
_embed: EmbedClient | None = None


def _embed_client() -> EmbedClient:
    """Return the shared EmbedClient, constructing it on first use.

    Construction is deferred so the L4_* environment variables are read
    when the first embedding is requested rather than at import time.
    """
    global _embed
    if _embed is not None:
        return _embed
    _embed = EmbedClient.from_env(
        prefix="L4_",
        default_url="http://nv-embed:8041/v1/embeddings",
    )
    return _embed
107
109
 
108
110
 
109
111
async def _embed_batch(texts: list[str]) -> list[list[float]]:
    """Embed a batch of texts via the shared EmbedClient."""
    client = _embed_client()
    return await client.embed_batch_async(texts)
152
114
 
153
115
 
154
116
  # ----------------------------------------------------------------------
@@ -1,7 +1,9 @@
1
1
  FROM python:3.12-slim
2
2
  WORKDIR /app
3
3
  RUN pip install --no-cache-dir fastapi "uvicorn[standard]" httpx "pymilvus[milvus_lite]" "setuptools<70" pydantic
4
- COPY l5-comms-layer.py /app/server.py
4
+ # Shared embed_provider module (build context is engine/services).
5
+ COPY _shared /app/_shared
6
+ COPY l5/l5-comms-layer.py /app/server.py
5
7
  RUN mkdir -p /data
6
8
  ENV L5_DB_PATH=/data/comms.db
7
9
  EXPOSE 8034
@@ -23,6 +23,7 @@ import os
23
23
  import glob
24
24
  import hashlib
25
25
  import json
26
+ import sys
26
27
  import time
27
28
  from datetime import datetime
28
29
  from pathlib import Path
@@ -30,6 +31,10 @@ from pathlib import Path
30
31
  import httpx
31
32
  from pymilvus import MilvusClient, DataType, CollectionSchema, FieldSchema
32
33
 
34
+ # Shared embed client lives at engine/services/_shared/.
35
+ sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
36
+ from _shared.embed_provider import EmbedClient # noqa: E402
37
+
33
38
  # --- Config ---
34
39
  DB_PATH = os.environ.get(
35
40
  "L5_DB_PATH",
@@ -43,43 +48,30 @@ PEOPLE_DIR = WORKSPACE / "memory" / "people"
43
48
  CONTACTS_DIR = WORKSPACE / "memory" / "contacts"
44
49
  MEMORY_DIR = WORKSPACE / "memory"
45
50
 
46
- NV_EMBED_URL = os.environ.get("L5_NV_EMBED_URL", "http://localhost:8041/v1/embeddings")
47
- # Embedding model name sent in /v1/embeddings request body. Defaults to
48
- # the production NV-Embed-v2 name; override when pointing at a different
49
- # OpenAI-compat endpoint (e.g. Ollama with nomic-embed-text).
50
- EMBED_MODEL_NAME = os.environ.get("L5_EMBED_MODEL", "nv-embed-v2")
51
- # Optional Authorization: Bearer <key> for the primary embedding endpoint.
52
- EMBED_API_KEY = os.environ.get("L5_EMBED_API_KEY", "")
51
_embed: EmbedClient | None = None


def _embed_client() -> EmbedClient:
    """Return the lazily constructed shared EmbedClient for L5.

    Deferred so L5_* env vars are read at first use, not at import.
    """
    global _embed
    if _embed is not None:
        return _embed
    _embed = EmbedClient.from_env(
        prefix="L5_",
        default_url="http://localhost:8041/v1/embeddings",
    )
    return _embed
63
+
53
64
 
54
65
def _embed_post(texts):
    """Embed a batch of texts via the shared EmbedClient.

    The provider profile (auth scheme + URL path + body/response shape)
    is selected by the L5_EMBED_PROVIDER env var (openai |
    pentatonic-gateway | cohere | custom); it auto-detects on 401 unless
    L5_EMBED_AUTODETECT=false.

    Returns: list[list[float]] (one embedding per input text).
    """
    client = _embed_client()
    return client.embed_batch(texts)
83
75
 
84
76
  # Ollama fallback path. URL/model can be overridden so the L5 container can
85
77
  # reach an Ollama instance running on the docker host (host.docker.internal)
@@ -3,7 +3,9 @@ WORKDIR /app
3
3
  RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
4
4
  RUN pip install --no-cache-dir fastapi "uvicorn[standard]" httpx "pymilvus[milvus_lite]" "setuptools<70" pydantic spacy
5
5
  RUN python -m spacy download en_core_web_sm
6
- COPY l6-document-store.py /app/server.py
6
+ # Shared embed_provider module (build context is engine/services).
7
+ COPY _shared /app/_shared
8
+ COPY l6/l6-document-store.py /app/server.py
7
9
  RUN mkdir -p /data
8
10
  ENV L6_DATA_DIR=/data
9
11
  EXPOSE 8037
@@ -20,6 +20,7 @@ import logging
20
20
  import os
21
21
  import re
22
22
  import sqlite3
23
+ import sys
23
24
  import time
24
25
  from datetime import datetime, timezone
25
26
  from pathlib import Path
@@ -29,6 +30,10 @@ import httpx
29
30
  from pymilvus import MilvusClient, DataType, CollectionSchema, FieldSchema
30
31
  from pymilvus.milvus_client.index import IndexParams
31
32
 
33
+ # Shared embed client lives at engine/services/_shared/.
34
+ sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
35
+ from _shared.embed_provider import EmbedClient # noqa: E402
36
+
32
37
  # ---------------------------------------------------------------------------
33
38
  # Config
34
39
  # ---------------------------------------------------------------------------
@@ -37,39 +42,29 @@ DATA_DIR = Path(os.environ.get("L6_DATA_DIR", str(Path.home() / "l6-document-sto
37
42
  MILVUS_DB = str(DATA_DIR / "documents.db")
38
43
  FTS_DB = str(DATA_DIR / "documents_fts.db")
39
44
  OLLAMA_URL = os.environ.get("L6_OLLAMA_URL", "http://localhost:11434")
40
- EMBED_MODEL = os.environ.get("L6_EMBED_MODEL", "nomic-embed-text")
41
- NV_EMBED_URL = os.environ.get("L6_NV_EMBED_URL", "http://localhost:8041/v1/embeddings")
42
45
  NV_EMBED_ENABLED = os.environ.get("L6_NV_EMBED_ENABLED", "true").lower() == "true"
43
46
  EMBED_DIM = int(os.environ.get("L6_EMBED_DIM", "4096"))
44
- # Optional Authorization: Bearer <key> for the embedding endpoint.
45
- EMBED_API_KEY = os.environ.get("L6_EMBED_API_KEY", "")
46
47
 
47
- def _embed_post(texts):
48
- """POST to embedding endpoint. Tries OpenAI-compat shape first;
49
- falls back to Pentatonic-AI lambda-gateway native shape on failure.
50
- See L4 / L5 for the same pattern."""
51
- import httpx as _httpx
52
- payload = {"input": texts, "model": EMBED_MODEL}
53
- try:
54
- r = _httpx.post(
55
- NV_EMBED_URL,
56
- headers={"Authorization": f"Bearer {EMBED_API_KEY}"} if EMBED_API_KEY else {},
57
- json=payload,
58
- timeout=120,
48
_embed: EmbedClient | None = None


def _embed_client() -> EmbedClient:
    """Return the lazily constructed shared EmbedClient for L6.

    Deferred so L6_* env vars are read at first use, not at import.
    """
    global _embed
    if _embed is not None:
        return _embed
    _embed = EmbedClient.from_env(
        prefix="L6_",
        default_url="http://localhost:8041/v1/embeddings",
        default_model="nomic-embed-text",
    )
    return _embed
61
+
62
+
63
def _embed_post(texts):
    """Embed a batch of texts via the shared EmbedClient.

    The provider profile is selected by the L6_EMBED_PROVIDER env var
    (openai | pentatonic-gateway | cohere | custom); see
    engine/services/_shared/embed_provider.py for details.
    """
    client = _embed_client()
    return client.embed_batch(texts)
73
68
 
74
69
  COLLECTION_NAME = "documents"
75
70
  RRF_K = 60
@@ -0,0 +1,128 @@
1
+ #!/usr/bin/env python3
2
+ """Wipe pre-arena-scoping :Entity nodes from the L3 Neo4j graph.
3
+
4
+ Run this AFTER the engine has been deployed with the arena-scoped
5
+ writer paths, not before. Sequence:
6
+
7
+ 1. Deploy l2-hybridrag-proxy with arena-scoped MERGE patterns.
8
+ 2. Verify new ingest is creating arena-tagged entities (run
9
+ --dry-run on this script first; it reports legacy vs new).
10
+ 3. Run this script with --confirm to wipe legacy entities.
11
+ 4. Future ingest re-extracts entities from existing :Chunk nodes
12
+ on-demand (search-side touches them; new stores recreate them
13
+ from scratch under the right arena).
14
+
15
+ Why wipe vs. backfill: pre-fix entities collapsed cross-tenant by name,
16
+ so their MENTIONS edges connect to chunks across multiple arenas.
17
+ Splitting them by mentions is doable but error-prone (edge cases for
18
+ many-arena entities, orphans, no-mention entities). The Hebbian weights
19
+ on those edges were also corrupted by cross-tenant traffic, so they
20
+ weren't worth saving. Chunks are preserved either way — re-extraction
21
+ is cheap.
22
+
23
+ Usage:
24
+ # report counts only
25
+ python wipe-legacy-l3-entities.py --neo4j-uri bolt://localhost:7687
26
+
27
+ # actually wipe
28
+ python wipe-legacy-l3-entities.py --neo4j-uri bolt://localhost:7687 --confirm
29
+
30
+ The script is idempotent — running it twice on a clean graph deletes
31
+ zero rows.
32
+ """
33
+ from __future__ import annotations
34
+
35
+ import argparse
36
+ import os
37
+ import sys
38
+
39
+ try:
40
+ from neo4j import GraphDatabase
41
+ except ImportError:
42
+ print("ERROR: neo4j driver not installed. `pip install neo4j` first.", file=sys.stderr)
43
+ sys.exit(1)
44
+
45
+
46
+ def main() -> int:
47
+ parser = argparse.ArgumentParser(description=__doc__.split("\n\n")[0])
48
+ parser.add_argument(
49
+ "--neo4j-uri",
50
+ default=os.environ.get("NEO4J_URI", "bolt://localhost:7687"),
51
+ help="Neo4j bolt URI (default: NEO4J_URI env or bolt://localhost:7687)",
52
+ )
53
+ parser.add_argument(
54
+ "--neo4j-user",
55
+ default=os.environ.get("NEO4J_USER", "neo4j"),
56
+ )
57
+ parser.add_argument(
58
+ "--neo4j-password",
59
+ default=os.environ.get("NEO4J_PASSWORD"),
60
+ )
61
+ parser.add_argument(
62
+ "--confirm",
63
+ action="store_true",
64
+ help="Actually delete. Without this flag, runs in dry-run mode "
65
+ "(reports counts only).",
66
+ )
67
+ args = parser.parse_args()
68
+
69
+ if not args.neo4j_password:
70
+ print("ERROR: --neo4j-password or NEO4J_PASSWORD env required", file=sys.stderr)
71
+ return 2
72
+
73
+ driver = GraphDatabase.driver(
74
+ args.neo4j_uri, auth=(args.neo4j_user, args.neo4j_password),
75
+ )
76
+ try:
77
+ with driver.session() as session:
78
+ # Count legacy vs arena-scoped entities so the operator can
79
+ # eyeball whether the new writer path has actually started
80
+ # producing arena-scoped rows before deleting anything.
81
+ legacy = session.run(
82
+ "MATCH (e:Entity) WHERE e.arena IS NULL RETURN count(e) AS n"
83
+ ).single()["n"]
84
+ scoped = session.run(
85
+ "MATCH (e:Entity) WHERE e.arena IS NOT NULL RETURN count(e) AS n"
86
+ ).single()["n"]
87
+ print(f"L3 Entity nodes: {legacy} legacy (no arena) / {scoped} arena-scoped")
88
+
89
+ # Same breakdown for chunks — they should already all be
90
+ # arena-tagged but worth verifying before/after.
91
+ chunk_legacy = session.run(
92
+ "MATCH (c:Chunk) WHERE c.arena IS NULL RETURN count(c) AS n"
93
+ ).single()["n"]
94
+ chunk_scoped = session.run(
95
+ "MATCH (c:Chunk) WHERE c.arena IS NOT NULL RETURN count(c) AS n"
96
+ ).single()["n"]
97
+ print(f"L3 Chunk nodes: {chunk_legacy} legacy (no arena) / {chunk_scoped} arena-scoped")
98
+
99
+ if not args.confirm:
100
+ print("\nDry run — pass --confirm to actually delete legacy entities.")
101
+ return 0
102
+
103
+ if legacy == 0:
104
+ print("\nNothing to do — all entities already arena-scoped.")
105
+ return 0
106
+
107
+ print(f"\nWiping {legacy} legacy entities…")
108
+ result = session.run(
109
+ "MATCH (e:Entity) WHERE e.arena IS NULL DETACH DELETE e RETURN count(e) AS n"
110
+ ).single()
111
+ deleted = result["n"]
112
+ print(f"Deleted {deleted} legacy entities.")
113
+
114
+ # Drop the old single-key entity index now that the data
115
+ # is gone — safe to do because the new writer path uses
116
+ # the compound (arena, name) index.
117
+ try:
118
+ session.run("DROP INDEX entity_name IF EXISTS")
119
+ print("Dropped legacy `entity_name` index.")
120
+ except Exception as e: # noqa: BLE001
121
+ print(f"Note: could not drop entity_name index ({e}); ok if absent.")
122
+ return 0
123
+ finally:
124
+ driver.close()
125
+
126
+
127
+ if __name__ == "__main__":
128
+ sys.exit(main())
@@ -217,16 +217,40 @@ print("yes" if any("Eclipse" in r.get("content","") for r in data) else "no")')
217
217
  || fail "tenant-y lost the shared phrase (id collision?)"
218
218
 
219
219
  # ---------------------------------------------------------------------------
220
- # /forget — by metadata_contains. Cleans up so reruns are idempotent.
220
+ # /forget — tenant-scoped delete by arena.
221
+ #
222
+ # Pre-arena, /forget would unconditionally trigger a global wipe of
223
+ # L0+L4+L3 inside the shim, regardless of what filters were passed. The
224
+ # smoke test happily asserted "deleted >= 1" because the shim returned
225
+ # the count of every row globally, even rows the caller didn't ask to
226
+ # remove. That was the right thing for a bench reset, the wrong thing
227
+ # for any real tenant operation.
228
+ #
229
+ # Now /forget honours the arena in the metadata filter:
230
+ # - {metadata_contains: {arena: <tenant>}} → tenant-scoped wipe
231
+ # - {metadata_contains: {<other>: <val>}} → L6 targeted only
232
+ # - {} (empty body) → bench reset (gated
233
+ # by GLOBAL_WIPE in the
234
+ # internal endpoint)
235
+ #
236
+ # We assert here that wiping arena=e2e-tenant-a actually removes ≥1 row,
237
+ # AND that arena=e2e-tenant-b's rows survived — proves real tenant
238
+ # isolation rather than just "shim returned non-zero".
221
239
  # ---------------------------------------------------------------------------
222
240
 
223
241
  echo ""
224
- echo "=== /forget probe=e2e-arena ==="
242
+ echo "=== /forget arena=e2e-tenant-a ==="
225
243
  F=$(curl -sf -X POST "$BASE/forget" -H "Content-Type: application/json" \
226
- -d '{"metadata_contains":{"probe":"e2e-arena"}}')
244
+ -d '{"metadata_contains":{"arena":"e2e-tenant-a"}}')
227
245
  deleted=$(echo "$F" | python3 -c 'import json,sys; print(json.load(sys.stdin).get("deleted",0))')
228
246
  echo " deleted: $deleted"
229
- [ "$deleted" -ge "1" ] && ok "/forget removed at least 1 row" || fail "/forget"
247
+ [ "$deleted" -ge "1" ] && ok "/forget removed tenant-a rows" || fail "/forget tenant-a wipe"
248
+
249
+ # tenant-b should still have its rows.
250
+ B=$(curl -sf -X POST "$BASE/search" -H "Content-Type: application/json" \
251
+ -d '{"arena":"e2e-tenant-b","query":"Borealis","limit":4}')
252
+ b_hits=$(echo "$B" | python3 -c 'import json,sys; print(len(json.load(sys.stdin).get("results",[])))')
253
+ [ "$b_hits" -ge "1" ] && ok "/forget left tenant-b intact" || fail "/forget leaked into tenant-b"
230
254
 
231
255
  echo ""
232
256
  echo "=== Result ==="