mtrx-cli 0.1.23 → 0.1.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mtrx-cli",
3
- "version": "0.1.23",
3
+ "version": "0.1.25",
4
4
  "description": "MATRX CLI for routing Codex, Claude, and Cursor through Matrx",
5
5
  "homepage": "https://mtrx.so",
6
6
  "repository": {
@@ -33,6 +33,8 @@
33
33
  "src/matrx/cli/cursor_proxy.py",
34
34
  "src/matrx/cli/cursor_reroute.py",
35
35
  "src/matrx/cli/cursor_service.py",
36
+ "src/matrx/cli/bootstrap.py",
37
+ "src/matrx/cli/gemini_env_bootstrap.cjs",
36
38
  "src/matrx/cli/launcher.py",
37
39
  "src/matrx/cli/main.py",
38
40
  "src/matrx/cli/project_cmds.py",
@@ -1 +1 @@
1
- __version__ = "0.1.23"
1
+ __version__ = "0.1.25"
@@ -0,0 +1,119 @@
1
+ """
2
+ Bootstrap command — warms the system registry for an existing project.
3
+ Called by `mtrx init`.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import json
8
+ import subprocess
9
+ from pathlib import Path
10
+
11
# Default system registry seeded into .matrx/systems.json by `mtrx init`.
# Each entry: stable "id", human-readable "name"/"description", and
# "file_patterns" — path substrings/suffixes matched against git-log paths
# by _rank_systems_by_activity to attribute recent activity to a system.
# Users can hand-edit the generated file afterward.
_DEFAULT_SYSTEMS_TEMPLATE = [
    {
        "id": "memory",
        "name": "Memory System",
        "description": "Memory flywheel: extract, store, retrieve, inject",
        "file_patterns": ["core/memory.py", "services/memory.py", "core/extractor.py", "core/profile_builder.py"]
    },
    {
        "id": "proxy",
        "name": "Proxy Service",
        "description": "Intercepts all LLM calls, applies compression + injection",
        "file_patterns": ["services/proxy.py", "core/compressor.py", "core/summarizer.py"]
    },
    {
        "id": "auth",
        "name": "Auth & Multi-Org",
        "description": "Clerk JWT validation, org/project scoping, API keys",
        "file_patterns": ["middleware/auth.py", "api/auth.py", "api/orgs.py", "models/org_member.py"]
    },
    {
        "id": "analytics",
        "name": "Analytics",
        "description": "Usage snapshots, memory hit rate, token tracking",
        "file_patterns": ["services/analytics.py", "api/analytics.py"]
    },
]
37
+
38
+
39
def run_init(project_root: str = ".") -> None:
    """Entry point for `mtrx init`.

    Warms the system registry for *project_root*: ensures
    ``.matrx/systems.json`` exists, mines recent git history for hot files,
    and reports which systems look active.

    Args:
        project_root: directory to initialize; defaults to the CWD.
    """
    root = Path(project_root).resolve()
    print(f"Matrx init: analyzing {root}")

    # Step 1: Load or create .matrx/systems.json
    matrx_dir = root / ".matrx"
    matrx_dir.mkdir(exist_ok=True)
    systems_path = matrx_dir / "systems.json"

    if not systems_path.exists():
        _seed_systems_json(root, systems_path)
        print(f" Created {systems_path}")
    else:
        print(f" Found existing {systems_path}")

    # Step 2: Analyze git log for hot systems (best-effort; {} on failure)
    hot_files = _get_hot_files(root, days=30)
    print(f" Found {len(hot_files)} recently modified files")

    # Step 3: Map file activity onto the configured systems
    systems = json.loads(systems_path.read_text()).get("systems", [])
    hot_systems = _rank_systems_by_activity(hot_files, systems)

    if hot_systems:
        print("\n Active systems detected:")
        for sys_id, count, depth in hot_systems:
            # Look up display metadata for the ranked system id.
            meta = next((s for s in systems if s["id"] == sys_id), None)
            if meta:
                print(f" {meta['name']:<25} {count:>3} touches → {depth} card")
    else:
        print("\n No recently active systems detected (no git history or no matches).")

    # Fixed: these two literals had needless f-string prefixes (no placeholders).
    print("\n Matrx will generate cards for detected systems on first use.")
    print(" Run your agent — cards will be ready within the first few calls.\n")
    print(" Done.")
75
+
76
+
77
+ def _get_hot_files(root: Path, days: int = 30) -> dict[str, int]:
78
+ """Parse git log, return file → touch count mapping."""
79
+ try:
80
+ result = subprocess.run(
81
+ ["git", "log", f"--since={days} days ago", "--name-only", "--pretty=format:"],
82
+ cwd=root, capture_output=True, text=True, timeout=10,
83
+ )
84
+ counts: dict[str, int] = {}
85
+ for line in result.stdout.splitlines():
86
+ line = line.strip()
87
+ if line and not line.startswith("commit"):
88
+ counts[line] = counts.get(line, 0) + 1
89
+ return counts
90
+ except Exception:
91
+ return {}
92
+
93
+
94
+ def _rank_systems_by_activity(
95
+ hot_files: dict[str, int], systems: list[dict]
96
+ ) -> list[tuple[str, int, str]]:
97
+ """Return [(system_id, touch_count, depth)] sorted by activity."""
98
+ ranked = []
99
+ for s in systems:
100
+ patterns = s.get("file_patterns", [])
101
+ total = sum(
102
+ count for f, count in hot_files.items()
103
+ if any(pat in f or f.endswith(pat) for pat in patterns)
104
+ )
105
+ if total > 0:
106
+ ranked.append((s["id"], total, "standard"))
107
+
108
+ ranked.sort(key=lambda x: x[1], reverse=True)
109
+ # Re-assign depths by rank
110
+ result = []
111
+ for i, (sid, count, _) in enumerate(ranked):
112
+ depth = "full" if i < 3 else ("standard" if i < 8 else "distilled")
113
+ result.append((sid, count, depth))
114
+ return result
115
+
116
+
117
def _seed_systems_json(root: Path, out_path: Path) -> None:
    """Write default systems.json. Users can hand-edit afterward.

    NOTE(review): *root* is currently unused; kept for interface stability.
    """
    payload = {"systems": _DEFAULT_SYSTEMS_TEMPLATE}
    out_path.write_text(json.dumps(payload, indent=2))
@@ -33,7 +33,7 @@ import httpx
33
33
  from matrx.cli.cursor_ca import CertCache, load_ca
34
34
 
35
35
  try:
36
- from matrx.cli.cursor_reroute import is_ai_path, try_reroute_to_matrx
36
+ from matrx.cli.cursor_reroute import is_ai_path, try_inject_context, try_reroute_to_matrx
37
37
  except ImportError:
38
38
  # Stubs when cursor_reroute not available (e.g. npm package omit).
39
39
  def is_ai_path(path: str) -> bool:
@@ -42,6 +42,9 @@ except ImportError:
42
42
  async def try_reroute_to_matrx(*, path: str, method: str, **kwargs: Any) -> None:
43
43
  return None
44
44
 
45
+ async def try_inject_context(**kwargs: Any) -> None:
46
+ return None
47
+
45
48
  logger = logging.getLogger(__name__)
46
49
 
47
50
  DEFAULT_PORT = 8842
@@ -287,8 +290,10 @@ class MITMProxy:
287
290
  path = parts[1] if len(parts) > 1 else "/"
288
291
 
289
292
  req_body_size = 0
293
+ _is_ai_req = method == "POST" and is_ai_path(path)
294
+ _req_session_id = str(uuid.uuid4()) if _is_ai_req else ""
290
295
  # For AI paths: buffer request and try rerouting through MTRX (live injection)
291
- if method == "POST" and is_ai_path(path):
296
+ if _is_ai_req:
292
297
  req_headers, req_cl, req_chunked = await self._read_headers_only(
293
298
  client_reader
294
299
  )
@@ -303,7 +308,7 @@ class MITMProxy:
303
308
  req_body=req_body,
304
309
  matrx_base_url=self.matrx_base_url,
305
310
  matrx_key=self.matrx_key,
306
- session_id=str(uuid.uuid4()),
311
+ session_id=_req_session_id,
307
312
  )
308
313
  if result is not None:
309
314
  success, resp_headers, resp_body, is_streaming = result
@@ -327,10 +332,20 @@ class MITMProxy:
327
332
  )
328
333
  continue
329
334
  # Reroute returned but failed — fall through to forward
330
- # Reroute not implemented or failed forward to upstream
335
+ # Inject MTRX memory context into request before forwarding
336
+ injected_body = await try_inject_context(
337
+ req_body=req_body,
338
+ req_headers=req_headers,
339
+ matrx_base_url=self.matrx_base_url,
340
+ matrx_key=self.matrx_key,
341
+ session_id=_req_session_id,
342
+ )
343
+ body_to_forward = injected_body if injected_body is not None else req_body
344
+ fwd_headers = dict(req_headers)
345
+ fwd_headers["content-length"] = str(len(body_to_forward))
331
346
  up_writer.write(req_line)
332
- await self._write_headers(up_writer, req_headers)
333
- up_writer.write(req_body)
347
+ await self._write_headers(up_writer, fwd_headers)
348
+ up_writer.write(body_to_forward)
334
349
  await up_writer.drain()
335
350
  else:
336
351
  up_writer.write(req_line)
@@ -369,9 +384,20 @@ class MITMProxy:
369
384
  for t in ("text/event-stream", "grpc", "proto", "connect")
370
385
  )
371
386
 
372
- resp_body_size = await self._forward_body(
373
- up_reader, client_writer, resp_cl, resp_chunked
374
- )
387
+ if _is_ai_req:
388
+ resp_body_size, resp_captured = await self._forward_body_with_capture(
389
+ up_reader, client_writer, resp_cl, resp_chunked
390
+ )
391
+ if resp_captured:
392
+ asyncio.create_task(
393
+ self._extract_ai_response(
394
+ resp_captured, _req_session_id, hostname
395
+ )
396
+ )
397
+ else:
398
+ resp_body_size = await self._forward_body(
399
+ up_reader, client_writer, resp_cl, resp_chunked
400
+ )
375
401
 
376
402
  elapsed_ms = int((time.monotonic() - started) * 1000)
377
403
  self._request_count += 1
@@ -397,6 +423,114 @@ class MITMProxy:
397
423
  if "close" in conn_h:
398
424
  break
399
425
 
426
    async def _forward_body_with_capture(
        self,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        content_length: int,
        chunked: bool,
    ) -> tuple[int, bytes]:
        """Forward body like ``_forward_body`` while also capturing a copy.

        Returns ``(bytes_forwarded, captured_bytes)``. The capture enables
        background response extraction without blocking the forward path.

        Note: for chunked bodies the capture contains only the chunk
        payloads — size lines, CRLFs and the terminating zero-chunk framing
        are forwarded to the client but excluded from the capture.
        """
        parts: list[bytes] = []

        # Fixed-length body: relay up to content_length bytes in 64 KiB reads.
        if content_length > 0:
            total = 0
            remaining = content_length
            while remaining > 0:
                chunk = await reader.read(min(remaining, 65536))
                if not chunk:
                    break  # upstream closed early — return what we relayed
                writer.write(chunk)
                await writer.drain()
                parts.append(chunk)
                total += len(chunk)
                remaining -= len(chunk)
            return total, b"".join(parts)

        # Chunked transfer-encoding: relay framing verbatim, capture payloads.
        if chunked:
            total = 0
            while True:
                size_line = await reader.readline()
                if not size_line:
                    break
                writer.write(size_line)
                await writer.drain()
                size_str = size_line.decode("utf-8", errors="replace").strip()
                try:
                    # Chunk size is hex; strip any ";ext" chunk extension.
                    chunk_size = int(size_str.split(";")[0], 16)
                except ValueError:
                    break  # malformed framing — stop relaying
                if chunk_size == 0:
                    # Final chunk: forward the trailing line and finish.
                    trailer = await reader.readline()
                    writer.write(trailer)
                    await writer.drain()
                    break
                remaining = chunk_size
                chunk_parts: list[bytes] = []
                while remaining > 0:
                    data = await reader.read(min(remaining, 65536))
                    if not data:
                        return total, b"".join(parts)  # EOF mid-chunk
                    writer.write(data)
                    await writer.drain()
                    chunk_parts.append(data)
                    total += len(data)
                    remaining -= len(data)
                chunk_data = b"".join(chunk_parts)
                parts.append(chunk_data)
                # CRLF that terminates this chunk's payload.
                crlf = await reader.readline()
                writer.write(crlf)
                await writer.drain()
            return total, b"".join(parts)

        # Neither content-length nor chunked: nothing to forward.
        return 0, b""
491
+
492
    async def _extract_ai_response(
        self,
        resp_bytes: bytes,
        session_id: str,
        hostname: str,
    ) -> None:
        """Parse Connect frames from *resp_bytes* and ship response telemetry.

        Fire-and-forget — never raises, never blocks the forward path.

        Args:
            resp_bytes: captured response body (Connect-framed protobuf).
            session_id: correlates this response with its request telemetry.
            hostname: NOTE(review) currently unused in this body — kept for
                call-site symmetry / future use; confirm before removing.
        """
        try:
            # Lazy imports so the proxy keeps running when the extraction
            # helpers are absent (e.g. trimmed npm package).
            from matrx.cli.cursor_connect import parse_all_frames
            from matrx.cli.cursor_extraction import (
                extract_from_response_frame,
                parse_response_proto,
                ship_ai_telemetry,
            )

            frames = parse_all_frames(resp_bytes)
            accumulated: dict = {
                "session_id": session_id,
                "response_text": "",
                "tool_calls": [],
                "usage": None,
            }
            for flags, payload in frames:
                if flags == 0x02:  # end-of-stream trailer — stop
                    break
                resp_proto = parse_response_proto(payload)
                frame_data = extract_from_response_frame(resp_proto)
                if frame_data:
                    # Concatenate streamed text deltas, collect tool calls,
                    # and keep the most recent usage record seen.
                    accumulated["response_text"] = (
                        accumulated.get("response_text", "") + frame_data.get("text", "")
                    )
                    accumulated["tool_calls"].extend(frame_data.get("tool_calls", []))
                    if frame_data.get("usage"):
                        accumulated["usage"] = frame_data["usage"]

            await ship_ai_telemetry(accumulated, self.matrx_base_url, self.matrx_key)
        except Exception:
            logger.debug("proxy: _extract_ai_response failed", exc_info=True)
533
+
400
534
  async def _read_headers_only(
401
535
  self, reader: asyncio.StreamReader
402
536
  ) -> tuple[dict[str, str], int, bool]:
@@ -13,11 +13,14 @@ Refs: cursor-tap (https://github.com/burpheart/cursor-tap), everestmz/cursor-rpc
13
13
 
14
14
  from __future__ import annotations
15
15
 
16
+ import asyncio
16
17
  import json
17
18
  import logging
18
19
  import re
19
20
  from typing import Any
20
21
 
22
+ import httpx
23
+
21
24
  logger = logging.getLogger(__name__)
22
25
 
23
26
  # Cursor AI RPC paths (Connect protocol). RunSSE = main chat, StreamCpp = code completion.
@@ -46,6 +49,67 @@ def _cursor_model_to_openai(cursor_model: str) -> str:
46
49
  return cursor_model
47
50
 
48
51
 
52
+ def _build_search_query(extracted: dict[str, Any]) -> str:
53
+ files = extracted.get("files", [])
54
+ query_parts = [f.get("path", "").strip() for f in files[:3] if f.get("path")]
55
+ if query_parts:
56
+ return " ".join(query_parts)
57
+ return (extracted.get("conversation_summary") or "").strip()
58
+
59
+
60
+ def _prepend_context_items(context_items: Any, injected_items: list[Any]) -> None:
61
+ for item in reversed(injected_items):
62
+ try:
63
+ context_items.insert(0, item)
64
+ except Exception:
65
+ context_items.append(item)
66
+
67
+
68
def _inject_memory_context_items(
    *,
    req_proto: Any,
    memory_results: list[dict[str, Any]],
    server_chat_pb2: Any,
    existing_files: list[dict[str, Any]] | None = None,
    limit: int = 5,
) -> int:
    """Prepend up to *limit* MTRX memory entries to ``req_proto.context_items``.

    Skips blank entries and any content already present in *existing_files*
    or already injected in this call. Returns the number of items inserted.
    """
    # Seed the dedupe set with content the request already carries.
    seen = {
        (entry.get("content") or "").strip()
        for entry in (existing_files or [])
        if entry.get("content")
    }
    new_items: list[Any] = []

    for mem in memory_results:
        if len(new_items) >= limit:
            break
        text = (mem.get("content") or "").strip()
        if not text or text in seen:
            continue

        cached_item = server_chat_pb2.PotentiallyCachedContextItem()
        ctx_item = server_chat_pb2.ContextItem()
        file_chunk = server_chat_pb2.FileChunk()
        file_chunk.file_path = f"[matrx:{(mem.get('id') or '')[:8]}]"
        file_chunk.content = text[:4096]
        # Real protobuf messages require CopyFrom; plain objects (tests,
        # stubs) accept direct attribute assignment.
        if hasattr(getattr(ctx_item, "file_chunk", None), "CopyFrom"):
            ctx_item.file_chunk.CopyFrom(file_chunk)
        else:
            ctx_item.file_chunk = file_chunk
        if hasattr(getattr(cached_item, "context_item", None), "CopyFrom"):
            cached_item.context_item.CopyFrom(ctx_item)
        else:
            cached_item.context_item = ctx_item

        new_items.append(cached_item)
        seen.add(text)

    if not new_items:
        return 0

    _prepend_context_items(req_proto.context_items, new_items)
    return len(new_items)
111
+
112
+
49
113
  async def try_reroute_to_matrx(
50
114
  *,
51
115
  path: str,
@@ -74,3 +138,111 @@ async def try_reroute_to_matrx(
74
138
  # convert response back to Cursor's gRPC format.
75
139
  logger.debug("cursor_reroute: path=%s would reroute (protobuf conversion not yet implemented)", path)
76
140
  return None
141
+
142
+
143
+ # ---------------------------------------------------------------------------
144
+ # Context injection
145
+ # ---------------------------------------------------------------------------
146
+
147
+
148
+ async def _query_memory(
149
+ *,
150
+ query: str,
151
+ matrx_base_url: str,
152
+ matrx_key: str,
153
+ limit: int = 5,
154
+ ) -> list[dict]:
155
+ """Query MTRX memory search API. Returns list of memory entry dicts."""
156
+ try:
157
+ async with httpx.AsyncClient(timeout=0.1) as client: # 100 ms budget
158
+ resp = await client.get(
159
+ f"{matrx_base_url.rstrip('/')}/v1/memory/search",
160
+ params={"q": query, "limit": limit},
161
+ headers={"X-Matrx-Key": matrx_key},
162
+ )
163
+ if resp.status_code == 200:
164
+ return resp.json().get("entries", [])
165
+ except Exception:
166
+ logger.debug("cursor_reroute: memory query failed", exc_info=True)
167
+ return []
168
+
169
+
170
+ async def try_inject_context(
171
+ *,
172
+ req_body: bytes,
173
+ req_headers: dict[str, str],
174
+ matrx_base_url: str,
175
+ matrx_key: str,
176
+ session_id: str,
177
+ ) -> bytes | None:
178
+ """Parse the Connect+proto request, extract data, inject MTRX memory context items.
179
+
180
+ Returns modified request bytes with injected context items prepended, or
181
+ ``None`` to signal that the original request should be forwarded unchanged.
182
+ Wraps everything in try/except — never raises.
183
+ """
184
+ try:
185
+ from matrx.cli.cursor_connect import (
186
+ build_connect_frame,
187
+ is_connect_proto_request,
188
+ parse_connect_frame,
189
+ )
190
+ from matrx.cli.cursor_extraction import (
191
+ _PROTOS_AVAILABLE,
192
+ extract_from_request,
193
+ parse_request_proto,
194
+ ship_ai_telemetry,
195
+ )
196
+
197
+ if not is_connect_proto_request(req_headers):
198
+ return None
199
+
200
+ # Parse Connect envelope → raw proto bytes
201
+ flags, proto_bytes = parse_connect_frame(req_body)
202
+
203
+ # Deserialize proto
204
+ req_proto = parse_request_proto(proto_bytes)
205
+
206
+ # Extract structured data and ship telemetry fire-and-forget
207
+ extracted = extract_from_request(req_proto)
208
+ extracted["session_id"] = extracted.get("session_id") or session_id
209
+ asyncio.create_task(ship_ai_telemetry(extracted, matrx_base_url, matrx_key))
210
+
211
+ # Cannot inject without compiled protos or a successfully parsed proto
212
+ if not _PROTOS_AVAILABLE or req_proto is None:
213
+ return None
214
+
215
+ # Build search query from the open file paths
216
+ search_query = _build_search_query(extracted)
217
+ if not search_query:
218
+ return None
219
+
220
+ # Query MTRX memory (100 ms budget — never blocks the forward path)
221
+ memory_results = await _query_memory(
222
+ query=search_query,
223
+ matrx_base_url=matrx_base_url,
224
+ matrx_key=matrx_key,
225
+ limit=5,
226
+ )
227
+ if not memory_results:
228
+ return None
229
+
230
+ # Inject memory results as PotentiallyCachedContextItem entries
231
+ from matrx.cli.cursor_proto import server_chat_pb2 # type: ignore[import]
232
+
233
+ inserted = _inject_memory_context_items(
234
+ req_proto=req_proto,
235
+ memory_results=memory_results,
236
+ server_chat_pb2=server_chat_pb2,
237
+ existing_files=extracted.get("files", []),
238
+ )
239
+ if inserted == 0:
240
+ return None
241
+
242
+ # Serialize modified proto and re-wrap in Connect frame
243
+ new_proto_bytes = req_proto.SerializeToString()
244
+ return build_connect_frame(flags, new_proto_bytes)
245
+
246
+ except Exception:
247
+ logger.warning("cursor_reroute: try_inject_context failed", exc_info=True)
248
+ return None
@@ -0,0 +1,34 @@
1
"use strict";

// Preload shim (wired in via NODE_OPTIONS --require by the launcher):
// restores Gemini CLI environment variables from their base64-encoded
// MTRX_* mirrors. Existing values always win — nothing is overwritten.

const B64_MIRRORS = {
  MTRX_GEMINI_CUSTOM_HEADERS_B64: "GEMINI_CLI_CUSTOM_HEADERS",
  MTRX_CODE_ASSIST_ENDPOINT_B64: "CODE_ASSIST_ENDPOINT",
  MTRX_GEMINI_API_ENDPOINT_B64: "GEMINI_API_ENDPOINT",
};

// Decode a base64 env var; undefined when unset or undecodable.
function readBase64(name) {
  const encoded = process.env[name];
  if (!encoded) {
    return undefined;
  }
  try {
    return Buffer.from(encoded, "base64").toString("utf8");
  } catch {
    return undefined;
  }
}

Object.entries(B64_MIRRORS).forEach(([sourceName, targetName]) => {
  const plain = readBase64(sourceName);
  if (plain && !process.env[targetName]) {
    process.env[targetName] = plain;
  }
});

// The auth mechanism mirror is plain text (not base64) — copy it directly.
const mechanism = process.env.MTRX_GEMINI_API_KEY_AUTH_MECHANISM;
if (mechanism && !process.env.GEMINI_API_KEY_AUTH_MECHANISM) {
  process.env.GEMINI_API_KEY_AUTH_MECHANISM = mechanism;
}
@@ -95,6 +95,46 @@ def _runtime_agent_basename(tool: str) -> tuple[str, str, list[str], str]:
95
95
  return normalized, f"{tool.capitalize()} CLI", ["cli", tool], tool
96
96
 
97
97
 
98
+ def _append_sandbox_env(env: dict[str, str], key: str, value: str | None) -> None:
99
+ if not value:
100
+ return
101
+ entry = f"{key}={value}"
102
+ existing = [item.strip() for item in (env.get("SANDBOX_ENV") or "").split(",") if item.strip()]
103
+ filtered = [item for item in existing if not item.startswith(f"{key}=")]
104
+ filtered.append(entry)
105
+ env["SANDBOX_ENV"] = ",".join(filtered)
106
+
107
+
108
+ def _remove_sandbox_env_keys(env: dict[str, str], keys: tuple[str, ...]) -> None:
109
+ existing = [item.strip() for item in (env.get("SANDBOX_ENV") or "").split(",") if item.strip()]
110
+ filtered = [
111
+ item for item in existing
112
+ if not any(item.startswith(f"{key}=") for key in keys)
113
+ ]
114
+ if filtered:
115
+ env["SANDBOX_ENV"] = ",".join(filtered)
116
+ else:
117
+ env.pop("SANDBOX_ENV", None)
118
+
119
+
120
+ def _ensure_node_require(env: dict[str, str], script_path: str) -> None:
121
+ require_flag = f"--require={script_path}"
122
+ existing = (env.get("NODE_OPTIONS") or "").strip()
123
+ if require_flag in existing.split():
124
+ return
125
+ env["NODE_OPTIONS"] = f"{existing} {require_flag}".strip()
126
+
127
+
128
+ def _remove_node_require(env: dict[str, str], script_path: str) -> None:
129
+ require_flag = f"--require={script_path}"
130
+ existing = (env.get("NODE_OPTIONS") or "").split()
131
+ filtered = [part for part in existing if part != require_flag]
132
+ if filtered:
133
+ env["NODE_OPTIONS"] = " ".join(filtered)
134
+ else:
135
+ env.pop("NODE_OPTIONS", None)
136
+
137
+
98
138
  def configured_route(state: dict, tool: str) -> str | None:
99
139
  route = state.get("defaults", {}).get(tool)
100
140
  if route in VALID_ROUTES:
@@ -467,6 +507,20 @@ def _capture_git_context(cwd: str | None = None) -> tuple[str, str]:
467
507
  return branch, commit
468
508
 
469
509
 
510
+ def _capture_git_remote_url(cwd: str | None = None) -> str:
511
+ root = cwd or os.getcwd()
512
+ try:
513
+ r = subprocess.run(
514
+ ["git", "-C", root, "remote", "get-url", "origin"],
515
+ capture_output=True, text=True, timeout=2, check=False,
516
+ )
517
+ if r.returncode == 0:
518
+ return r.stdout.strip()
519
+ except (OSError, subprocess.SubprocessError):
520
+ pass
521
+ return ""
522
+
523
+
470
524
  def _resolve_matrx_context_overrides(
471
525
  state: dict,
472
526
  env: dict[str, str],
@@ -544,10 +598,13 @@ def _build_codex_env(
544
598
  if project_id:
545
599
  header_parts.append(f'"X-Matrx-Project-Id" = "{project_id}"')
546
600
  _git_branch, _git_commit = _capture_git_context(_workspace_cwd(env))
601
+ _git_repo_url = _capture_git_remote_url(_workspace_cwd(env))
547
602
  if _git_branch:
548
603
  header_parts.append(f'"X-Matrx-Branch" = "{_git_branch}"')
549
604
  if _git_commit:
550
605
  header_parts.append(f'"X-Matrx-Commit" = "{_git_commit}"')
606
+ if _git_repo_url:
607
+ header_parts.append(f'"X-Matrx-Repo-Url" = "{_git_repo_url}"')
551
608
  if env_b64:
552
609
  header_parts.append(f'"X-Matrx-Env" = "{env_b64}"')
553
610
  headers_str = ", ".join(header_parts)
@@ -589,6 +646,7 @@ def _build_gemini_env(
589
646
  proxy_base = ensure_v1_url(matrx.get("base_url"))
590
647
  mx_key, matrx_auth_source = _resolve_matrx_route_key(state, env)
591
648
  direct_key = (env.get("GEMINI_API_KEY") or env.get("GOOGLE_API_KEY") or "").strip()
649
+ bootstrap_script = str(Path(__file__).with_name("gemini_env_bootstrap.cjs").resolve())
592
650
 
593
651
  if route == "matrx":
594
652
  if not mx_key:
@@ -610,6 +668,7 @@ def _build_gemini_env(
610
668
  if runtime_agent_id:
611
669
  ctx_params.append(f"mtrx_agent={runtime_agent_id}")
612
670
  git_branch, git_commit = _capture_git_context(_workspace_cwd(env))
671
+ git_repo_url = _capture_git_remote_url(_workspace_cwd(env))
613
672
  if git_branch:
614
673
  ctx_params.append(f"mtrx_branch={git_branch}")
615
674
  if git_commit:
@@ -632,6 +691,8 @@ def _build_gemini_env(
632
691
  custom_headers.append(f"x-matrx-branch: {git_branch}")
633
692
  if git_commit:
634
693
  custom_headers.append(f"x-matrx-commit: {git_commit}")
694
+ if git_repo_url:
695
+ custom_headers.append(f"x-matrx-repo-url: {git_repo_url}")
635
696
  if env_b64:
636
697
  custom_headers.append(f"x-matrx-env: {env_b64}")
637
698
 
@@ -641,6 +702,27 @@ def _build_gemini_env(
641
702
  env["CODE_ASSIST_ENDPOINT"] = proxy_base
642
703
  env["GEMINI_CLI_CUSTOM_HEADERS"] = ", ".join(custom_headers)
643
704
  env["GEMINI_API_KEY_AUTH_MECHANISM"] = "bearer"
705
+ _ensure_node_require(env, bootstrap_script)
706
+ _append_sandbox_env(
707
+ env,
708
+ "MTRX_GEMINI_CUSTOM_HEADERS_B64",
709
+ base64.b64encode(env["GEMINI_CLI_CUSTOM_HEADERS"].encode("utf-8")).decode("ascii"),
710
+ )
711
+ _append_sandbox_env(
712
+ env,
713
+ "MTRX_CODE_ASSIST_ENDPOINT_B64",
714
+ base64.b64encode(env["CODE_ASSIST_ENDPOINT"].encode("utf-8")).decode("ascii"),
715
+ )
716
+ _append_sandbox_env(
717
+ env,
718
+ "MTRX_GEMINI_API_ENDPOINT_B64",
719
+ base64.b64encode(env["GEMINI_API_ENDPOINT"].encode("utf-8")).decode("ascii"),
720
+ )
721
+ _append_sandbox_env(
722
+ env,
723
+ "MTRX_GEMINI_API_KEY_AUTH_MECHANISM",
724
+ env["GEMINI_API_KEY_AUTH_MECHANISM"],
725
+ )
644
726
 
645
727
  return env, matrx_auth_source
646
728
 
@@ -655,6 +737,16 @@ def _build_gemini_env(
655
737
  value = (env.get(key) or "").strip()
656
738
  if "matrx" in value.lower() or "mtrx.so" in value.lower():
657
739
  env.pop(key, None)
740
+ _remove_node_require(env, bootstrap_script)
741
+ _remove_sandbox_env_keys(
742
+ env,
743
+ (
744
+ "MTRX_GEMINI_CUSTOM_HEADERS_B64",
745
+ "MTRX_CODE_ASSIST_ENDPOINT_B64",
746
+ "MTRX_GEMINI_API_ENDPOINT_B64",
747
+ "MTRX_GEMINI_API_KEY_AUTH_MECHANISM",
748
+ ),
749
+ )
658
750
 
659
751
  custom_headers = (env.get("GEMINI_CLI_CUSTOM_HEADERS") or "").strip().lower()
660
752
  if "x-matrx-" in custom_headers:
@@ -718,10 +810,13 @@ def _build_claude_env(
718
810
  if project_id:
719
811
  custom_headers += f"\nx-matrx-project-id: {project_id}"
720
812
  _git_branch, _git_commit = _capture_git_context(_workspace_cwd(env))
813
+ _git_repo_url = _capture_git_remote_url(_workspace_cwd(env))
721
814
  if _git_branch:
722
815
  custom_headers += f"\nx-matrx-branch: {_git_branch}"
723
816
  if _git_commit:
724
817
  custom_headers += f"\nx-matrx-commit: {_git_commit}"
818
+ if _git_repo_url:
819
+ custom_headers += f"\nx-matrx-repo-url: {_git_repo_url}"
725
820
  if env_b64:
726
821
  custom_headers += f"\nx-matrx-env: {env_b64}"
727
822
  env["ANTHROPIC_CUSTOM_HEADERS"] = custom_headers
@@ -973,6 +1068,12 @@ def _validate_gemini_launch_plan(plan: LaunchPlan, state: dict) -> None:
973
1068
  if "x-matrx-agent-id:" not in custom_headers:
974
1069
  raise ValueError("Gemini Matrx route is missing GEMINI_CLI_CUSTOM_HEADERS with X-Matrx-Agent-Id")
975
1070
 
1071
+ sandbox_env = (plan.env.get("SANDBOX_ENV") or "").strip()
1072
+ if "MTRX_GEMINI_CUSTOM_HEADERS_B64=" not in sandbox_env:
1073
+ raise ValueError("Gemini Matrx route is missing sandbox bootstrap for GEMINI_CLI_CUSTOM_HEADERS")
1074
+ if "MTRX_CODE_ASSIST_ENDPOINT_B64=" not in sandbox_env:
1075
+ raise ValueError("Gemini Matrx route is missing sandbox bootstrap for CODE_ASSIST_ENDPOINT")
1076
+
976
1077
 
977
1078
  def _validate_codex_launch_plan(plan: LaunchPlan, state: dict) -> None:
978
1079
  if plan.route != "matrx":
@@ -144,7 +144,7 @@ def _build_parser() -> argparse.ArgumentParser:
144
144
  cursor.add_argument(
145
145
  "--launch",
146
146
  action="store_true",
147
- help="Launch Cursor with proxy env (required for traffic to flow)",
147
+ help="Launch Cursor after applying the current Matrx settings",
148
148
  )
149
149
 
150
150
  return parser
@@ -932,6 +932,7 @@ def _cmd_launch(tool: str, route: str | None, remainder: list[str]) -> int:
932
932
 
933
933
  def _cmd_cursor(args) -> int:
934
934
  from matrx.cli.cursor_hooks import install_mtrx_hooks, is_mtrx_hooks_installed
935
+ from matrx.cli.cursor_service import is_proxy_running
935
936
  from matrx.cli.cursor_launcher import find_cursor_executable
936
937
 
937
938
  route = args.route
@@ -942,10 +943,13 @@ def _cmd_cursor(args) -> int:
942
943
  hooks_installed = is_mtrx_hooks_installed()
943
944
  base_url = ensure_v1_url(state.get("auth", {}).get("matrx", {}).get("base_url"))
944
945
  prev_path = config_dir() / "cursor-previous-settings.json"
946
+ legacy_proxy_prev_path = config_dir() / "cursor-proxy-previous-settings.json"
945
947
  configured = prev_path.exists()
948
+ legacy_proxy_active = is_proxy_running() or legacy_proxy_prev_path.exists()
946
949
  print("MTRX Cursor integration:")
947
950
  print(f" mode: {'Base URL override (all models)' if configured else 'not configured'}")
948
951
  print(f" hooks: {'active (sessionEnd, stop → telemetry)' if hooks_installed else 'not installed'}")
952
+ print(f" legacy MITM proxy: {'active' if legacy_proxy_active else 'not active'}")
949
953
  if configured:
950
954
  print(f" matrx: {base_url}")
951
955
  return 0
@@ -1001,6 +1005,11 @@ def _cmd_cursor(args) -> int:
1001
1005
  "Use `mtrx use cursor direct` to opt out.",
1002
1006
  )
1003
1007
 
1008
+ # Ensure legacy MITM routing is fully torn down before enabling the
1009
+ # current Cursor base-URL override flow. Leaving both active causes
1010
+ # Cursor traffic to keep flowing through the old telemetry proxy.
1011
+ _restore_cursor_if_needed()
1012
+
1004
1013
  # Configure Cursor's Override Base URL — sends chat to MTRX (any model: Claude, GPT-5, Gemini, etc.)
1005
1014
  prev_path = config_dir() / "cursor-previous-settings.json"
1006
1015
  previous = configure_cursor_for_proxy(matrx_proxy_url, mx_key)