mtrx-cli 0.1.25 → 0.1.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,20 +17,80 @@ import asyncio
 import json
 import logging
 import re
-from typing import Any
+from typing import Any, AsyncGenerator
 
 import httpx
 
 logger = logging.getLogger(__name__)
 
-# Cursor AI RPC paths (Connect protocol). RunSSE = main chat, StreamCpp = code completion.
+# Cursor AI RPC paths (Connect protocol).
+# Cursor uses aiserver.v1.AiServerService for all AI endpoints.
 _AI_PATH_PATTERNS = (
+    r"AiServerService",
+    # r"AiService" intentionally omitted — too broad, matches non-inference endpoints like
+    # KnowledgeBaseList, UpdateVscodeProfile, GetDefaultModel. Actual inference methods
+    # on AiService are all covered by their specific method-level patterns below.
+    r"ChatService",
+    r"StreamUnifiedChat",
+    r"StreamDiff",
+    r"GetCompletion",
     r"RunSSE",
     r"StreamCpp",
     r"BidiAppend",
     r"AgentService",
-    r"AiService",
+    r"/v1/messages",  # Anthropic Messages API
+    r"/v1/chat/completions",  # OpenAI Chat Completions
 )
+_REROUTABLE_AI_PATH_PATTERNS = (
+    r"AiService/.+(Stream|Run|Chat|Completion)",
+    r"StreamUnifiedChat",
+    r"RunSSE",
+    r"AgentService",
+    r"ChatService",
+    r"CppService/.+(Complete|Stream)",
+    r"/v1/messages",
+    r"/v1/chat/completions",
+)
+_AI_SERVICE_CANDIDATE_PATTERNS = (
+    r"AiServerService",
+    # r"AiService" intentionally omitted — see note in _AI_PATH_PATTERNS above.
+    r"AgentService",
+    r"ChatService",
+    r"CppService",
+    r"composer",
+)
+
+
+def is_ai_candidate_request(method: str, path: str, headers: dict[str, str] | None = None) -> bool:
+    """Return True when the request looks like Cursor/model traffic worth inspecting."""
+    if method.upper() != "POST" or not path:
+        return False
+    if is_ai_path(path):
+        return True
+    content_type = ((headers or {}).get("content-type") or "").lower()
+    return any(re.search(p, path, re.IGNORECASE) for p in _AI_SERVICE_CANDIDATE_PATTERNS) and (
+        "connect+proto" in content_type
+        or "application/proto" in content_type
+        or "grpc" in content_type
+        or "application/json" in content_type
+    )
+
+
+def is_reroutable_ai_path(path: str) -> bool:
+    """Return True if the path is a supported AI endpoint for active reroute."""
+    if not path:
+        return False
+    return any(re.search(p, path, re.IGNORECASE) for p in _REROUTABLE_AI_PATH_PATTERNS)
+
+
+def classify_ai_request(method: str, path: str, headers: dict[str, str] | None = None) -> dict[str, bool]:
+    """Classify Cursor requests for logging and reroute decisions."""
+    candidate = is_ai_candidate_request(method, path, headers)
+    reroutable = candidate and is_reroutable_ai_path(path)
+    return {
+        "candidate": candidate,
+        "reroutable": reroutable,
+    }
 
 
 def is_ai_path(path: str) -> bool:
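A quick illustration of the new two-tier check, sketched against the functions above (the sample path is hypothetical): a Connect-proto POST to an AiServerService chat method is both a candidate and reroutable, while a non-POST request is neither.

    classify_ai_request(
        "POST",
        "/aiserver.v1.AiServerService/StreamUnifiedChat",  # hypothetical path
        {"content-type": "application/connect+proto"},
    )
    # -> {"candidate": True, "reroutable": True}

    classify_ai_request("GET", "/v1/chat/completions", None)
    # -> {"candidate": False, "reroutable": False}  (non-POST never qualifies)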
@@ -40,6 +100,122 @@ def is_ai_path(path: str) -> bool:
     return any(re.search(p, path, re.IGNORECASE) for p in _AI_PATH_PATTERNS)
 
 
+def _detect_provider_from_model(model: str) -> str:
+    normalized = (model or "").strip().lower()
+    if normalized.startswith("claude-"):
+        return "anthropic"
+    if normalized.startswith("gemini"):
+        return "google"
+    return "openai"
+
+
+def _extract_request_prompt(*, req_proto: Any, extracted: dict[str, Any]) -> str:
+    parts: list[str] = []
+
+    summary = (extracted.get("conversation_summary") or "").strip()
+    if summary:
+        parts.append(summary)
+
+    debug_info = (getattr(getattr(req_proto, "cmd_k_debug_info", None), "debug_info", "") or "").strip()
+    if debug_info:
+        parts.append(debug_info)
+
+    rules = [
+        (getattr(rule, "rule_definition", "") or "").strip()
+        for rule in list(getattr(req_proto, "rules", []) or [])[:3]
+        if (getattr(rule, "rule_definition", "") or "").strip()
+    ]
+    if rules:
+        parts.append("Project rules:\n" + "\n".join(f"- {rule}" for rule in rules))
+
+    doc_ids = [
+        value.strip()
+        for value in list(getattr(getattr(req_proto, "legacy_context", None), "documentation_identifiers", []) or [])[:5]
+        if isinstance(value, str) and value.strip()
+    ]
+    if doc_ids:
+        parts.append("Relevant docs:\n" + "\n".join(f"- {value}" for value in doc_ids))
+
+    files = [
+        entry.get("path", "").strip()
+        for entry in extracted.get("files", [])[:8]
+        if entry.get("path")
+    ]
+    if files:
+        parts.append("Active files:\n" + "\n".join(f"- {path}" for path in files))
+
+    edits = [
+        entry.get("path", "").strip()
+        for entry in extracted.get("edits", [])[:5]
+        if entry.get("path")
+    ]
+    if edits:
+        parts.append("Recent edits:\n" + "\n".join(f"- {path}" for path in edits))
+
+    return "\n\n".join(part for part in parts if part).strip()
+
+
+def _build_matrx_upstream_request(
+    *,
+    req_proto: Any,
+    extracted: dict[str, Any],
+) -> tuple[str, dict[str, str], dict[str, Any]] | None:
+    model = (extracted.get("model") or "").strip()
+    prompt = _extract_request_prompt(req_proto=req_proto, extracted=extracted)
+    if not model or not prompt:
+        return None
+
+    provider = _detect_provider_from_model(model)
+    if provider == "anthropic":
+        return (
+            "/v1/messages",
+            {"x-api-key": ""},
+            {
+                "model": model,
+                "max_tokens": 2048,
+                "messages": [{"role": "user", "content": prompt}],
+            },
+        )
+
+    return (
+        "/v1/chat/completions",
+        {"authorization": ""},
+        {
+            "model": model,
+            "messages": [{"role": "user", "content": prompt}],
+            "temperature": 0.2,
+        },
+    )
+
+
+def _build_cursor_response_bytes(*, text: str, usage: dict[str, Any] | None = None) -> bytes | None:
+    try:
+        from matrx.cli.cursor_connect import build_connect_frame
+        from matrx.cli.cursor_proto import _PROTOS_AVAILABLE, server_chat_pb2  # type: ignore[import]
+    except Exception:
+        return None
+
+    if not _PROTOS_AVAILABLE:
+        return None
+
+    frames: list[bytes] = []
+    text = (text or "").strip()
+    if text:
+        content_resp = server_chat_pb2.StreamUnifiedChatWithToolsResponse()
+        content_resp.content.text = text
+        frames.append(build_connect_frame(0x00, content_resp.SerializeToString()))
+
+    if usage:
+        usage_resp = server_chat_pb2.StreamUnifiedChatWithToolsResponse()
+        usage_resp.usage.input_tokens = int(usage.get("input_tokens", 0) or 0)
+        usage_resp.usage.output_tokens = int(usage.get("output_tokens", 0) or 0)
+        if usage_resp.usage.input_tokens or usage_resp.usage.output_tokens:
+            frames.append(build_connect_frame(0x00, usage_resp.SerializeToString()))
+
+    frames.append(build_connect_frame(0x02, b"{}"))
+    return b"".join(frames)
+
+
 def _cursor_model_to_openai(cursor_model: str) -> str:
     """Map Cursor model names to OpenAI-style names MTRX expects."""
     # Cursor uses names like "claude-sonnet-4" or "gpt-4o" - usually compatible
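build_connect_frame comes from matrx.cli.cursor_connect and is not shown in this diff; judging from the frame layout documented further down ([flags:1][length:4 BE][payload]), it presumably reduces to something like this sketch:

    import struct

    def build_connect_frame(flags: int, payload: bytes) -> bytes:  # hedged sketch
        # Connect framing: one flags byte, a 4-byte big-endian length, then the payload.
        return struct.pack(">BI", flags, len(payload)) + payload

    # The end-of-stream frame (flags=0x02, empty JSON trailer) matches the
    # _EOS_FRAME constant introduced later in this diff:
    assert build_connect_frame(0x02, b"{}") == b"\x02\x00\x00\x00\x02{}"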
@@ -54,7 +230,12 @@ def _build_search_query(extracted: dict[str, Any]) -> str:
     query_parts = [f.get("path", "").strip() for f in files[:3] if f.get("path")]
     if query_parts:
         return " ".join(query_parts)
-    return (extracted.get("conversation_summary") or "").strip()
+    summary = extracted.get("conversation_summary") or ""
+    if isinstance(summary, str):
+        return summary.strip()
+    if isinstance(summary, dict):
+        return str(summary.get("summary") or "").strip()
+    return str(summary).strip()
 
 
 def _prepend_context_items(context_items: Any, injected_items: list[Any]) -> None:
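The old fallback called .strip() directly on conversation_summary, which raised AttributeError whenever the summary arrived as a dict rather than a string. The new branches handle both shapes; a made-up example:

    _build_search_query({"files": [], "conversation_summary": {"summary": " fix auth "}})
    # -> "fix auth"  (the 0.1.25 code would have raised AttributeError here)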
@@ -110,6 +291,85 @@ def _inject_memory_context_items(
     return len(injected_items)
 
 
+# Pre-built Connect end-of-stream frame: flags=0x02, payload=b"{}"
+# Frame format: [flags:1][length:4 BE][payload] → \x02 \x00\x00\x00\x02 {}
+_EOS_FRAME = b"\x02\x00\x00\x00\x02{}"
+
+
+async def _stream_rerouted_frames(
+    url: str,
+    payload: dict[str, Any],
+    headers: dict[str, str],
+    provider: str,
+) -> AsyncGenerator[bytes, None]:
+    """POST to MTRX and yield Connect-framed protobuf text deltas as SSE events arrive.
+
+    Parses each ``data: `` line from the SSE stream, extracts the text delta
+    (Anthropic ``content_block_delta`` or OpenAI ``choices[].delta.content``),
+    wraps it in a Connect data frame (flags=0x00), and yields it immediately so
+    Cursor sees tokens arrive incrementally.
+
+    Always terminates with a Connect end-of-stream frame (flags=0x02). Any error
+    causes a silent fallback: the generator yields only the EOS frame so Cursor
+    sees an empty stream rather than a broken connection.
+    """
+    try:
+        from matrx.cli.cursor_connect import build_connect_frame
+        from matrx.cli.cursor_proto import _PROTOS_AVAILABLE, server_chat_pb2  # type: ignore[import]
+    except Exception:
+        yield _EOS_FRAME
+        return
+
+    if not _PROTOS_AVAILABLE:
+        yield _EOS_FRAME
+        return
+
+    try:
+        async with httpx.AsyncClient(
+            timeout=httpx.Timeout(timeout=90.0, connect=5.0)
+        ) as client:
+            async with client.stream("POST", url, json=payload, headers=headers) as resp:
+                if resp.status_code >= 400:
+                    logger.info(
+                        "cursor_reroute: stream upstream returned %s", resp.status_code
+                    )
+                    yield _EOS_FRAME
+                    return
+                async for raw_line in resp.aiter_lines():
+                    if not raw_line.startswith("data: "):
+                        continue
+                    data_str = raw_line[6:].strip()
+                    if data_str == "[DONE]":
+                        break
+                    try:
+                        chunk = json.loads(data_str)
+                    except json.JSONDecodeError:
+                        continue
+
+                    text = ""
+                    if provider == "anthropic":
+                        # Mirrors extract_from_anthropic_sse_response inner loop
+                        if chunk.get("type") == "content_block_delta":
+                            delta = chunk.get("delta") or {}
+                            if delta.get("type") == "text_delta":
+                                text = delta.get("text") or ""
+                    else:
+                        # Mirrors extract_from_openai_sse_response inner loop
+                        for choice in chunk.get("choices") or []:
+                            delta = choice.get("delta") or {}
+                            text += delta.get("content") or ""
+
+                    if text:
+                        resp_msg = server_chat_pb2.StreamUnifiedChatWithToolsResponse()
+                        resp_msg.content.text = text
+                        yield build_connect_frame(0x00, resp_msg.SerializeToString())
+
+    except Exception:
+        logger.warning("cursor_reroute: streaming reroute error", exc_info=True)
+
+    yield _EOS_FRAME
+
+
 async def try_reroute_to_matrx(
     *,
     path: str,
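For reference, the per-chunk extraction in the generator above boils down to the following, run here against hand-written sample events (not captured traffic):

    import json

    samples = (
        ("anthropic", 'data: {"type": "content_block_delta", "delta": {"type": "text_delta", "text": "Hel"}}'),
        ("openai", 'data: {"choices": [{"delta": {"content": "Hel"}}]}'),
    )
    for provider, line in samples:
        chunk = json.loads(line[len("data: "):])
        text = ""
        if provider == "anthropic":
            delta = chunk.get("delta") or {}
            if chunk.get("type") == "content_block_delta" and delta.get("type") == "text_delta":
                text = delta.get("text") or ""
        else:
            for choice in chunk.get("choices") or []:
                text += (choice.get("delta") or {}).get("content") or ""
        assert text == "Hel"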
@@ -121,23 +381,94 @@ async def try_reroute_to_matrx(
     session_id: str | None = None,
     group_id: str | None = None,
     project_id: str | None = None,
-) -> tuple[bool, dict[str, str], bytes | None, bool] | None:
+) -> tuple[bool, dict[str, str], AsyncGenerator[bytes, None] | bytes | None, bool] | None:
     """
     Attempt to reroute a Cursor AI request through MTRX.
 
     Returns:
-        (success, response_headers, response_body, is_streaming) if handled,
+        (success, response_headers, response_body_or_generator, is_streaming) if handled,
         None to fall back to normal forward.
+        response_body_or_generator is an AsyncGenerator[bytes, None] of Connect frames;
+        the proxy must iterate it using chunked transfer encoding.
     """
-    if method != "POST" or not is_ai_path(path):
+    classification = classify_ai_request(method, path, req_headers)
+    if not classification["candidate"]:
+        return None
+    if not classification["reroutable"]:
+        logger.info("cursor_reroute: candidate AI path not yet reroutable: %s", path)
+        return None
+
+    try:
+        from matrx.cli.cursor_connect import is_connect_proto_request, parse_connect_frame
+        from matrx.cli.cursor_extraction import (
+            _PROTOS_AVAILABLE,
+            extract_from_request,
+            parse_request_proto,
+            ship_ai_telemetry,
+        )
+    except Exception:
         return None
 
-    # TODO: Full protobuf parsing. Cursor uses Connect/gRPC with binary frames.
-    # For now we don't have the proto conversion - fall back to forward.
-    # When implemented: parse req_body, extract messages+model, call MTRX,
-    # convert response back to Cursor's gRPC format.
-    logger.debug("cursor_reroute: path=%s would reroute (protobuf conversion not yet implemented)", path)
-    return None
+    if not _PROTOS_AVAILABLE or not is_connect_proto_request(req_headers):
+        logger.info("cursor_reroute: reroutable path lacks compiled proto support: %s", path)
+        return None
+
+    import gzip as _gzip
+
+    body = req_body
+    ce = req_headers.get("content-encoding", "").lower()
+    if ce == "gzip" or (len(body) >= 2 and body[:2] == b"\x1f\x8b"):
+        try:
+            body = _gzip.decompress(body)
+        except Exception:
+            return None
+
+    try:
+        _, proto_bytes = parse_connect_frame(body)
+    except ValueError:
+        proto_bytes = body
+
+    req_proto = parse_request_proto(proto_bytes)
+    if req_proto is None:
+        logger.info("cursor_reroute: parse_request_proto failed for %s", path)
+        return None
+
+    extracted = extract_from_request(req_proto)
+    extracted["session_id"] = extracted.get("session_id") or session_id or ""
+    asyncio.create_task(ship_ai_telemetry(extracted, matrx_base_url, matrx_key))
+
+    upstream_request = _build_matrx_upstream_request(req_proto=req_proto, extracted=extracted)
+    if upstream_request is None:
+        logger.info("cursor_reroute: insufficient prompt/model data for %s", path)
+        return None
+
+    upstream_path, auth_headers, payload = upstream_request
+    headers = {
+        "X-Matrx-Key": matrx_key,
+        "Content-Type": "application/json",
+    }
+    if "x-api-key" in auth_headers:
+        headers["x-api-key"] = matrx_key
+    if "authorization" in auth_headers:
+        headers["Authorization"] = f"Bearer {matrx_key}"
+
+    url = f"{matrx_base_url.rstrip('/')}{upstream_path}"
+    provider = _detect_provider_from_model(str(payload.get("model", "")))
+    gen = _stream_rerouted_frames(
+        url=url,
+        payload={**payload, "stream": True},
+        headers=headers,
+        provider=provider,
+    )
+    return (
+        True,
+        {
+            "content-type": req_headers.get("content-type", "application/connect+proto"),
+            "connect-protocol-version": "1",
+        },
+        gen,
+        True,
+    )
 
 
 # ---------------------------------------------------------------------------
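The new contract pushes streaming onto the caller: when is_streaming is true, the third tuple element is an async generator of ready-to-send Connect frames. A hedged sketch of the consuming side (the proxy loop itself is not part of this diff; write_chunk is a hypothetical writer, and the URL/key values are placeholders):

    # inside the proxy's async request handler
    result = await try_reroute_to_matrx(
        path="/aiserver.v1.AiServerService/StreamUnifiedChat",  # hypothetical
        method="POST",
        req_headers=req_headers,
        req_body=req_body,
        matrx_base_url="https://matrx.example",  # placeholder
        matrx_key="placeholder-key",
    )
    if result is not None:
        _ok, resp_headers, body_or_gen, is_streaming = result
        if is_streaming:
            async for frame in body_or_gen:  # each item is one Connect frame
                await write_chunk(frame)     # hypothetical chunked-transfer writer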
@@ -189,22 +520,50 @@ async def try_inject_context(
     )
     from matrx.cli.cursor_extraction import (
         _PROTOS_AVAILABLE,
+        _raw_extract_request,
+        extract_from_json_request,
         extract_from_request,
         parse_request_proto,
         ship_ai_telemetry,
     )
 
+    # JSON API path (Anthropic Messages API, OpenAI Chat Completions)
+    content_type = req_headers.get("content-type", "").lower()
+    if "application/json" in content_type:
+        extracted = extract_from_json_request(req_body)
+        extracted["session_id"] = extracted.get("session_id") or session_id
+        asyncio.create_task(ship_ai_telemetry(extracted, matrx_base_url, matrx_key))
+        return None
+
     if not is_connect_proto_request(req_headers):
         return None
 
-    # Parse Connect envelope → raw proto bytes
-    flags, proto_bytes = parse_connect_frame(req_body)
+    # Decompress body if gzip-encoded
+    import gzip as _gzip
+    body = req_body
+    ce = req_headers.get("content-encoding", "").lower()
+    if ce == "gzip" or (len(body) >= 2 and body[:2] == b"\x1f\x8b"):
+        try:
+            body = _gzip.decompress(body)
+        except Exception:
+            return None
+
+    # Parse Connect envelope → raw proto bytes.
+    # Fall back to treating the body as raw protobuf if Connect framing is absent
+    # (some Cursor versions send raw proto without the 5-byte envelope).
+    try:
+        flags, proto_bytes = parse_connect_frame(body)
+    except ValueError:
+        flags, proto_bytes = 0, body
 
-    # Deserialize proto
+    # Deserialize proto (compiled path); fall back to raw wire parsing
     req_proto = parse_request_proto(proto_bytes)
+    if req_proto is not None:
+        extracted = extract_from_request(req_proto)
+    else:
+        extracted = _raw_extract_request(proto_bytes)
 
     # Extract structured data and ship telemetry fire-and-forget
-    extracted = extract_from_request(req_proto)
     extracted["session_id"] = extracted.get("session_id") or session_id
     asyncio.create_task(ship_ai_telemetry(extracted, matrx_base_url, matrx_key))
 
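The gzip sniff added in both code paths keys off the stream's two magic bytes, so a compressed body is handled even when the content-encoding header is missing. A standalone sketch:

    import gzip

    body = gzip.compress(b"\x00\x00\x00\x00\x05hello")  # a made-up framed payload
    assert body[:2] == b"\x1f\x8b"                      # GZIP magic, same check as above
    assert gzip.decompress(body) == b"\x00\x00\x00\x00\x05hello"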
@@ -582,16 +582,21 @@ def _build_codex_env(
     env_b64 = base64.b64encode(json.dumps(env_snap).encode()).decode() if env_snap else ""
     session_id = str(uuid.uuid4())
     group_id, project_id = _resolve_matrx_context_overrides(state, env)
+    codex_root = ensure_root_url(matrx.get("base_url"))
+    if not group_id:
+        group_id = _auto_resolve_default_group_id(codex_root, mx_key)
     runtime_agent_id = (
         (orchestration or {}).get("agent_id")
         or _runtime_agent_basename("codex")[0]
     )
+    workspace_fp = _compute_workspace_fingerprint(_workspace_cwd(env))
     header_parts = [
         f'"Authorization" = "Bearer {provider_bearer}"',
         f'"X-Matrx-Key" = "{mx_key}"',
         f'"X-Matrx-Agent-Id" = "{runtime_agent_id}"',
         '"X-Matrx-Provider" = "codex"',
         f'"X-Matrx-Session-Id" = "{session_id}"',
+        f'"X-Matrx-Workspace" = "{workspace_fp}"',
     ]
     if group_id:
         header_parts.append(f'"X-Matrx-Group" = "{group_id}"')
@@ -655,24 +660,32 @@ def _build_gemini_env(
         env.pop(key, None)
     env.pop("MTRX_KEY", None)
     group_id, project_id = _resolve_matrx_context_overrides(state, env)
+    if not group_id:
+        group_id = _auto_resolve_default_group_id(proxy_root, mx_key)
     session_id = str(uuid.uuid4())
     runtime_agent_id = (
         (orchestration or {}).get("agent_id")
         or _runtime_agent_basename("gemini")[0]
     )
+    workspace_fp = _compute_workspace_fingerprint(_workspace_cwd(env))
     ctx_params: list[str] = []
     if project_id:
         ctx_params.append(f"mtrx_project={project_id}")
+    if group_id:
+        ctx_params.append(f"mtrx_group={group_id}")
     if session_id:
         ctx_params.append(f"mtrx_session={session_id}")
     if runtime_agent_id:
         ctx_params.append(f"mtrx_agent={runtime_agent_id}")
+    ctx_params.append(f"mtrx_workspace={workspace_fp}")
     git_branch, git_commit = _capture_git_context(_workspace_cwd(env))
     git_repo_url = _capture_git_remote_url(_workspace_cwd(env))
     if git_branch:
         ctx_params.append(f"mtrx_branch={git_branch}")
     if git_commit:
         ctx_params.append(f"mtrx_commit={git_commit}")
+    if git_repo_url:
+        ctx_params.append(f"mtrx_repo_url={git_repo_url}")
 
     query_suffix = f"?{'&'.join(ctx_params)}" if ctx_params else ""
     env_snap = _capture_env_snapshot()
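With every field present, the Gemini context parameters now serialize to a query suffix shaped like this (all values hypothetical; note the values are appended unencoded, exactly as in the code above):

    ctx_params = [
        "mtrx_project=proj-123",
        "mtrx_group=grp-456",
        "mtrx_session=6f1c2d3e-0000-4000-8000-000000000000",
        "mtrx_agent=gemini-agent",
        "mtrx_workspace=0123456789abcdef",
        "mtrx_branch=main",
        "mtrx_commit=abc1234",
        "mtrx_repo_url=https://github.com/acme/app.git",
    ]
    query_suffix = f"?{'&'.join(ctx_params)}"
    # -> "?mtrx_project=proj-123&mtrx_group=grp-456&...&mtrx_repo_url=https://github.com/acme/app.git"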
@@ -682,6 +695,7 @@ def _build_gemini_env(
         f"x-matrx-agent-id: {runtime_agent_id}",
         "x-matrx-provider: gemini_code",
         f"x-matrx-session-id: {session_id}",
+        f"x-matrx-workspace: {workspace_fp}",
     ]
     if group_id:
         custom_headers.append(f"x-matrx-group: {group_id}")
@@ -763,6 +777,35 @@ def _build_gemini_env(
         return env, "missing_auth"
 
 
+def _compute_workspace_fingerprint(cwd: str) -> str:
+    return hashlib.sha256(cwd.encode("utf-8")).hexdigest()[:16]
+
+
+def _auto_resolve_default_group_id(base_url: str, mx_key: str) -> str:
+    """Fetch the user's groups; return the sole/default group ID if unambiguous."""
+    if not base_url or not mx_key:
+        return ""
+    try:
+        with httpx.Client(timeout=5) as client:
+            resp = client.get(
+                f"{base_url.rstrip('/')}/v1/groups",
+                headers={"X-Matrx-Key": mx_key},
+            )
+            if resp.status_code != 200:
+                return ""
+            groups = resp.json().get("groups", [])
+            if not groups:
+                return ""
+            if len(groups) == 1:
+                return str(groups[0].get("id", ""))
+            for g in groups:
+                if g.get("is_default"):
+                    return str(g.get("id", ""))
+            return ""
+    except (httpx.HTTPError, Exception):
+        return ""
+
+
 def _build_claude_env(
     state: dict,
     route: str,
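The workspace fingerprint is just the first 16 hex characters of a SHA-256 over the working directory, giving a short identifier that is stable per path. A sketch (the sample path is hypothetical):

    import hashlib

    fp = hashlib.sha256("/home/dev/project".encode("utf-8")).hexdigest()[:16]
    assert len(fp) == 16
    assert all(c in "0123456789abcdef" for c in fp)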
@@ -789,12 +832,14 @@ def _build_claude_env(
     env["ANTHROPIC_BASE_URL"] = proxy_root
     env.pop("ANTHROPIC_API_KEY", None)
     group_id, project_id = _resolve_matrx_context_overrides(state, env)
+    if not group_id:
+        group_id = _auto_resolve_default_group_id(proxy_root, mx_key)
     session_id = str(uuid.uuid4())
     runtime_agent_id = (
         (orchestration or {}).get("agent_id")
         or _runtime_agent_basename("claude")[0]
     )
-    # Evolutionary scaffolding: env snapshot for AI context injection
+    workspace_fp = _compute_workspace_fingerprint(_workspace_cwd(env))
     env_snap = _capture_env_snapshot()
     env_b64 = base64.b64encode(json.dumps(env_snap).encode()).decode() if env_snap else ""
     custom_headers = "\n".join(
@@ -805,6 +850,7 @@ def _build_claude_env(
             f"x-matrx-session-id: {session_id}",
         ]
     )
+    custom_headers += f"\nx-matrx-workspace: {workspace_fp}"
     if group_id:
         custom_headers += f"\nx-matrx-group: {group_id}"
     if project_id: