mtrx-cli 0.1.24 → 0.1.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,23 +13,84 @@ Refs: cursor-tap (https://github.com/burpheart/cursor-tap), everestmz/cursor-rpc
13
13
 
14
14
  from __future__ import annotations
15
15
 
16
+ import asyncio
16
17
  import json
17
18
  import logging
18
19
  import re
19
20
  from typing import Any
20
21
 
22
+ import httpx
23
+
21
24
  logger = logging.getLogger(__name__)
22
25
 
23
- # Cursor AI RPC paths (Connect protocol). RunSSE = main chat, StreamCpp = code completion.
26
# Cursor AI RPC paths (Connect protocol).
# Cursor uses aiserver.v1.AiServerService for all AI endpoints.
# Broad detection set used by is_ai_path(): a path matching ANY of these
# regexes (case-insensitive re.search) is treated as AI/LLM traffic.
_AI_PATH_PATTERNS = (
    r"AiServerService",
    r"AiService",
    r"ChatService",
    r"StreamUnifiedChat",
    r"StreamDiff",
    r"GetCompletion",
    r"RunSSE",
    r"StreamCpp",
    r"BidiAppend",
    r"AgentService",
    r"/v1/messages",  # Anthropic Messages API
    r"/v1/chat/completions",  # OpenAI Chat Completions
)
# Narrower set: only these endpoints are supported for ACTIVE reroute to
# MTRX (see is_reroutable_ai_path); everything else is forwarded unchanged.
_REROUTABLE_AI_PATH_PATTERNS = (
    r"AiService/.+(Stream|Run|Chat|Completion)",
    r"StreamUnifiedChat",
    r"RunSSE",
    r"AgentService",
    r"ChatService",
    r"CppService/.+(Complete|Stream)",
    r"/v1/messages",
    r"/v1/chat/completions",
)
# Loose service-name hints; combined with a content-type check in
# is_ai_candidate_request() to flag traffic the strict list above misses.
_AI_SERVICE_CANDIDATE_PATTERNS = (
    r"AiServerService",
    r"AiService",
    r"AgentService",
    r"ChatService",
    r"CppService",
    r"composer",
)
31
60
 
32
61
 
62
def is_ai_candidate_request(method: str, path: str, headers: dict[str, str] | None = None) -> bool:
    """Return True when the request looks like Cursor/model traffic worth inspecting."""
    if not path or method.upper() != "POST":
        return False
    if is_ai_path(path):
        return True
    # Looser fallback: a service-name hint in the path is only trusted when
    # the payload content-type is one Cursor actually uses for AI calls.
    ctype = ((headers or {}).get("content-type") or "").lower()
    has_ai_body = any(
        marker in ctype
        for marker in ("connect+proto", "application/proto", "grpc", "application/json")
    )
    if not has_ai_body:
        return False
    return any(re.search(pattern, path, re.IGNORECASE) for pattern in _AI_SERVICE_CANDIDATE_PATTERNS)
75
+
76
+
77
def is_reroutable_ai_path(path: str) -> bool:
    """Return True if the path is a supported AI endpoint for active reroute."""
    if not path:
        return False
    for pattern in _REROUTABLE_AI_PATH_PATTERNS:
        if re.search(pattern, path, re.IGNORECASE):
            return True
    return False
82
+
83
+
84
def classify_ai_request(method: str, path: str, headers: dict[str, str] | None = None) -> dict[str, bool]:
    """Classify Cursor requests for logging and reroute decisions.

    Returns a dict with two flags: ``candidate`` (looks like AI traffic) and
    ``reroutable`` (candidate AND on a supported reroute endpoint).
    """
    is_candidate = is_ai_candidate_request(method, path, headers)
    return {
        "candidate": is_candidate,
        "reroutable": is_candidate and is_reroutable_ai_path(path),
    }
92
+
93
+
33
94
def is_ai_path(path: str) -> bool:
    """Return True if this path is an AI/LLM endpoint we should reroute to MTRX."""
    if not path:
        return False
    return any(re.search(pattern, path, re.IGNORECASE) for pattern in _AI_PATH_PATTERNS)
38
99
 
39
100
 
101
+ def _detect_provider_from_model(model: str) -> str:
102
+ normalized = (model or "").strip().lower()
103
+ if normalized.startswith("claude-"):
104
+ return "anthropic"
105
+ if normalized.startswith("gemini"):
106
+ return "google"
107
+ return "openai"
108
+
109
+
110
+ def _extract_request_prompt(*, req_proto: Any, extracted: dict[str, Any]) -> str:
111
+ parts: list[str] = []
112
+
113
+ summary = (extracted.get("conversation_summary") or "").strip()
114
+ if summary:
115
+ parts.append(summary)
116
+
117
+ debug_info = (getattr(getattr(req_proto, "cmd_k_debug_info", None), "debug_info", "") or "").strip()
118
+ if debug_info:
119
+ parts.append(debug_info)
120
+
121
+ rules = [
122
+ (getattr(rule, "rule_definition", "") or "").strip()
123
+ for rule in list(getattr(req_proto, "rules", []) or [])[:3]
124
+ if (getattr(rule, "rule_definition", "") or "").strip()
125
+ ]
126
+ if rules:
127
+ parts.append("Project rules:\n" + "\n".join(f"- {rule}" for rule in rules))
128
+
129
+ doc_ids = [
130
+ value.strip()
131
+ for value in list(getattr(getattr(req_proto, "legacy_context", None), "documentation_identifiers", []) or [])[:5]
132
+ if isinstance(value, str) and value.strip()
133
+ ]
134
+ if doc_ids:
135
+ parts.append("Relevant docs:\n" + "\n".join(f"- {value}" for value in doc_ids))
136
+
137
+ files = [
138
+ entry.get("path", "").strip()
139
+ for entry in extracted.get("files", [])[:8]
140
+ if entry.get("path")
141
+ ]
142
+ if files:
143
+ parts.append("Active files:\n" + "\n".join(f"- {path}" for path in files))
144
+
145
+ edits = [
146
+ entry.get("path", "").strip()
147
+ for entry in extracted.get("edits", [])[:5]
148
+ if entry.get("path")
149
+ ]
150
+ if edits:
151
+ parts.append("Recent edits:\n" + "\n".join(f"- {path}" for path in edits))
152
+
153
+ return "\n\n".join(part for part in parts if part).strip()
154
+
155
+
156
def _build_matrx_upstream_request(
    *,
    req_proto: Any,
    extracted: dict[str, Any],
) -> tuple[str, dict[str, str], dict[str, Any]] | None:
    """Build (path, auth-header markers, JSON payload) for the MTRX upstream call.

    Returns None when the request lacks a model name or any prompt text.
    The auth-header dict only marks WHICH header scheme applies; the caller
    substitutes the real credential.
    """
    model = (extracted.get("model") or "").strip()
    prompt = _extract_request_prompt(req_proto=req_proto, extracted=extracted)
    if not (model and prompt):
        return None

    if _detect_provider_from_model(model) == "anthropic":
        payload: dict[str, Any] = {
            "model": model,
            "max_tokens": 2048,
            "messages": [{"role": "user", "content": prompt}],
        }
        return "/v1/messages", {"x-api-key": ""}, payload

    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.2,
    }
    return "/v1/chat/completions", {"authorization": ""}, payload
187
+
188
+
189
def _build_cursor_response_bytes(*, text: str, usage: dict[str, Any] | None = None) -> bytes | None:
    """Encode *text* (plus optional token usage) as Cursor Connect stream frames.

    Returns None when the frame helpers or compiled protos are unavailable.
    The trailing 0x02 frame is the Connect end-of-stream marker.
    """
    try:
        from matrx.cli.cursor_connect import build_connect_frame
        from matrx.cli.cursor_proto import _PROTOS_AVAILABLE, server_chat_pb2  # type: ignore[import]
    except Exception:
        return None
    if not _PROTOS_AVAILABLE:
        return None

    frames: list[bytes] = []

    stripped = (text or "").strip()
    if stripped:
        content_msg = server_chat_pb2.StreamUnifiedChatWithToolsResponse()
        content_msg.content.text = stripped
        frames.append(build_connect_frame(0x00, content_msg.SerializeToString()))

    if usage:
        usage_msg = server_chat_pb2.StreamUnifiedChatWithToolsResponse()
        usage_msg.usage.input_tokens = int(usage.get("input_tokens", 0) or 0)
        usage_msg.usage.output_tokens = int(usage.get("output_tokens", 0) or 0)
        # Only ship a usage frame when at least one counter is non-zero.
        if usage_msg.usage.input_tokens or usage_msg.usage.output_tokens:
            frames.append(build_connect_frame(0x00, usage_msg.SerializeToString()))

    # Connect end-of-stream trailer with empty JSON metadata.
    frames.append(build_connect_frame(0x02, b"{}"))
    return b"".join(frames)
215
+
216
+
40
217
  def _cursor_model_to_openai(cursor_model: str) -> str:
41
218
  """Map Cursor model names to OpenAI-style names MTRX expects."""
42
219
  # Cursor uses names like "claude-sonnet-4" or "gpt-4o" - usually compatible
@@ -46,6 +223,72 @@ def _cursor_model_to_openai(cursor_model: str) -> str:
46
223
  return cursor_model
47
224
 
48
225
 
226
+ def _build_search_query(extracted: dict[str, Any]) -> str:
227
+ files = extracted.get("files", [])
228
+ query_parts = [f.get("path", "").strip() for f in files[:3] if f.get("path")]
229
+ if query_parts:
230
+ return " ".join(query_parts)
231
+ summary = extracted.get("conversation_summary") or ""
232
+ if isinstance(summary, str):
233
+ return summary.strip()
234
+ if isinstance(summary, dict):
235
+ return str(summary.get("summary") or "").strip()
236
+ return str(summary).strip()
237
+
238
+
239
+ def _prepend_context_items(context_items: Any, injected_items: list[Any]) -> None:
240
+ for item in reversed(injected_items):
241
+ try:
242
+ context_items.insert(0, item)
243
+ except Exception:
244
+ context_items.append(item)
245
+
246
+
247
+ def _inject_memory_context_items(
248
+ *,
249
+ req_proto: Any,
250
+ memory_results: list[dict[str, Any]],
251
+ server_chat_pb2: Any,
252
+ existing_files: list[dict[str, Any]] | None = None,
253
+ limit: int = 5,
254
+ ) -> int:
255
+ existing_contents = {
256
+ (entry.get("content") or "").strip() for entry in (existing_files or []) if entry.get("content")
257
+ }
258
+ injected_contents: set[str] = set()
259
+ injected_items: list[Any] = []
260
+
261
+ for mem in memory_results:
262
+ if len(injected_items) >= limit:
263
+ break
264
+ content = (mem.get("content") or "").strip()
265
+ if not content or content in existing_contents or content in injected_contents:
266
+ continue
267
+
268
+ cached_item = server_chat_pb2.PotentiallyCachedContextItem()
269
+ ctx_item = server_chat_pb2.ContextItem()
270
+ file_chunk = server_chat_pb2.FileChunk()
271
+ file_chunk.file_path = f"[matrx:{(mem.get('id') or '')[:8]}]"
272
+ file_chunk.content = content[:4096]
273
+ if hasattr(getattr(ctx_item, "file_chunk", None), "CopyFrom"):
274
+ ctx_item.file_chunk.CopyFrom(file_chunk)
275
+ else:
276
+ ctx_item.file_chunk = file_chunk
277
+ if hasattr(getattr(cached_item, "context_item", None), "CopyFrom"):
278
+ cached_item.context_item.CopyFrom(ctx_item)
279
+ else:
280
+ cached_item.context_item = ctx_item
281
+
282
+ injected_items.append(cached_item)
283
+ injected_contents.add(content)
284
+
285
+ if not injected_items:
286
+ return 0
287
+
288
+ _prepend_context_items(req_proto.context_items, injected_items)
289
+ return len(injected_items)
290
+
291
+
49
292
  async def try_reroute_to_matrx(
50
293
  *,
51
294
  path: str,
@@ -65,12 +308,251 @@ async def try_reroute_to_matrx(
65
308
  (success, response_headers, response_body, is_streaming) if handled,
66
309
  None to fall back to normal forward.
67
310
  """
68
- if method != "POST" or not is_ai_path(path):
311
+ classification = classify_ai_request(method, path, req_headers)
312
+ if not classification["candidate"]:
69
313
  return None
314
+ if not classification["reroutable"]:
315
+ logger.info("cursor_reroute: candidate AI path not yet reroutable: %s", path)
316
+ return None
317
+
318
+ try:
319
+ from matrx.cli.cursor_connect import is_connect_proto_request, parse_connect_frame
320
+ from matrx.cli.cursor_extraction import (
321
+ _PROTOS_AVAILABLE,
322
+ extract_from_anthropic_sse_response,
323
+ extract_from_openai_sse_response,
324
+ extract_from_request,
325
+ parse_request_proto,
326
+ ship_ai_telemetry,
327
+ )
328
+ except Exception:
329
+ return None
330
+
331
+ if not _PROTOS_AVAILABLE or not is_connect_proto_request(req_headers):
332
+ logger.info("cursor_reroute: reroutable path lacks compiled proto support: %s", path)
333
+ return None
334
+
335
+ import gzip as _gzip
336
+
337
+ body = req_body
338
+ ce = req_headers.get("content-encoding", "").lower()
339
+ if ce == "gzip" or (len(body) >= 2 and body[:2] == b"\x1f\x8b"):
340
+ try:
341
+ body = _gzip.decompress(body)
342
+ except Exception:
343
+ return None
344
+
345
+ try:
346
+ _, proto_bytes = parse_connect_frame(body)
347
+ except ValueError:
348
+ proto_bytes = body
349
+
350
+ req_proto = parse_request_proto(proto_bytes)
351
+ if req_proto is None:
352
+ logger.info("cursor_reroute: parse_request_proto failed for %s", path)
353
+ return None
354
+
355
+ extracted = extract_from_request(req_proto)
356
+ extracted["session_id"] = extracted.get("session_id") or session_id or ""
357
+ asyncio.create_task(ship_ai_telemetry(extracted, matrx_base_url, matrx_key))
358
+
359
+ upstream_request = _build_matrx_upstream_request(req_proto=req_proto, extracted=extracted)
360
+ if upstream_request is None:
361
+ logger.info("cursor_reroute: insufficient prompt/model data for %s", path)
362
+ return None
363
+
364
+ upstream_path, auth_headers, payload = upstream_request
365
+ headers = {
366
+ "X-Matrx-Key": matrx_key,
367
+ "Content-Type": "application/json",
368
+ }
369
+ if "x-api-key" in auth_headers:
370
+ headers["x-api-key"] = matrx_key
371
+ if "authorization" in auth_headers:
372
+ headers["Authorization"] = f"Bearer {matrx_key}"
373
+
374
+ url = f"{matrx_base_url.rstrip('/')}{upstream_path}"
375
+ try:
376
+ async with httpx.AsyncClient(timeout=httpx.Timeout(timeout=90.0, connect=5.0)) as client:
377
+ resp = await client.post(url, json={**payload, "stream": True}, headers=headers)
378
+ except Exception:
379
+ logger.warning("cursor_reroute: upstream request failed for %s", path, exc_info=True)
380
+ return None
381
+
382
+ if resp.status_code >= 400:
383
+ logger.info(
384
+ "cursor_reroute: upstream returned %s for %s; forwarding unchanged",
385
+ resp.status_code,
386
+ path,
387
+ )
388
+ return None
389
+
390
+ provider = _detect_provider_from_model(str(payload.get("model", "")))
391
+ if provider == "anthropic":
392
+ frame_data = extract_from_anthropic_sse_response(resp.content)
393
+ else:
394
+ frame_data = extract_from_openai_sse_response(resp.content)
395
+
396
+ text = frame_data.get("text", "")
397
+ usage = frame_data.get("usage")
398
+ response_body = _build_cursor_response_bytes(text=text, usage=usage)
399
+ if response_body is None:
400
+ return None
401
+
402
+ response_telemetry = {
403
+ "session_id": extracted.get("session_id") or session_id or "",
404
+ "conversation_id": extracted.get("conversation_id") or "",
405
+ "model": extracted.get("model") or "",
406
+ "files": extracted.get("files", []),
407
+ "edits": extracted.get("edits", []),
408
+ "response_text": text,
409
+ "tool_calls": [],
410
+ "usage": usage,
411
+ }
412
+ asyncio.create_task(ship_ai_telemetry(response_telemetry, matrx_base_url, matrx_key))
413
+
414
+ return (
415
+ True,
416
+ {
417
+ "content-type": req_headers.get("content-type", "application/connect+proto"),
418
+ "connect-protocol-version": "1",
419
+ },
420
+ response_body,
421
+ True,
422
+ )
423
+
424
+
425
+ # ---------------------------------------------------------------------------
426
+ # Context injection
427
+ # ---------------------------------------------------------------------------
428
+
70
429
 
71
- # TODO: Full protobuf parsing. Cursor uses Connect/gRPC with binary frames.
72
- # For now we don't have the proto conversion - fall back to forward.
73
- # When implemented: parse req_body, extract messages+model, call MTRX,
74
- # convert response back to Cursor's gRPC format.
75
- logger.debug("cursor_reroute: path=%s would reroute (protobuf conversion not yet implemented)", path)
76
- return None
430
async def _query_memory(
    *,
    query: str,
    matrx_base_url: str,
    matrx_key: str,
    limit: int = 5,
) -> list[dict]:
    """Query MTRX memory search API. Returns list of memory entry dicts.

    Runs on the request forward path, so the whole call is capped at a
    100 ms budget and every failure degrades to an empty result.
    """
    search_url = f"{matrx_base_url.rstrip('/')}/v1/memory/search"
    try:
        async with httpx.AsyncClient(timeout=0.1) as client:  # 100 ms budget
            resp = await client.get(
                search_url,
                params={"q": query, "limit": limit},
                headers={"X-Matrx-Key": matrx_key},
            )
            if resp.status_code == 200:
                return resp.json().get("entries", [])
    except Exception:
        logger.debug("cursor_reroute: memory query failed", exc_info=True)
    return []
450
+
451
+
452
async def try_inject_context(
    *,
    req_body: bytes,
    req_headers: dict[str, str],
    matrx_base_url: str,
    matrx_key: str,
    session_id: str,
) -> bytes | None:
    """Parse the Connect+proto request, extract data, inject MTRX memory context items.

    Returns modified request bytes with injected context items prepended, or
    ``None`` to signal that the original request should be forwarded unchanged.
    Wraps everything in try/except — never raises.

    Pipeline: telemetry is shipped for both JSON and proto requests, but
    context injection only happens for successfully parsed compiled protos.
    """
    try:
        from matrx.cli.cursor_connect import (
            build_connect_frame,
            is_connect_proto_request,
            parse_connect_frame,
        )
        from matrx.cli.cursor_extraction import (
            _PROTOS_AVAILABLE,
            _raw_extract_request,
            extract_from_json_request,
            extract_from_request,
            parse_request_proto,
            ship_ai_telemetry,
        )

        # JSON API path (Anthropic Messages API, OpenAI Chat Completions):
        # telemetry only — no injection into JSON bodies.
        content_type = req_headers.get("content-type", "").lower()
        if "application/json" in content_type:
            extracted = extract_from_json_request(req_body)
            extracted["session_id"] = extracted.get("session_id") or session_id
            # NOTE(review): the created task reference is not retained; per
            # asyncio docs it may be garbage-collected before completion —
            # consider holding a reference if telemetry drops are observed.
            asyncio.create_task(ship_ai_telemetry(extracted, matrx_base_url, matrx_key))
            return None

        if not is_connect_proto_request(req_headers):
            return None

        # Decompress body if gzip-encoded (header says so, or magic bytes match)
        import gzip as _gzip
        body = req_body
        ce = req_headers.get("content-encoding", "").lower()
        if ce == "gzip" or (len(body) >= 2 and body[:2] == b"\x1f\x8b"):
            try:
                body = _gzip.decompress(body)
            except Exception:
                return None

        # Parse Connect envelope → raw proto bytes.
        # Fall back to treating the body as raw protobuf if Connect framing is absent
        # (some Cursor versions send raw proto without the 5-byte envelope).
        try:
            flags, proto_bytes = parse_connect_frame(body)
        except ValueError:
            flags, proto_bytes = 0, body

        # Deserialize proto (compiled path); fall back to raw wire parsing
        req_proto = parse_request_proto(proto_bytes)
        if req_proto is not None:
            extracted = extract_from_request(req_proto)
        else:
            extracted = _raw_extract_request(proto_bytes)

        # Extract structured data and ship telemetry fire-and-forget
        extracted["session_id"] = extracted.get("session_id") or session_id
        asyncio.create_task(ship_ai_telemetry(extracted, matrx_base_url, matrx_key))

        # Cannot inject without compiled protos or a successfully parsed proto
        if not _PROTOS_AVAILABLE or req_proto is None:
            return None

        # Build search query from the open file paths
        search_query = _build_search_query(extracted)
        if not search_query:
            return None

        # Query MTRX memory (100 ms budget — never blocks the forward path)
        memory_results = await _query_memory(
            query=search_query,
            matrx_base_url=matrx_base_url,
            matrx_key=matrx_key,
            limit=5,
        )
        if not memory_results:
            return None

        # Inject memory results as PotentiallyCachedContextItem entries
        from matrx.cli.cursor_proto import server_chat_pb2  # type: ignore[import]

        inserted = _inject_memory_context_items(
            req_proto=req_proto,
            memory_results=memory_results,
            server_chat_pb2=server_chat_pb2,
            existing_files=extracted.get("files", []),
        )
        if inserted == 0:
            return None

        # Serialize modified proto and re-wrap in Connect frame
        # (original frame flags are preserved).
        new_proto_bytes = req_proto.SerializeToString()
        return build_connect_frame(flags, new_proto_bytes)

    except Exception:
        logger.warning("cursor_reroute: try_inject_context failed", exc_info=True)
        return None
@@ -663,6 +663,8 @@ def _build_gemini_env(
663
663
  ctx_params: list[str] = []
664
664
  if project_id:
665
665
  ctx_params.append(f"mtrx_project={project_id}")
666
+ if group_id:
667
+ ctx_params.append(f"mtrx_group={group_id}")
666
668
  if session_id:
667
669
  ctx_params.append(f"mtrx_session={session_id}")
668
670
  if runtime_agent_id:
@@ -673,6 +675,8 @@ def _build_gemini_env(
673
675
  ctx_params.append(f"mtrx_branch={git_branch}")
674
676
  if git_commit:
675
677
  ctx_params.append(f"mtrx_commit={git_commit}")
678
+ if git_repo_url:
679
+ ctx_params.append(f"mtrx_repo_url={git_repo_url}")
676
680
 
677
681
  query_suffix = f"?{'&'.join(ctx_params)}" if ctx_params else ""
678
682
  env_snap = _capture_env_snapshot()