stravinsky 0.2.40__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. mcp_bridge/__init__.py +1 -1
  2. mcp_bridge/auth/token_refresh.py +130 -0
  3. mcp_bridge/cli/__init__.py +6 -0
  4. mcp_bridge/cli/install_hooks.py +1265 -0
  5. mcp_bridge/cli/session_report.py +585 -0
  6. mcp_bridge/hooks/HOOKS_SETTINGS.json +175 -0
  7. mcp_bridge/hooks/README.md +215 -0
  8. mcp_bridge/hooks/__init__.py +119 -43
  9. mcp_bridge/hooks/edit_recovery.py +42 -37
  10. mcp_bridge/hooks/git_noninteractive.py +89 -0
  11. mcp_bridge/hooks/keyword_detector.py +30 -0
  12. mcp_bridge/hooks/manager.py +50 -0
  13. mcp_bridge/hooks/notification_hook.py +103 -0
  14. mcp_bridge/hooks/parallel_enforcer.py +127 -0
  15. mcp_bridge/hooks/parallel_execution.py +111 -0
  16. mcp_bridge/hooks/pre_compact.py +123 -0
  17. mcp_bridge/hooks/preemptive_compaction.py +81 -7
  18. mcp_bridge/hooks/rules_injector.py +507 -0
  19. mcp_bridge/hooks/session_idle.py +116 -0
  20. mcp_bridge/hooks/session_notifier.py +125 -0
  21. mcp_bridge/{native_hooks → hooks}/stravinsky_mode.py +51 -16
  22. mcp_bridge/hooks/subagent_stop.py +98 -0
  23. mcp_bridge/hooks/task_validator.py +73 -0
  24. mcp_bridge/hooks/tmux_manager.py +141 -0
  25. mcp_bridge/hooks/todo_continuation.py +90 -0
  26. mcp_bridge/hooks/todo_delegation.py +88 -0
  27. mcp_bridge/hooks/tool_messaging.py +164 -0
  28. mcp_bridge/hooks/truncator.py +21 -17
  29. mcp_bridge/notifications.py +151 -0
  30. mcp_bridge/prompts/__init__.py +3 -1
  31. mcp_bridge/prompts/dewey.py +30 -20
  32. mcp_bridge/prompts/explore.py +46 -8
  33. mcp_bridge/prompts/multimodal.py +24 -3
  34. mcp_bridge/prompts/planner.py +222 -0
  35. mcp_bridge/prompts/stravinsky.py +107 -28
  36. mcp_bridge/server.py +170 -10
  37. mcp_bridge/server_tools.py +554 -32
  38. mcp_bridge/tools/agent_manager.py +316 -106
  39. mcp_bridge/tools/background_tasks.py +2 -1
  40. mcp_bridge/tools/code_search.py +97 -11
  41. mcp_bridge/tools/lsp/__init__.py +7 -0
  42. mcp_bridge/tools/lsp/manager.py +448 -0
  43. mcp_bridge/tools/lsp/tools.py +637 -150
  44. mcp_bridge/tools/model_invoke.py +270 -47
  45. mcp_bridge/tools/semantic_search.py +2492 -0
  46. mcp_bridge/tools/templates.py +32 -18
  47. stravinsky-0.3.4.dist-info/METADATA +420 -0
  48. stravinsky-0.3.4.dist-info/RECORD +79 -0
  49. stravinsky-0.3.4.dist-info/entry_points.txt +5 -0
  50. mcp_bridge/native_hooks/edit_recovery.py +0 -46
  51. mcp_bridge/native_hooks/truncator.py +0 -23
  52. stravinsky-0.2.40.dist-info/METADATA +0 -204
  53. stravinsky-0.2.40.dist-info/RECORD +0 -57
  54. stravinsky-0.2.40.dist-info/entry_points.txt +0 -3
  55. /mcp_bridge/{native_hooks → hooks}/context.py +0 -0
  56. {stravinsky-0.2.40.dist-info → stravinsky-0.3.4.dist-info}/WHEEL +0 -0
@@ -8,22 +8,86 @@ API requests to external model providers.
 import logging
 import os
 import time
+import uuid
+import base64
+import json as json_module
 
 logger = logging.getLogger(__name__)
 
+
+def _summarize_prompt(prompt: str, max_length: int = 120) -> str:
+    """
+    Generate a short summary of the prompt for logging.
+
+    Args:
+        prompt: The full prompt text
+        max_length: Maximum characters to include in summary
+
+    Returns:
+        Truncated prompt suitable for logging (single line, max_length chars)
+    """
+    if not prompt:
+        return "(empty prompt)"
+
+    # Normalize whitespace: collapse newlines and multiple spaces
+    clean = " ".join(prompt.split())
+
+    if len(clean) <= max_length:
+        return clean
+
+    return clean[:max_length] + "..."
+
+
+# Cache for Codex instructions (fetched from GitHub)
+_CODEX_INSTRUCTIONS_CACHE = {}
+_CODEX_INSTRUCTIONS_RELEASE_TAG = "rust-v0.77.0"  # Update as needed
+
+
+async def _fetch_codex_instructions(model: str = "gpt-5.2-codex") -> str:
+    """
+    Fetch official Codex instructions from GitHub.
+    Caches results to avoid repeated fetches.
+    """
+    import httpx
+
+    if model in _CODEX_INSTRUCTIONS_CACHE:
+        return _CODEX_INSTRUCTIONS_CACHE[model]
+
+    # Map model to prompt file
+    prompt_file_map = {
+        "gpt-5.2-codex": "gpt-5.2-codex_prompt.md",
+        "gpt-5.1-codex": "gpt_5_codex_prompt.md",
+        "gpt-5.1-codex-max": "gpt_5_codex_max_prompt.md",
+    }
+
+    prompt_file = prompt_file_map.get(model, "gpt-5.2-codex_prompt.md")
+    url = f"https://raw.githubusercontent.com/openai/codex/{_CODEX_INSTRUCTIONS_RELEASE_TAG}/codex-rs/core/{prompt_file}"
+
+    try:
+        async with httpx.AsyncClient() as client:
+            response = await client.get(url, timeout=30.0)
+            response.raise_for_status()
+            instructions = response.text
+            _CODEX_INSTRUCTIONS_CACHE[model] = instructions
+            return instructions
+    except Exception as e:
+        logger.error(f"Failed to fetch Codex instructions: {e}")
+        # Return basic fallback instructions
+        return "You are Codex, based on GPT-5. You are running as a coding agent."
+
+
 # Model name mapping: user-friendly names -> Antigravity API model IDs
 # Per API spec: https://github.com/NoeFabris/opencode-antigravity-auth/blob/main/docs/ANTIGRAVITY_API_SPEC.md
-# VERIFIED GEMINI MODELS (as of 2025-12):
-# - gemini-3-pro-high, gemini-3-pro-low
-# NOTE: There is NO gemini-3-flash in the API - all flash aliases map to gemini-3-pro-low
+# VERIFIED GEMINI MODELS (as of 2026-01):
+# - gemini-3-flash, gemini-3-pro-high, gemini-3-pro-low
 # NOTE: Claude models should use Anthropic API directly, NOT Antigravity
 GEMINI_MODEL_MAP = {
     # Antigravity verified Gemini models (pass-through)
     "gemini-3-pro-low": "gemini-3-pro-low",
     "gemini-3-pro-high": "gemini-3-pro-high",
-    # Aliases for convenience (map to closest verified model)
-    "gemini-flash": "gemini-3-pro-low",
-    "gemini-3-flash": "gemini-3-pro-low",  # NOT a real model - redirect to pro-low
+    "gemini-3-flash": "gemini-3-flash",
+    # Aliases for convenience
+    "gemini-flash": "gemini-3-flash",
     "gemini-pro": "gemini-3-pro-low",
     "gemini-3-pro": "gemini-3-pro-low",
     "gemini": "gemini-3-pro-low",  # Default gemini alias
@@ -84,11 +148,11 @@ def _get_session_id(conversation_key: str | None = None) -> str:
     Returns:
         Stable session UUID for this conversation
     """
-    import uuid
+    import uuid as uuid_module  # Local import workaround
 
     key = conversation_key or "default"
     if key not in _SESSION_CACHE:
-        _SESSION_CACHE[key] = str(uuid.uuid4())
+        _SESSION_CACHE[key] = str(uuid_module.uuid4())
     return _SESSION_CACHE[key]
 
 
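Note: the module-level `_SESSION_CACHE` shown here makes the session UUID stable per conversation key. A standalone sketch of the observable behavior (keys invented):

    >>> _get_session_id("conv-1") == _get_session_id("conv-1")
    True
    >>> _get_session_id("conv-1") == _get_session_id("conv-2")
    False
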
@@ -241,11 +305,13 @@ async def invoke_gemini(
     temperature: float = 0.7,
     max_tokens: int = 4096,
     thinking_budget: int = 0,
+    image_path: str | None = None,
 ) -> str:
     """
     Invoke a Gemini model with the given prompt.
 
     Uses OAuth authentication with Antigravity credentials.
+    Supports vision API for image/PDF analysis when image_path is provided.
 
     Args:
         token_store: Token store for OAuth credentials
@@ -253,6 +319,8 @@
         model: Gemini model to use
         temperature: Sampling temperature (0.0-2.0)
         max_tokens: Maximum tokens in response
+        thinking_budget: Tokens reserved for internal reasoning
+        image_path: Optional path to image/PDF for vision analysis (token optimization)
 
     Returns:
         The model's response text.
@@ -261,6 +329,7 @@
         ValueError: If not authenticated with Gemini
         httpx.HTTPStatusError: If API request fails
     """
+    logger.info(f"[DEBUG] invoke_gemini called, uuid module check: {uuid}")
     # Execute pre-model invoke hooks
     params = {
         "prompt": prompt,
@@ -268,6 +337,8 @@
         "temperature": temperature,
         "max_tokens": max_tokens,
         "thinking_budget": thinking_budget,
+        "token_store": token_store,  # Pass for hooks that need model access
+        "provider": "gemini",  # Identify which provider is being called
     }
     hook_manager = get_hook_manager()
     params = await hook_manager.execute_pre_model_invoke(params)
@@ -279,6 +350,22 @@
     max_tokens = params["max_tokens"]
     thinking_budget = params["thinking_budget"]
 
+    # Extract agent context for logging (may be passed via params or original call)
+    agent_context = params.get("agent_context", {})
+    agent_type = agent_context.get("agent_type", "direct")
+    task_id = agent_context.get("task_id", "")
+    description = agent_context.get("description", "")
+    prompt_summary = _summarize_prompt(prompt)
+
+    # Log with agent context and prompt summary
+    logger.info(f"[{agent_type}] → {model}: {prompt_summary}")
+
+    # USER-VISIBLE NOTIFICATION (stderr) - Shows when Gemini is invoked
+    import sys
+    task_info = f" task={task_id}" if task_id else ""
+    desc_info = f" | {description}" if description else ""
+    print(f"🔮 GEMINI: {model} | agent={agent_type}{task_info}{desc_info}", file=sys.stderr)
+
     access_token = await _ensure_valid_token(token_store, "gemini")
 
     # Resolve user-friendly model name to actual API model ID
@@ -296,8 +383,43 @@
 
     # Build inner request payload
     # Per API spec: contents must include role ("user" or "model")
+
+    # Build parts list - text prompt plus optional image
+    parts = [{"text": prompt}]
+
+    # Add image data for vision analysis (token optimization for multimodal)
+    if image_path:
+        import base64
+        from pathlib import Path
+
+        image_file = Path(image_path)
+        if image_file.exists():
+            # Determine MIME type
+            suffix = image_file.suffix.lower()
+            mime_types = {
+                ".png": "image/png",
+                ".jpg": "image/jpeg",
+                ".jpeg": "image/jpeg",
+                ".gif": "image/gif",
+                ".webp": "image/webp",
+                ".pdf": "application/pdf",
+            }
+            mime_type = mime_types.get(suffix, "image/png")
+
+            # Read and base64 encode
+            image_data = base64.b64encode(image_file.read_bytes()).decode("utf-8")
+
+            # Add inline image data for Gemini Vision API
+            parts.append({
+                "inlineData": {
+                    "mimeType": mime_type,
+                    "data": image_data,
+                }
+            })
+            logger.info(f"[multimodal] Added vision data: {image_path} ({mime_type})")
+
     inner_payload = {
-        "contents": [{"role": "user", "parts": [{"text": prompt}]}],
+        "contents": [{"role": "user", "parts": parts}],
         "generationConfig": {
             "temperature": temperature,
             "maxOutputTokens": max_tokens,
@@ -315,11 +437,19 @@
     }
 
     # Wrap request body per reference implementation
+    try:
+        import uuid as uuid_module  # Local import workaround for MCP context issue
+
+        request_id = f"invoke-{uuid_module.uuid4()}"
+    except Exception as e:
+        logger.error(f"UUID IMPORT FAILED: {e}")
+        raise RuntimeError(f"CUSTOM ERROR: UUID import failed: {e}")
+
     wrapped_payload = {
         "project": project_id,
         "model": api_model,
         "userAgent": "antigravity",
-        "requestId": f"invoke-{uuid.uuid4()}",
+        "requestId": request_id,
         "request": inner_payload,
     }
 
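Note: combining the two hunks above, the wrapped Antigravity request for an image-bearing call ends up shaped roughly like this (every value below is a placeholder for illustration, not captured traffic):

    wrapped_payload = {
        "project": "<project-id>",
        "model": "gemini-3-pro-high",
        "userAgent": "antigravity",
        "requestId": "invoke-<uuid4>",   # built from uuid_module.uuid4()
        "request": {
            "contents": [{
                "role": "user",
                "parts": [
                    {"text": "Describe this screenshot."},
                    {"inlineData": {"mimeType": "image/png", "data": "<base64>"}},
                ],
            }],
            "generationConfig": {"temperature": 0.7, "maxOutputTokens": 4096},
        },
    }
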
@@ -385,6 +515,26 @@
             }
             break
     if response is None:
+        # FALLBACK: Try Claude sonnet-4.5 for agents that support it
+        agent_context = params.get("agent_context", {})
+        agent_type = agent_context.get("agent_type", "unknown")
+
+        if agent_type in ("dewey", "explore", "document_writer", "multimodal"):
+            logger.warning(f"[{agent_type}] Gemini failed, falling back to Claude sonnet-4.5")
+            try:
+                import subprocess
+                fallback_result = subprocess.run(
+                    ["claude", "-p", prompt, "--model", "sonnet", "--output-format", "text"],
+                    capture_output=True,
+                    text=True,
+                    timeout=120,
+                    cwd=os.getcwd(),
+                )
+                if fallback_result.returncode == 0 and fallback_result.stdout.strip():
+                    return fallback_result.stdout.strip()
+            except Exception as fallback_error:
+                logger.error(f"Fallback to Claude also failed: {fallback_error}")
+
         raise ValueError(f"All Antigravity endpoints failed: {last_error}")
 
     response.raise_for_status()
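
Note: the fallback above shells out to the Claude CLI. A minimal self-contained sketch of the same pattern (assumes a `claude` binary on PATH; the helper name is hypothetical, not from the package):

    import subprocess

    def claude_cli_fallback(prompt: str) -> str | None:
        """Run `claude -p` non-interactively; return None on any failure."""
        try:
            result = subprocess.run(
                ["claude", "-p", prompt, "--model", "sonnet", "--output-format", "text"],
                capture_output=True, text=True, timeout=120,
            )
        except (OSError, subprocess.TimeoutExpired):
            return None
        out = result.stdout.strip()
        return out if result.returncode == 0 and out else None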
@@ -537,8 +687,6 @@ async def invoke_gemini_agentic(
     Returns:
         Final text response from the model
     """
-    import uuid
-
     access_token = await _ensure_valid_token(token_store, "gemini")
     api_model = resolve_gemini_model(model)
 
@@ -574,11 +722,13 @@
 
     # Wrap request body per reference implementation
     # From request.ts wrapRequestBody()
+    import uuid as uuid_module  # Local import workaround
+
     wrapped_payload = {
         "project": project_id,
         "model": api_model,
         "userAgent": "antigravity",
-        "requestId": f"agent-{uuid.uuid4()}",
+        "requestId": f"agent-{uuid_module.uuid4()}",
         "request": inner_payload,
     }
 
@@ -688,7 +838,7 @@
 async def invoke_openai(
     token_store: TokenStore,
     prompt: str,
-    model: str = "gpt-5.2",
+    model: str = "gpt-5.2-codex",
     temperature: float = 0.7,
     max_tokens: int = 4096,
     thinking_budget: int = 0,
@@ -717,6 +867,8 @@
         "temperature": temperature,
         "max_tokens": max_tokens,
         "thinking_budget": thinking_budget,
+        "token_store": token_store,  # Pass for hooks that need model access
+        "provider": "openai",  # Identify which provider is being called
     }
     hook_manager = get_hook_manager()
     params = await hook_manager.execute_pre_model_invoke(params)
@@ -728,52 +880,123 @@
     max_tokens = params["max_tokens"]
     thinking_budget = params["thinking_budget"]
 
+    # Extract agent context for logging (may be passed via params or original call)
+    agent_context = params.get("agent_context", {})
+    agent_type = agent_context.get("agent_type", "direct")
+    task_id = agent_context.get("task_id", "")
+    description = agent_context.get("description", "")
+    prompt_summary = _summarize_prompt(prompt)
+
+    # Log with agent context and prompt summary
+    logger.info(f"[{agent_type}] → {model}: {prompt_summary}")
+
+    # USER-VISIBLE NOTIFICATION (stderr) - Shows when OpenAI is invoked
+    import sys
+    task_info = f" task={task_id}" if task_id else ""
+    desc_info = f" | {description}" if description else ""
+    print(f"🧠 OPENAI: {model} | agent={agent_type}{task_info}{desc_info}", file=sys.stderr)
+
     access_token = await _ensure_valid_token(token_store, "openai")
+    logger.info(f"[invoke_openai] Got access token")
+
+    # ChatGPT Backend API - Uses Codex Responses endpoint
+    # Replicates opencode-openai-codex-auth plugin behavior
+    api_url = "https://chatgpt.com/backend-api/codex/responses"
 
-    # OpenAI Chat Completions API
-    api_url = "https://api.openai.com/v1/chat/completions"
+    # Extract account ID from JWT token
+    logger.info(f"[invoke_openai] Extracting account ID from JWT")
+    try:
+        parts = access_token.split(".")
+        payload_b64 = parts[1]
+        padding = 4 - len(payload_b64) % 4
+        if padding != 4:
+            payload_b64 += "=" * padding
+        jwt_payload = json_module.loads(base64.urlsafe_b64decode(payload_b64))
+        account_id = jwt_payload.get("https://api.openai.com/auth", {}).get("chatgpt_account_id")
+    except Exception as e:
+        logger.error(f"Failed to extract account ID from JWT: {e}")
+        account_id = None
 
+    # Fetch official Codex instructions from GitHub
+    instructions = await _fetch_codex_instructions(model)
+
+    # Headers matching opencode-openai-codex-auth plugin
     headers = {
         "Authorization": f"Bearer {access_token}",
         "Content-Type": "application/json",
+        "Accept": "text/event-stream",  # SSE stream
+        "openai-beta": "responses=experimental",
+        "openai-originator": "codex_cli_rs",
     }
 
+    if account_id:
+        headers["x-openai-account-id"] = account_id
+
+    # Request body matching opencode transformation
     payload = {
         "model": model,
-        "messages": [{"role": "user", "content": prompt}],
-        "temperature": temperature,
+        "store": False,  # Required by ChatGPT backend
+        "stream": True,  # Always stream (handler converts to non-stream if needed)
+        "instructions": instructions,
+        "input": [{"role": "user", "content": prompt}],
+        "reasoning": {"effort": "high" if thinking_budget > 0 else "medium", "summary": "auto"},
+        "text": {"verbosity": "medium"},
+        "include": ["reasoning.encrypted_content"],
     }
 
-    # Handle thinking budget for O1/O3 style models (GPT-5.2)
-    if thinking_budget > 0:
-        payload["max_completion_tokens"] = max_tokens + thinking_budget
-        # For O1, temperature must be 1.0 or omitted usually, but we'll try to pass it
-    else:
-        payload["max_tokens"] = max_tokens
-
-    async with httpx.AsyncClient() as client:
-        response = await client.post(
-            api_url,
-            headers=headers,
-            json=payload,
-            timeout=120.0,
-        )
+    # Stream the response and collect text
+    text_chunks = []
 
-        if response.status_code == 401:
-            raise ValueError(
-                "OpenAI authentication failed. Run: python -m mcp_bridge.auth.cli login openai"
-            )
+    logger.info(f"[invoke_openai] Calling {api_url} with model {model}")
+    logger.info(f"[invoke_openai] Payload keys: {list(payload.keys())}")
+    logger.info(f"[invoke_openai] Instructions length: {len(instructions)}")
 
-        response.raise_for_status()
-
-        data = response.json()
+    try:
+        async with httpx.AsyncClient() as client:
+            async with client.stream(
+                "POST", api_url, headers=headers, json=payload, timeout=120.0
+            ) as response:
+                logger.info(f"[invoke_openai] Response status: {response.status_code}")
+                if response.status_code == 401:
+                    raise ValueError(
+                        "OpenAI authentication failed. Run: stravinsky-auth login openai"
+                    )
 
-    # Extract text from response
-    try:
-        choices = data.get("choices", [])
-        if choices:
-            message = choices[0].get("message", {})
-            return message.get("content", "")
+                if response.status_code >= 400:
+                    error_body = await response.aread()
+                    error_text = error_body.decode("utf-8")
+                    logger.error(f"OpenAI API error {response.status_code}: {error_text}")
+                    logger.error(f"Request payload was: {payload}")
+                    logger.error(f"Request headers were: {headers}")
+                    raise ValueError(f"OpenAI API error {response.status_code}: {error_text}")
+
+                # Parse SSE stream for text deltas
+                async for line in response.aiter_lines():
+                    if line.startswith("data: "):
+                        data_json = line[6:]  # Remove "data: " prefix
+                        try:
+                            data = json_module.loads(data_json)
+                            event_type = data.get("type")
+
+                            # Extract text deltas from SSE stream
+                            if event_type == "response.output_text.delta":
+                                delta = data.get("delta", "")
+                                text_chunks.append(delta)
+
+                        except json_module.JSONDecodeError:
+                            pass  # Skip malformed JSON
+                        except Exception as e:
+                            logger.warning(f"Error processing SSE event: {e}")
+
+        # Return collected text
+        result = "".join(text_chunks)
+        if not result:
             return "No response generated"
-    except (KeyError, IndexError) as e:
-        return f"Error parsing response: {e}"
+        return result
+
+    except httpx.HTTPStatusError as e:
+        logger.error(f"HTTP error: {e}")
+        raise
+    except Exception as e:
+        logger.error(f"Unexpected error in invoke_openai: {e}")
+        raise ValueError(f"Failed to invoke OpenAI: {e}")