stravinsky 0.2.40__py3-none-any.whl → 0.2.52__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of stravinsky might be problematic.
- mcp_bridge/__init__.py +1 -1
- mcp_bridge/auth/token_refresh.py +130 -0
- mcp_bridge/hooks/__init__.py +18 -1
- mcp_bridge/hooks/manager.py +50 -0
- mcp_bridge/hooks/parallel_enforcer.py +127 -0
- mcp_bridge/hooks/pre_compact.py +224 -0
- mcp_bridge/hooks/preemptive_compaction.py +81 -7
- mcp_bridge/hooks/session_idle.py +116 -0
- mcp_bridge/native_hooks/todo_delegation.py +54 -0
- mcp_bridge/prompts/__init__.py +3 -1
- mcp_bridge/prompts/dewey.py +30 -20
- mcp_bridge/prompts/explore.py +46 -8
- mcp_bridge/prompts/planner.py +222 -0
- mcp_bridge/prompts/stravinsky.py +107 -28
- mcp_bridge/server.py +64 -9
- mcp_bridge/server_tools.py +159 -32
- mcp_bridge/tools/agent_manager.py +173 -85
- mcp_bridge/tools/background_tasks.py +2 -1
- mcp_bridge/tools/model_invoke.py +194 -46
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/METADATA +1 -1
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/RECORD +23 -17
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/WHEEL +0 -0
- {stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/entry_points.txt +0 -0
mcp_bridge/tools/model_invoke.py
CHANGED
@@ -8,22 +8,86 @@ API requests to external model providers.
 import logging
 import os
 import time
+import uuid
+import base64
+import json as json_module
 
 logger = logging.getLogger(__name__)
 
+
+def _summarize_prompt(prompt: str, max_length: int = 120) -> str:
+    """
+    Generate a short summary of the prompt for logging.
+
+    Args:
+        prompt: The full prompt text
+        max_length: Maximum characters to include in summary
+
+    Returns:
+        Truncated prompt suitable for logging (single line, max_length chars)
+    """
+    if not prompt:
+        return "(empty prompt)"
+
+    # Normalize whitespace: collapse newlines and multiple spaces
+    clean = " ".join(prompt.split())
+
+    if len(clean) <= max_length:
+        return clean
+
+    return clean[:max_length] + "..."
+
+
+# Cache for Codex instructions (fetched from GitHub)
+_CODEX_INSTRUCTIONS_CACHE = {}
+_CODEX_INSTRUCTIONS_RELEASE_TAG = "rust-v0.77.0"  # Update as needed
+
+
+async def _fetch_codex_instructions(model: str = "gpt-5.2-codex") -> str:
+    """
+    Fetch official Codex instructions from GitHub.
+    Caches results to avoid repeated fetches.
+    """
+    import httpx
+
+    if model in _CODEX_INSTRUCTIONS_CACHE:
+        return _CODEX_INSTRUCTIONS_CACHE[model]
+
+    # Map model to prompt file
+    prompt_file_map = {
+        "gpt-5.2-codex": "gpt-5.2-codex_prompt.md",
+        "gpt-5.1-codex": "gpt_5_codex_prompt.md",
+        "gpt-5.1-codex-max": "gpt_5_codex_max_prompt.md",
+    }
+
+    prompt_file = prompt_file_map.get(model, "gpt-5.2-codex_prompt.md")
+    url = f"https://raw.githubusercontent.com/openai/codex/{_CODEX_INSTRUCTIONS_RELEASE_TAG}/codex-rs/core/{prompt_file}"
+
+    try:
+        async with httpx.AsyncClient() as client:
+            response = await client.get(url, timeout=30.0)
+            response.raise_for_status()
+            instructions = response.text
+            _CODEX_INSTRUCTIONS_CACHE[model] = instructions
+            return instructions
+    except Exception as e:
+        logger.error(f"Failed to fetch Codex instructions: {e}")
+        # Return basic fallback instructions
+        return "You are Codex, based on GPT-5. You are running as a coding agent."
+
+
 # Model name mapping: user-friendly names -> Antigravity API model IDs
 # Per API spec: https://github.com/NoeFabris/opencode-antigravity-auth/blob/main/docs/ANTIGRAVITY_API_SPEC.md
-# VERIFIED GEMINI MODELS (as of
-# - gemini-3-pro-high, gemini-3-pro-low
-# NOTE: There is NO gemini-3-flash in the API - all flash aliases map to gemini-3-pro-low
+# VERIFIED GEMINI MODELS (as of 2026-01):
+# - gemini-3-flash, gemini-3-pro-high, gemini-3-pro-low
 # NOTE: Claude models should use Anthropic API directly, NOT Antigravity
 GEMINI_MODEL_MAP = {
     # Antigravity verified Gemini models (pass-through)
     "gemini-3-pro-low": "gemini-3-pro-low",
     "gemini-3-pro-high": "gemini-3-pro-high",
-
-
-    "gemini-
+    "gemini-3-flash": "gemini-3-flash",
+    # Aliases for convenience
+    "gemini-flash": "gemini-3-flash",
     "gemini-pro": "gemini-3-pro-low",
     "gemini-3-pro": "gemini-3-pro-low",
     "gemini": "gemini-3-pro-low",  # Default gemini alias
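For reference, the truncation logic of the new _summarize_prompt helper can be exercised standalone; this condensed mirror of the hunk above (illustration, not part of the diff) shows that whitespace is collapsed to single spaces before the length check:

def summarize_prompt(prompt: str, max_length: int = 120) -> str:
    # Condensed mirror of _summarize_prompt from the hunk above.
    if not prompt:
        return "(empty prompt)"
    clean = " ".join(prompt.split())  # collapse newlines and space runs
    return clean if len(clean) <= max_length else clean[:max_length] + "..."

print(summarize_prompt("Refactor the\n  auth module\nnow", max_length=16))
# prints: Refactor the aut...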
@@ -84,11 +148,11 @@ def _get_session_id(conversation_key: str | None = None) -> str:
     Returns:
         Stable session UUID for this conversation
     """
-    import uuid
+    import uuid as uuid_module  # Local import workaround
 
     key = conversation_key or "default"
     if key not in _SESSION_CACHE:
-        _SESSION_CACHE[key] = str(
+        _SESSION_CACHE[key] = str(uuid_module.uuid4())
     return _SESSION_CACHE[key]
 
 
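The change above keeps the memoization pattern intact: a UUID is generated once per conversation key and reused afterwards. A minimal standalone sketch of that pattern (same names as the hunk, shown in isolation rather than taken verbatim from the module):

import uuid

_SESSION_CACHE: dict[str, str] = {}

def get_session_id(conversation_key: str | None = None) -> str:
    # Lazily create one UUID per conversation key, then reuse it,
    # so every call for the same conversation sees a stable session ID.
    key = conversation_key or "default"
    if key not in _SESSION_CACHE:
        _SESSION_CACHE[key] = str(uuid.uuid4())
    return _SESSION_CACHE[key]

assert get_session_id("chat-1") == get_session_id("chat-1")  # stable per key
assert get_session_id("chat-1") != get_session_id("chat-2")  # distinct keys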
@@ -261,6 +325,7 @@ async def invoke_gemini(
         ValueError: If not authenticated with Gemini
         httpx.HTTPStatusError: If API request fails
     """
+    logger.info(f"[DEBUG] invoke_gemini called, uuid module check: {uuid}")
     # Execute pre-model invoke hooks
     params = {
         "prompt": prompt,
@@ -268,6 +333,8 @@ async def invoke_gemini(
         "temperature": temperature,
         "max_tokens": max_tokens,
         "thinking_budget": thinking_budget,
+        "token_store": token_store,  # Pass for hooks that need model access
+        "provider": "gemini",  # Identify which provider is being called
     }
     hook_manager = get_hook_manager()
     params = await hook_manager.execute_pre_model_invoke(params)
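Passing token_store and provider through the hook params lets pre-invoke hooks branch on the target provider. A hypothetical hook sketching the idea (the hook name and dict-in/dict-out signature are assumptions for illustration, not taken from the package):

async def cap_prompt_for_gemini(params: dict) -> dict:
    # Hypothetical pre-model-invoke hook: trim oversized prompts,
    # but only when the new "provider" key identifies Gemini.
    if params.get("provider") == "gemini" and len(params.get("prompt", "")) > 100_000:
        params["prompt"] = params["prompt"][:100_000]
    return params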
@@ -279,6 +346,14 @@ async def invoke_gemini(
     max_tokens = params["max_tokens"]
     thinking_budget = params["thinking_budget"]
 
+    # Extract agent context for logging (may be passed via params or original call)
+    agent_context = params.get("agent_context", {})
+    agent_type = agent_context.get("agent_type", "direct")
+    prompt_summary = _summarize_prompt(prompt)
+
+    # Log with agent context and prompt summary
+    logger.info(f"[{agent_type}] → {model}: {prompt_summary}")
+
     access_token = await _ensure_valid_token(token_store, "gemini")
 
     # Resolve user-friendly model name to actual API model ID
@@ -315,11 +390,19 @@ async def invoke_gemini(
     }
 
     # Wrap request body per reference implementation
+    try:
+        import uuid as uuid_module  # Local import workaround for MCP context issue
+
+        request_id = f"invoke-{uuid_module.uuid4()}"
+    except Exception as e:
+        logger.error(f"UUID IMPORT FAILED: {e}")
+        raise RuntimeError(f"CUSTOM ERROR: UUID import failed: {e}")
+
     wrapped_payload = {
         "project": project_id,
         "model": api_model,
         "userAgent": "antigravity",
-        "requestId":
+        "requestId": request_id,
         "request": inner_payload,
     }
 
@@ -537,8 +620,6 @@ async def invoke_gemini_agentic(
     Returns:
         Final text response from the model
     """
-    import uuid
-
     access_token = await _ensure_valid_token(token_store, "gemini")
     api_model = resolve_gemini_model(model)
 
@@ -574,11 +655,13 @@ async def invoke_gemini_agentic(
 
     # Wrap request body per reference implementation
     # From request.ts wrapRequestBody()
+    import uuid as uuid_module  # Local import workaround
+
     wrapped_payload = {
         "project": project_id,
         "model": api_model,
         "userAgent": "antigravity",
-        "requestId": f"agent-{
+        "requestId": f"agent-{uuid_module.uuid4()}",
         "request": inner_payload,
     }
 
@@ -688,7 +771,7 @@ async def invoke_gemini_agentic(
 async def invoke_openai(
     token_store: TokenStore,
     prompt: str,
-    model: str = "gpt-5.2",
+    model: str = "gpt-5.2-codex",
     temperature: float = 0.7,
     max_tokens: int = 4096,
     thinking_budget: int = 0,
@@ -717,6 +800,8 @@ async def invoke_openai(
         "temperature": temperature,
         "max_tokens": max_tokens,
         "thinking_budget": thinking_budget,
+        "token_store": token_store,  # Pass for hooks that need model access
+        "provider": "openai",  # Identify which provider is being called
     }
     hook_manager = get_hook_manager()
     params = await hook_manager.execute_pre_model_invoke(params)
@@ -728,52 +813,115 @@ async def invoke_openai(
     max_tokens = params["max_tokens"]
     thinking_budget = params["thinking_budget"]
 
+    # Extract agent context for logging (may be passed via params or original call)
+    agent_context = params.get("agent_context", {})
+    agent_type = agent_context.get("agent_type", "direct")
+    prompt_summary = _summarize_prompt(prompt)
+
+    # Log with agent context and prompt summary
+    logger.info(f"[{agent_type}] → {model}: {prompt_summary}")
+
     access_token = await _ensure_valid_token(token_store, "openai")
+    logger.info(f"[invoke_openai] Got access token")
+
+    # ChatGPT Backend API - Uses Codex Responses endpoint
+    # Replicates opencode-openai-codex-auth plugin behavior
+    api_url = "https://chatgpt.com/backend-api/codex/responses"
+
+    # Extract account ID from JWT token
+    logger.info(f"[invoke_openai] Extracting account ID from JWT")
+    try:
+        parts = access_token.split(".")
+        payload_b64 = parts[1]
+        padding = 4 - len(payload_b64) % 4
+        if padding != 4:
+            payload_b64 += "=" * padding
+        jwt_payload = json_module.loads(base64.urlsafe_b64decode(payload_b64))
+        account_id = jwt_payload.get("https://api.openai.com/auth", {}).get("chatgpt_account_id")
+    except Exception as e:
+        logger.error(f"Failed to extract account ID from JWT: {e}")
+        account_id = None
 
-    #
-
+    # Fetch official Codex instructions from GitHub
+    instructions = await _fetch_codex_instructions(model)
 
+    # Headers matching opencode-openai-codex-auth plugin
     headers = {
         "Authorization": f"Bearer {access_token}",
         "Content-Type": "application/json",
+        "Accept": "text/event-stream",  # SSE stream
+        "openai-beta": "responses=experimental",
+        "openai-originator": "codex_cli_rs",
     }
 
+    if account_id:
+        headers["x-openai-account-id"] = account_id
+
+    # Request body matching opencode transformation
     payload = {
         "model": model,
-        "
-        "
+        "store": False,  # Required by ChatGPT backend
+        "stream": True,  # Always stream (handler converts to non-stream if needed)
+        "instructions": instructions,
+        "input": [{"role": "user", "content": prompt}],
+        "reasoning": {"effort": "high" if thinking_budget > 0 else "medium", "summary": "auto"},
+        "text": {"verbosity": "medium"},
+        "include": ["reasoning.encrypted_content"],
     }
 
-    #
-
-        payload["max_completion_tokens"] = max_tokens + thinking_budget
-        # For O1, temperature must be 1.0 or omitted usually, but we'll try to pass it
-    else:
-        payload["max_tokens"] = max_tokens
-
-    async with httpx.AsyncClient() as client:
-        response = await client.post(
-            api_url,
-            headers=headers,
-            json=payload,
-            timeout=120.0,
-        )
+    # Stream the response and collect text
+    text_chunks = []
 
-
-
-
-        )
-
-        response.raise_for_status()
+    logger.info(f"[invoke_openai] Calling {api_url} with model {model}")
+    logger.info(f"[invoke_openai] Payload keys: {list(payload.keys())}")
+    logger.info(f"[invoke_openai] Instructions length: {len(instructions)}")
 
-
+    try:
+        async with httpx.AsyncClient() as client:
+            async with client.stream(
+                "POST", api_url, headers=headers, json=payload, timeout=120.0
+            ) as response:
+                logger.info(f"[invoke_openai] Response status: {response.status_code}")
+                if response.status_code == 401:
+                    raise ValueError(
+                        "OpenAI authentication failed. Run: stravinsky-auth login openai"
+                    )
 
-
-
-
-
-
+                if response.status_code >= 400:
+                    error_body = await response.aread()
+                    error_text = error_body.decode("utf-8")
+                    logger.error(f"OpenAI API error {response.status_code}: {error_text}")
+                    logger.error(f"Request payload was: {payload}")
+                    logger.error(f"Request headers were: {headers}")
+                    raise ValueError(f"OpenAI API error {response.status_code}: {error_text}")
+
+                # Parse SSE stream for text deltas
+                async for line in response.aiter_lines():
+                    if line.startswith("data: "):
+                        data_json = line[6:]  # Remove "data: " prefix
+                        try:
+                            data = json_module.loads(data_json)
+                            event_type = data.get("type")
+
+                            # Extract text deltas from SSE stream
+                            if event_type == "response.output_text.delta":
+                                delta = data.get("delta", "")
+                                text_chunks.append(delta)
+
+                        except json_module.JSONDecodeError:
+                            pass  # Skip malformed JSON
+                        except Exception as e:
+                            logger.warning(f"Error processing SSE event: {e}")
+
+                # Return collected text
+                result = "".join(text_chunks)
+                if not result:
                     return "No response generated"
-
-
+                return result
+
+    except httpx.HTTPStatusError as e:
+        logger.error(f"HTTP error: {e}")
+        raise
+    except Exception as e:
+        logger.error(f"Unexpected error in invoke_openai: {e}")
+        raise ValueError(f"Failed to invoke OpenAI: {e}")
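The account-ID extraction above leans on the JWT layout: three dot-separated base64url segments, where the middle segment is JSON, and base64 decoding requires the length to be re-padded to a multiple of four. A self-contained sketch of that decode step, using a synthetic unsigned token (the claim path mirrors the hunk; the token itself is fabricated for illustration):

import base64
import json

def decode_jwt_payload(token: str) -> dict:
    # Take the middle (payload) segment and re-pad before decoding.
    payload_b64 = token.split(".")[1]
    padding = 4 - len(payload_b64) % 4
    if padding != 4:
        payload_b64 += "=" * padding
    return json.loads(base64.urlsafe_b64decode(payload_b64))

# Build a throwaway token: "{}" header, JSON claims, dummy signature.
claims = {"https://api.openai.com/auth": {"chatgpt_account_id": "acct-123"}}
body = base64.urlsafe_b64encode(json.dumps(claims).encode()).rstrip(b"=")
token = b"e30." + body + b".sig"
print(decode_jwt_payload(token.decode())["https://api.openai.com/auth"]["chatgpt_account_id"])
# prints: acct-123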
{stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: stravinsky
-Version: 0.2.40
+Version: 0.2.52
 Summary: MCP Bridge for Claude Code with Multi-Model Support. Install globally: claude mcp add --scope user stravinsky -- uvx stravinsky. Add to CLAUDE.md: See https://pypi.org/project/stravinsky/
 Project-URL: Repository, https://github.com/GratefulDave/stravinsky
 Project-URL: Issues, https://github.com/GratefulDave/stravinsky/issues
{stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/RECORD
CHANGED

@@ -1,14 +1,15 @@
-mcp_bridge/__init__.py,sha256=
-mcp_bridge/server.py,sha256=
-mcp_bridge/server_tools.py,sha256=
+mcp_bridge/__init__.py,sha256=1PHsAGbNJxj2_h0lehC9jzS9Pgx7eDxyLsUXGgyT-LI,23
+mcp_bridge/server.py,sha256=QdWgYZHHI4zWV6R7bE0qA3Go77rxmet__qo4nsY_Qt4,25391
+mcp_bridge/server_tools.py,sha256=IIC_2X6gNFks5ohc9Fzmg3b5FhcWCusvo15r7DqDphQ,27378
 mcp_bridge/auth/__init__.py,sha256=AGHNtKzqvZYMLQ35Qg6aOabpxBqmkR-pjXv8Iby9oMw,797
 mcp_bridge/auth/cli.py,sha256=jaXyrzq6HwnW61g6CHHcj8bF5PJLYrYufD_jmN0jeiQ,8502
 mcp_bridge/auth/oauth.py,sha256=gWYk3KJWbUM1J5AKVDJ_4k4zdQzwwaYJZ5J25If0r8c,12804
 mcp_bridge/auth/openai_oauth.py,sha256=0Ks2X-NXLCBzqs3xnbj9QLZpugICOX5qB5y5vtDENOo,11522
+mcp_bridge/auth/token_refresh.py,sha256=goWp1Wz3yWOoFuxvFMDZLjo8gLPXcajZxHpzZtSoKcQ,3760
 mcp_bridge/auth/token_store.py,sha256=3A6TZJ7Wju6QfhALeX4IMhY5jzb9OWMrDzwRbfAukiU,5650
 mcp_bridge/config/__init__.py,sha256=uapHdrSWWrafVKD9CTB1J_7Dw0_RajRhoDGjy9zH21o,256
 mcp_bridge/config/hooks.py,sha256=WvWC6ZUc8y1IXPlGCjLYAAsGGigd5tWeGiw585OGNwA,4624
-mcp_bridge/hooks/__init__.py,sha256=
+mcp_bridge/hooks/__init__.py,sha256=WF4AavhTD1IcrB4RTLSB0e3GGeEFxeljM3QlHXC9OO0,2867
 mcp_bridge/hooks/agent_reminder.py,sha256=OxKPxi7e2MIBm44Ebzzl9j9fDV5bBgogGF0lj7wun5s,1913
 mcp_bridge/hooks/auto_slash_command.py,sha256=LUndZHxUzEF7PQuOdp3v7VfqNUgsiDE2gzI_TkR9he4,5280
 mcp_bridge/hooks/budget_optimizer.py,sha256=Im0qSGVUdRByk04hP5VyKt7tjlDVYG0LJb6IeUjjnj8,1323
@@ -19,30 +20,35 @@ mcp_bridge/hooks/directory_context.py,sha256=0VjdJITJIGUqR4-q-wZlB6kmkFQMraaFvU2
 mcp_bridge/hooks/edit_recovery.py,sha256=CyhdFF3rV-sqZ13Mn8I8I5oMqB2KLIl6Bziv7GcsX0o,1749
 mcp_bridge/hooks/empty_message_sanitizer.py,sha256=iZZnETju1wBvJXV1k4msV8FxLNg5LkVm2eaFA2nQWxI,6606
 mcp_bridge/hooks/keyword_detector.py,sha256=5kztrsQ2NtT5qdDTC-J49nxPB_D0tIjT1AVAAry4FPE,4866
-mcp_bridge/hooks/manager.py,sha256=
-mcp_bridge/hooks/
+mcp_bridge/hooks/manager.py,sha256=SG08soeyHSzfl9NE9yX2x5B_NEbv9LxJnmRBUaOhGfE,5835
+mcp_bridge/hooks/parallel_enforcer.py,sha256=qzPB7PX22Laz6yC3MCbAmYBhP59P9ac6WHpgXOABOuQ,3651
+mcp_bridge/hooks/pre_compact.py,sha256=SeVMdknqiXich_m7MLSMk_GVh570qva8XIl8-sFDMfQ,6328
+mcp_bridge/hooks/preemptive_compaction.py,sha256=x4-dLWCconVFD-nR93P1EB_tG9n4LHdESq7uDFS3ut8,7937
+mcp_bridge/hooks/session_idle.py,sha256=5wo6XakXglWVCP-HN1i976OptfntwCNnbjxoM--veh4,3525
 mcp_bridge/hooks/session_recovery.py,sha256=c5eoIWJAgMTPY0wq_64MmCUNLCB_GSowZlA3lo4R9LU,6260
 mcp_bridge/hooks/todo_enforcer.py,sha256=LRvVxYILqQ6YuQl3tky4hgyD-irXIPDE5e0EdKr6jcc,2274
 mcp_bridge/hooks/truncator.py,sha256=Rse93FS2aJxJGUOJIGY_ZYAS4yIUQBpBlVFo-IYbbKE,598
 mcp_bridge/native_hooks/context.py,sha256=JBHqnX75qtMO3JAEFooBEAK6DxcsVaXykhrP7tdFm6E,949
 mcp_bridge/native_hooks/edit_recovery.py,sha256=1OWpb3v87txFUsgnNe1hoeTI7rIKCkKYONld5BK1TyA,1503
 mcp_bridge/native_hooks/stravinsky_mode.py,sha256=UCiBk4YtGX6ubKTXYyGZUBIQG_CuFlHKPW-8S494BqU,2856
+mcp_bridge/native_hooks/todo_delegation.py,sha256=3bdOKXcNDTjyV9sQlxQEsvop7nOHcqu6SOdvSnStQko,1520
 mcp_bridge/native_hooks/truncator.py,sha256=h3hb8sZXTvc59C0-5GdZwCVZHlxBKdo47JT9TMxHG3g,530
-mcp_bridge/prompts/__init__.py,sha256=
+mcp_bridge/prompts/__init__.py,sha256=wzveum1x50IXGkE0JFpvHAj8P0yqnSrgtrmyIIpnnBI,358
 mcp_bridge/prompts/delphi.py,sha256=ZlnLY2o1PrK3CxLLaCkozAGPIAJ5OFmw5u8o6-LXmgI,4960
-mcp_bridge/prompts/dewey.py,sha256=
+mcp_bridge/prompts/dewey.py,sha256=u5OaOj8kinZERflWvUv4M4WVAwxw0kJrNT00wHksIHI,9264
 mcp_bridge/prompts/document_writer.py,sha256=hiCbxgTU8HKPJkS0eNpPPtzSqDXPreApU2OqiS6zh-0,5618
-mcp_bridge/prompts/explore.py,sha256=
+mcp_bridge/prompts/explore.py,sha256=oZxh4fe4KNGa_ozakJSpLYeQVfO4-jQ2M1BSOw1Sw10,5801
 mcp_bridge/prompts/frontend.py,sha256=j91I8k5vcVed13eeX-Ebiv49x9Qj4HO_SQN1xhB8TLQ,4943
 mcp_bridge/prompts/multimodal.py,sha256=Svw11N392LjshalasOd80X0Qw_qtOMqu_lD-_HmQDIo,1936
-mcp_bridge/prompts/
+mcp_bridge/prompts/planner.py,sha256=wcdXIxPlZM0UYd9KQSKXnuXTRj2acSnqlqeQ_NxxEqw,6985
+mcp_bridge/prompts/stravinsky.py,sha256=gJgzMgsa6u0fLbFWOkPY2lq78-yJTuD46wrnk-5rem0,27498
 mcp_bridge/tools/__init__.py,sha256=SRnMaUni0BhlvCABBEYeyveNiOAMQPNBXmjUKG6aXQA,1150
-mcp_bridge/tools/agent_manager.py,sha256=
-mcp_bridge/tools/background_tasks.py,sha256=
+mcp_bridge/tools/agent_manager.py,sha256=aJlz_i0dy2MyxWV_rF5LAOzCoTG09GkTLmNE1KB2A0w,31280
+mcp_bridge/tools/background_tasks.py,sha256=bwbVYWCDzuXb3Q_OdIr10r76lgjFTphfjdTmOHYZI7w,5252
 mcp_bridge/tools/code_search.py,sha256=sR-alLQuxaXUFB9hby_wQsQu3Io644wdnpdOM_vm0aw,9978
 mcp_bridge/tools/continuous_loop.py,sha256=MM3FnF3ULuR32h0tqJP8uF48iJg6R9dbyHy_36KLOls,2100
 mcp_bridge/tools/init.py,sha256=sU95M9M_tjsfuew389TrFrxxeCavuSC16qbkydk_6PU,1586
-mcp_bridge/tools/model_invoke.py,sha256=
+mcp_bridge/tools/model_invoke.py,sha256=dET6V7Z6b5KbgtyZO0HH2E18yWd-d2531bffIM0_xrg,32457
 mcp_bridge/tools/project_context.py,sha256=bXKxuW1pGjtIbeNjMgpBoQL-d_CI94UPBVpRjUyhX20,4707
 mcp_bridge/tools/session_manager.py,sha256=tCVLLvO-Kttla7OxPImb_NSGL_9aW46ilq5ej_IcnlA,9252
 mcp_bridge/tools/skill_loader.py,sha256=RQ5eC357pm-6q85G3EyrQugz0S3OO5lxWtM9n9ECF-c,6010
@@ -51,7 +57,7 @@ mcp_bridge/tools/templates.py,sha256=A7eFk08Y2TZ2FPnsYLkVYCGOm1dd8scpNfcjCl_vC24
 mcp_bridge/tools/lsp/__init__.py,sha256=fLiII9qgeachI3MlkO6uGulfUH3T0YDeyEfO65bbxdw,549
 mcp_bridge/tools/lsp/tools.py,sha256=nfXT3LBD5Mas_98f1zwu62jkpInGXUR-1vLl1zIykUw,16315
 mcp_bridge/utils/__init__.py,sha256=pbHV4nq5SLUYcAyTmLUZYrp293Ctud57X8hwsMGA_BM,20
-stravinsky-0.2.
-stravinsky-0.2.
-stravinsky-0.2.
-stravinsky-0.2.
+stravinsky-0.2.52.dist-info/METADATA,sha256=STvBvkJjq9moBLq9_-fQS83e0v0wL7L-nKKp8_SMZzg,7091
+stravinsky-0.2.52.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+stravinsky-0.2.52.dist-info/entry_points.txt,sha256=BISwF7i71Oen7jFVmBXz8fxiU11Cp415wPF0xXG2Q3s,97
+stravinsky-0.2.52.dist-info/RECORD,,
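Each RECORD line has the form path,sha256=<digest>,<size>, where the digest is the urlsafe-base64 SHA-256 of the file contents with trailing '=' padding stripped, per the wheel spec. A sketch of how such a line is derived (the file contents below are invented for illustration):

import base64
import hashlib

def record_line(path: str, data: bytes) -> str:
    # urlsafe base64 of the SHA-256 digest, '=' padding stripped,
    # followed by the size of the file in bytes.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

print(record_line("mcp_bridge/__init__.py", b"__version__ = '0.2.52'\n"))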
{stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/WHEEL
File without changes

{stravinsky-0.2.40.dist-info → stravinsky-0.2.52.dist-info}/entry_points.txt
File without changes