stravinsky 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of stravinsky might be problematic. Click here for more details.
- mcp_bridge/__init__.py +5 -0
- mcp_bridge/auth/__init__.py +32 -0
- mcp_bridge/auth/cli.py +208 -0
- mcp_bridge/auth/oauth.py +418 -0
- mcp_bridge/auth/openai_oauth.py +350 -0
- mcp_bridge/auth/token_store.py +195 -0
- mcp_bridge/config/__init__.py +14 -0
- mcp_bridge/config/hooks.py +174 -0
- mcp_bridge/prompts/__init__.py +18 -0
- mcp_bridge/prompts/delphi.py +110 -0
- mcp_bridge/prompts/dewey.py +183 -0
- mcp_bridge/prompts/document_writer.py +155 -0
- mcp_bridge/prompts/explore.py +118 -0
- mcp_bridge/prompts/frontend.py +112 -0
- mcp_bridge/prompts/multimodal.py +58 -0
- mcp_bridge/prompts/stravinsky.py +326 -0
- mcp_bridge/server.py +866 -0
- mcp_bridge/tools/__init__.py +28 -0
- mcp_bridge/tools/agent_manager.py +665 -0
- mcp_bridge/tools/background_tasks.py +166 -0
- mcp_bridge/tools/code_search.py +301 -0
- mcp_bridge/tools/lsp/__init__.py +29 -0
- mcp_bridge/tools/lsp/tools.py +526 -0
- mcp_bridge/tools/model_invoke.py +233 -0
- mcp_bridge/tools/project_context.py +141 -0
- mcp_bridge/tools/session_manager.py +302 -0
- mcp_bridge/tools/skill_loader.py +212 -0
- mcp_bridge/utils/__init__.py +1 -0
- stravinsky-0.1.2.dist-info/METADATA +159 -0
- stravinsky-0.1.2.dist-info/RECORD +32 -0
- stravinsky-0.1.2.dist-info/WHEEL +4 -0
- stravinsky-0.1.2.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Model invocation tools for Gemini and OpenAI.
|
|
3
|
+
|
|
4
|
+
These tools use OAuth tokens from the token store to authenticate
|
|
5
|
+
API requests to external model providers.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import time
|
|
9
|
+
|
|
10
|
+
import httpx
|
|
11
|
+
|
|
12
|
+
from ..auth.token_store import TokenStore
|
|
13
|
+
from ..auth.oauth import refresh_access_token as gemini_refresh, ANTIGRAVITY_HEADERS
|
|
14
|
+
from ..auth.openai_oauth import refresh_access_token as openai_refresh
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
async def _ensure_valid_token(token_store: TokenStore, provider: str) -> str:
    """
    Get a valid access token for *provider*, refreshing it if needed.

    Args:
        token_store: Token store holding OAuth credentials
        provider: Provider name ("gemini" or "openai")

    Returns:
        Valid access token

    Raises:
        ValueError: If not authenticated, the provider is unknown,
            or the token refresh fails
    """
    # Check if token needs refresh (with 5 minute buffer)
    if token_store.needs_refresh(provider, buffer_seconds=300):
        token = token_store.get_token(provider)

        if not token or not token.get("refresh_token"):
            raise ValueError(
                f"Not authenticated with {provider}. "
                f"Run: python -m mcp_bridge.auth.cli login {provider}"
            )

        # Resolve the refresher BEFORE the try block: in the original, the
        # "Unknown provider" ValueError raised inside the try was caught by
        # the broad `except Exception` below and re-reported as a
        # "Token refresh failed" error, masking the real problem.
        if provider == "gemini":
            refresh = gemini_refresh
        elif provider == "openai":
            refresh = openai_refresh
        else:
            raise ValueError(f"Unknown provider: {provider}")

        try:
            result = refresh(token["refresh_token"])

            # Persist rotated credentials; some providers do not return a
            # new refresh token, so fall back to the existing one.
            token_store.set_token(
                provider=provider,
                access_token=result.access_token,
                refresh_token=result.refresh_token or token["refresh_token"],
                expires_at=time.time() + result.expires_in,
            )

            return result.access_token
        except Exception as e:
            # Chain the cause so the underlying failure isn't lost.
            raise ValueError(
                f"Token refresh failed: {e}. "
                f"Run: python -m mcp_bridge.auth.cli login {provider}"
            ) from e

    access_token = token_store.get_access_token(provider)
    if not access_token:
        raise ValueError(
            f"Not authenticated with {provider}. "
            f"Run: python -m mcp_bridge.auth.cli login {provider}"
        )

    return access_token
|
72
|
+
|
|
73
|
+
|
|
74
|
+
async def invoke_gemini(
    token_store: TokenStore,
    prompt: str,
    model: str = "gemini-3-flash",
    temperature: float = 0.7,
    max_tokens: int = 4096,
    thinking_budget: int = 0,
) -> str:
    """
    Invoke a Gemini model with the given prompt.

    Uses OAuth authentication with Antigravity credentials.

    Args:
        token_store: Token store for OAuth credentials
        prompt: The prompt to send to Gemini
        model: Gemini model to use
        temperature: Sampling temperature (0.0-2.0)
        max_tokens: Maximum tokens in response
        thinking_budget: Token budget for model "thinking"; when > 0 a
            thinkingConfig block is added to the request (0 disables it)

    Returns:
        The model's response text.

    Raises:
        ValueError: If not authenticated with Gemini
        httpx.HTTPStatusError: If API request fails
    """
    access_token = await _ensure_valid_token(token_store, "gemini")

    # Gemini API endpoint with OAuth
    api_url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"

    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json",
        **ANTIGRAVITY_HEADERS,  # Include Antigravity headers
    }

    payload = {
        "contents": [{"parts": [{"text": prompt}]}],
        "generationConfig": {
            "temperature": temperature,
            "maxOutputTokens": max_tokens,
        },
    }

    # Add thinking budget if supported by model/API
    if thinking_budget > 0:
        # For Gemini 2.0+ Thinking models
        payload["generationConfig"]["thinkingConfig"] = {
            "includeThoughts": True,
            "tokenLimit": thinking_budget,
        }

    async with httpx.AsyncClient() as client:
        response = await client.post(
            api_url,
            headers=headers,
            json=payload,
            timeout=120.0,
        )

        if response.status_code == 401:
            raise ValueError(
                "Gemini authentication expired. "
                "Run: python -m mcp_bridge.auth.cli login gemini"
            )

        response.raise_for_status()

        data = response.json()

        # Extract the first text part of the first candidate. Every lookup
        # uses .get() with a default and list access is guarded, so the
        # original `except (KeyError, IndexError)` clause was unreachable
        # and has been dropped.
        # NOTE(review): with includeThoughts=True the first part may be a
        # thought rather than the answer — confirm against the API response.
        candidates = data.get("candidates", [])
        if candidates:
            parts = candidates[0].get("content", {}).get("parts", [])
            if parts:
                return parts[0].get("text", "")
        return "No response generated"
|
157
|
+
|
|
158
|
+
|
|
159
|
+
async def invoke_openai(
    token_store: TokenStore,
    prompt: str,
    model: str = "gpt-5.2",
    temperature: float = 0.7,
    max_tokens: int = 4096,
    thinking_budget: int = 0,
) -> str:
    """
    Invoke an OpenAI model with the given prompt.

    Args:
        token_store: Token store for API key
        prompt: The prompt to send to OpenAI
        model: OpenAI model to use
        temperature: Sampling temperature (0.0-2.0)
        max_tokens: Maximum tokens in response
        thinking_budget: Extra completion-token budget for reasoning
            models; when > 0, `max_completion_tokens` is sent instead
            of `max_tokens`

    Returns:
        The model's response text.

    Raises:
        ValueError: If not authenticated with OpenAI
        httpx.HTTPStatusError: If API request fails
    """
    access_token = await _ensure_valid_token(token_store, "openai")

    # OpenAI Chat Completions API
    api_url = "https://api.openai.com/v1/chat/completions"

    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json",
    }

    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
    }

    # Handle thinking budget for O1/O3 style models (GPT-5.2)
    if thinking_budget > 0:
        payload["max_completion_tokens"] = max_tokens + thinking_budget
        # For O1, temperature must be 1.0 or omitted usually, but we'll try to pass it
    else:
        payload["max_tokens"] = max_tokens

    async with httpx.AsyncClient() as client:
        response = await client.post(
            api_url,
            headers=headers,
            json=payload,
            timeout=120.0,
        )

        if response.status_code == 401:
            raise ValueError(
                "OpenAI authentication failed. "
                "Run: python -m mcp_bridge.auth.cli login openai"
            )

        response.raise_for_status()

        data = response.json()

        # Extract the assistant message. All lookups use .get() with
        # defaults and the list access is guarded, so the original
        # `except (KeyError, IndexError)` clause was unreachable.
        # `content` can be JSON null (e.g. tool-call responses), which
        # .get() would return as None — coerce to "" to keep the -> str
        # contract.
        choices = data.get("choices", [])
        if choices:
            return choices[0].get("message", {}).get("content", "") or ""
        return "No response generated"
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Project Context and System Health Tools.
|
|
3
|
+
|
|
4
|
+
Provides the agent with environmental awareness (Git, Rules, Todos)
|
|
5
|
+
and ensures all required dependencies are installed and authenticated.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import os
|
|
9
|
+
import shutil
|
|
10
|
+
import subprocess
|
|
11
|
+
import sys
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any, Dict, List, Optional
|
|
14
|
+
|
|
15
|
+
from ..auth.token_store import TokenStore
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
async def get_project_context(project_path: Optional[str] = None) -> str:
    """
    Summarize project environment: Git status, local rules, and pending todos.

    Args:
        project_path: Path to the project root (defaults to the current
            working directory)

    Returns:
        Formatted summary of the project context.
    """
    root = Path(project_path) if project_path else Path.cwd()
    context = []

    # 1. Git Information — run git inside `root`. The original omitted
    # cwd=, so git always inspected the server's CWD and project_path was
    # silently ignored for this section.
    context.append("### 🌿 Git Context")
    try:
        branch = subprocess.check_output(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            stderr=subprocess.DEVNULL, text=True, cwd=root,
        ).strip()
        status = subprocess.check_output(
            ["git", "status", "--short"],
            stderr=subprocess.DEVNULL, text=True, cwd=root,
        ).strip()

        context.append(f"**Branch**: `{branch}`")
        if status:
            files_changed = len(status.split("\n"))
            context.append(f"**Status**: {files_changed} files modified (staged/unstaged)")
        else:
            context.append("**Status**: Clean")
    except Exception:
        # git missing, not a repo, or any other failure — degrade gracefully
        context.append("**Status**: Not a git repository")

    # 2. Local Rules (.claude/rules/)
    rules_dir = root / ".claude" / "rules"
    if rules_dir.exists():
        context.append("\n### 📜 Local Project Rules")
        rule_files = list(rules_dir.glob("*.md"))
        if rule_files:
            for rf in rule_files:
                try:
                    content = rf.read_text().strip()
                    context.append(f"#### {rf.name}\n{content}")
                except Exception:
                    # Unreadable rule file: skip it rather than fail the summary
                    continue
        else:
            context.append("_No specific rules found in .claude/rules/_")

    # 3. Pending Todos
    context.append("\n### 📝 Pending Todos (Top 20)")
    try:
        # Search for [ ] in code files, excluding common noise directories
        todo_cmd = [
            "rg", "--line-number", "--no-heading", "--column",
            "--glob", "!.git/*", "--glob", "!node_modules/*",
            "--glob", "!.venv/*",
            r"\[ \]", str(root)
        ]
        todo_output = subprocess.check_output(
            todo_cmd, stderr=subprocess.DEVNULL, text=True
        ).strip()

        if todo_output:
            # Split once (the original split the output twice).
            todo_lines = todo_output.split("\n")
            for line in todo_lines[:20]:
                context.append(f"- {line}")
            if len(todo_lines) > 20:
                context.append("_(... and more)_")
        else:
            context.append("_No pending [ ] markers found._")
    except Exception:
        # rg missing, or rg exits non-zero (including "no matches")
        context.append("_Ripgrep not found or error searching for todos._")

    return "\n".join(context)
|
93
|
+
|
|
94
|
+
|
|
95
|
+
async def get_system_health() -> str:
    """
    Comprehensive check of system dependencies and authentication status.

    Returns:
        Checklist of system health.
    """
    report = ["## 🏥 Stravinsky System Health Report\n"]

    # Required command-line tools, keyed by executable name.
    report.append("### 🛠️ CLI Dependencies")
    cli_tools = {
        "rg": "ripgrep",
        "fd": "fd-find",
        "sg": "ast-grep",
        "gh": "GitHub CLI",
        "ruff": "Python Linter",
        "tsc": "TypeScript Compiler",
        "git": "Git",
    }

    for executable, label in cli_tools.items():
        found = shutil.which(executable)
        icon = "✅" if found else "❌"
        state = "Installed" if found else "MISSING"
        report.append(f"- {icon} **{label}** (`{executable}`): {state}")

    # OAuth status for each supported provider.
    report.append("\n### 🔑 Provider Authentication")
    store = TokenStore()
    for provider in ("gemini", "openai"):
        authed = store.has_valid_token(provider)
        icon = "✅" if authed else "❌"
        state = "Authenticated" if authed else "NOT LOGGED IN"
        report.append(f"- {icon} **{provider.capitalize()}**: {state}")

    # Interpreter / virtualenv details.
    report.append("\n### 🐍 Environment")
    report.append(f"- **Python**: `{sys.version.split()[0]}`")
    report.append(f"- **Virtualenv**: `{os.environ.get('VIRTUAL_ENV', 'None')}`")

    report.append("\n---")
    report.append("**Resolution Guide**:")
    report.append("- For missing CLI tools: Use `brew install` or `npm install -g` as appropriate.")
    report.append("- For Auth: Run `python -m mcp_bridge.auth.cli login [provider]`.")

    return "\n".join(report)
|
@@ -0,0 +1,302 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Session Manager Tools
|
|
3
|
+
|
|
4
|
+
Tools for navigating and searching Claude Code session history.
|
|
5
|
+
Sessions are stored in ~/.claude/projects/ as JSONL files.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def get_sessions_directory() -> Path:
    """Return the directory where Claude Code stores session files."""
    return Path.home().joinpath(".claude", "projects")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def list_sessions(
    project_path: str | None = None,
    limit: int = 20,
    from_date: str | None = None,
    to_date: str | None = None,
) -> str:
    """
    List Claude Code sessions with optional filtering.

    Args:
        project_path: Filter by project path (currently a no-op: project
            directories are hashed and no reverse mapping is available)
        limit: Maximum sessions to return
        from_date: Filter from date (ISO format)
        to_date: Filter until date (ISO format)

    Returns:
        Formatted list of sessions.

    Raises:
        ValueError: If from_date/to_date are not valid ISO dates. (The
            original parsed them per file inside a broad except, so a bad
            filter was silently reported as "No sessions found".)
    """
    sessions_dir = get_sessions_directory()
    if not sessions_dir.exists():
        return "No sessions directory found"

    # Parse the date filters once, outside the per-file loop.
    from_dt = datetime.fromisoformat(from_date) if from_date else None
    to_dt = datetime.fromisoformat(to_date) if to_date else None

    sessions = []

    # Walk through project directories
    for project_dir in sessions_dir.iterdir():
        if not project_dir.is_dir():
            continue

        # NOTE(review): project_path filtering is intentionally skipped —
        # project dirs are hashed, so a mapping would be needed.

        # Find session files
        for session_file in project_dir.glob("*.jsonl"):
            try:
                stat = session_file.stat()
                mtime = datetime.fromtimestamp(stat.st_mtime)

                # Date filters
                if from_dt and mtime < from_dt:
                    continue
                if to_dt and mtime > to_dt:
                    continue

                sessions.append({
                    "id": session_file.stem,
                    "path": str(session_file),
                    "project": project_dir.name,
                    "modified": mtime.isoformat(),
                    "size": stat.st_size,
                })
            except Exception:
                # Unreadable session file — skip it
                continue

    # Sort by modified time, newest first (ISO strings sort chronologically)
    sessions.sort(key=lambda s: s["modified"], reverse=True)
    sessions = sessions[:limit]

    if not sessions:
        return "No sessions found"

    lines = [f"Found {len(sessions)} sessions:\n"]
    for s in sessions:
        lines.append(f" {s['id'][:12]}... ({s['modified'][:10]})")

    return "\n".join(lines)
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def read_session(
    session_id: str,
    limit: int | None = None,
    include_metadata: bool = False,
) -> str:
    """
    Read messages from a session.

    Args:
        session_id: Session ID (filename stem); a prefix also matches
        limit: Maximum messages to read
        include_metadata: Include message metadata (currently unused;
            kept for interface compatibility)

    Returns:
        Formatted session content.
    """
    sessions_dir = get_sessions_directory()

    # Find the session file: prefer an exact match, else the first prefix
    # match — and stop scanning once found. The original never broke the
    # outer loop after a partial match, so a later project directory could
    # overwrite a match already found.
    session_file = None
    for project_dir in sessions_dir.iterdir():
        if not project_dir.is_dir():
            continue
        candidate = project_dir / f"{session_id}.jsonl"
        if candidate.exists():
            session_file = candidate
        else:
            # Also check partial matches
            session_file = next(project_dir.glob(f"{session_id}*.jsonl"), None)
        if session_file is not None:
            break

    if not session_file or not session_file.exists():
        return f"Session not found: {session_id}"

    messages = []
    try:
        with open(session_file) as f:
            for line in f:
                if line.strip():
                    try:
                        messages.append(json.loads(line))
                    except json.JSONDecodeError:
                        # Skip malformed JSONL lines
                        continue
    except Exception as e:
        return f"Error reading session: {e}"

    if limit and limit > 0:
        messages = messages[:limit]

    if not messages:
        return "Session is empty"

    lines = [f"Session: {session_id}\nMessages: {len(messages)}\n"]

    for i, msg in enumerate(messages[:50]):  # Limit display
        role = msg.get("role", "unknown")
        content = msg.get("content", "")
        if isinstance(content, list):
            # Flatten structured content blocks to their text fields
            content = " ".join(str(c.get("text", "")) for c in content if isinstance(c, dict))
        content = content[:200] + "..." if len(content) > 200 else content
        lines.append(f"[{i+1}] {role}: {content}")

    if len(messages) > 50:
        lines.append(f"\n... and {len(messages) - 50} more messages")

    return "\n".join(lines)
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def search_sessions(
    query: str,
    session_id: str | None = None,
    case_sensitive: bool = False,
    limit: int = 20,
) -> str:
    """
    Search across session messages.

    Args:
        query: Search query
        session_id: Search in specific session only
        case_sensitive: Case-sensitive search
        limit: Maximum results

    Returns:
        Search results with context.
    """
    sessions_dir = get_sessions_directory()
    # Guard against a missing sessions directory. The original called
    # iterdir() unguarded and raised FileNotFoundError, while the sibling
    # list_sessions() checks for this.
    if not sessions_dir.exists():
        return f"No results for: {query}"

    results = []

    search_query = query if case_sensitive else query.lower()

    # Find session files to search
    session_files = []
    for project_dir in sessions_dir.iterdir():
        if not project_dir.is_dir():
            continue

        if session_id:
            session_files.extend(project_dir.glob(f"{session_id}*.jsonl"))
        else:
            session_files.extend(project_dir.glob("*.jsonl"))

    for session_file in session_files[:50]:  # Limit sessions to search
        try:
            with open(session_file) as f:
                for line_num, line in enumerate(f):
                    if not line.strip():
                        continue

                    # Cheap raw-line test before paying for json.loads
                    check_line = line if case_sensitive else line.lower()
                    if search_query in check_line:
                        try:
                            msg = json.loads(line)
                            content = msg.get("content", "")
                            if isinstance(content, list):
                                content = " ".join(
                                    str(c.get("text", "")) for c in content if isinstance(c, dict)
                                )

                            results.append({
                                "session": session_file.stem[:12],
                                "line": line_num,
                                "role": msg.get("role", "unknown"),
                                "snippet": content[:150],
                            })

                            if len(results) >= limit:
                                break
                        except json.JSONDecodeError:
                            continue
        except Exception:
            # Unreadable file — move on to the next one
            continue

        if len(results) >= limit:
            break

    if not results:
        return f"No results for: {query}"

    lines = [f"Found {len(results)} matches for '{query}':\n"]
    for r in results:
        lines.append(f" [{r['session']}] {r['role']}: {r['snippet']}...")

    return "\n".join(lines)
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def get_session_info(session_id: str) -> str:
    """
    Get metadata about a session.

    Args:
        session_id: Session ID; a prefix also matches

    Returns:
        Session metadata and statistics.
    """
    sessions_dir = get_sessions_directory()
    # Guard against a missing sessions directory. The original called
    # iterdir() unguarded and raised FileNotFoundError instead of
    # reporting the session as not found.
    if not sessions_dir.exists():
        return f"Session not found: {session_id}"

    # Find session file (first prefix match wins)
    session_file = None
    for project_dir in sessions_dir.iterdir():
        if not project_dir.is_dir():
            continue
        session_file = next(project_dir.glob(f"{session_id}*.jsonl"), None)
        if session_file:
            break

    if not session_file or not session_file.exists():
        return f"Session not found: {session_id}"

    try:
        stat = session_file.stat()
        message_count = 0
        user_count = 0
        assistant_count = 0

        with open(session_file) as f:
            for line in f:
                if line.strip():
                    try:
                        msg = json.loads(line)
                        message_count += 1
                        role = msg.get("role", "")
                        if role == "user":
                            user_count += 1
                        elif role == "assistant":
                            assistant_count += 1
                    except json.JSONDecodeError:
                        # Malformed line: not counted as a message
                        continue

        lines = [
            f"Session: {session_id}",
            f"File: {session_file}",
            f"Size: {stat.st_size / 1024:.1f} KB",
            f"Modified: {datetime.fromtimestamp(stat.st_mtime).isoformat()}",
            f"Messages: {message_count}",
            f" User: {user_count}",
            f" Assistant: {assistant_count}",
        ]
        return "\n".join(lines)

    except Exception as e:
        return f"Error: {e}"