vibecore-0.2.0a1-py3-none-any.whl
- vibecore/__init__.py +0 -0
- vibecore/agents/default.py +79 -0
- vibecore/agents/prompts.py +12 -0
- vibecore/agents/task_agent.py +66 -0
- vibecore/cli.py +131 -0
- vibecore/context.py +24 -0
- vibecore/handlers/__init__.py +5 -0
- vibecore/handlers/stream_handler.py +231 -0
- vibecore/main.py +506 -0
- vibecore/main.tcss +0 -0
- vibecore/mcp/__init__.py +6 -0
- vibecore/mcp/manager.py +167 -0
- vibecore/mcp/server_wrapper.py +109 -0
- vibecore/models/__init__.py +5 -0
- vibecore/models/anthropic.py +239 -0
- vibecore/prompts/common_system_prompt.txt +64 -0
- vibecore/py.typed +0 -0
- vibecore/session/__init__.py +5 -0
- vibecore/session/file_lock.py +127 -0
- vibecore/session/jsonl_session.py +236 -0
- vibecore/session/loader.py +193 -0
- vibecore/session/path_utils.py +81 -0
- vibecore/settings.py +161 -0
- vibecore/tools/__init__.py +1 -0
- vibecore/tools/base.py +27 -0
- vibecore/tools/file/__init__.py +5 -0
- vibecore/tools/file/executor.py +282 -0
- vibecore/tools/file/tools.py +184 -0
- vibecore/tools/file/utils.py +78 -0
- vibecore/tools/python/__init__.py +1 -0
- vibecore/tools/python/backends/__init__.py +1 -0
- vibecore/tools/python/backends/terminal_backend.py +58 -0
- vibecore/tools/python/helpers.py +80 -0
- vibecore/tools/python/manager.py +208 -0
- vibecore/tools/python/tools.py +27 -0
- vibecore/tools/shell/__init__.py +5 -0
- vibecore/tools/shell/executor.py +223 -0
- vibecore/tools/shell/tools.py +156 -0
- vibecore/tools/task/__init__.py +5 -0
- vibecore/tools/task/executor.py +51 -0
- vibecore/tools/task/tools.py +51 -0
- vibecore/tools/todo/__init__.py +1 -0
- vibecore/tools/todo/manager.py +31 -0
- vibecore/tools/todo/models.py +36 -0
- vibecore/tools/todo/tools.py +111 -0
- vibecore/utils/__init__.py +5 -0
- vibecore/utils/text.py +28 -0
- vibecore/widgets/core.py +332 -0
- vibecore/widgets/core.tcss +63 -0
- vibecore/widgets/expandable.py +121 -0
- vibecore/widgets/expandable.tcss +69 -0
- vibecore/widgets/info.py +25 -0
- vibecore/widgets/info.tcss +17 -0
- vibecore/widgets/messages.py +232 -0
- vibecore/widgets/messages.tcss +85 -0
- vibecore/widgets/tool_message_factory.py +121 -0
- vibecore/widgets/tool_messages.py +483 -0
- vibecore/widgets/tool_messages.tcss +289 -0
- vibecore-0.2.0a1.dist-info/METADATA +407 -0
- vibecore-0.2.0a1.dist-info/RECORD +63 -0
- vibecore-0.2.0a1.dist-info/WHEEL +4 -0
- vibecore-0.2.0a1.dist-info/entry_points.txt +2 -0
- vibecore-0.2.0a1.dist-info/licenses/LICENSE +21 -0
vibecore/session/jsonl_session.py
ADDED

@@ -0,0 +1,236 @@
+"""JSONL-based session storage implementation for openai-agents SDK."""
+
+import json
+import logging
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from openai.types.responses import ResponseInputItemParam as TResponseInputItem
+
+from .file_lock import acquire_file_lock, cleanup_file_lock
+from .path_utils import get_session_file_path
+
+logger = logging.getLogger(__name__)
+
+
+class JSONLSession:
+    """JSONL-based implementation of the agents.Session protocol.
+
+    Stores conversation history in JSON Lines format, with one JSON object
+    per line. This provides human-readable storage and efficient append operations.
+
+    The session files are stored at:
+    {base_dir}/projects/{canonicalized_project_path}/{session_id}.jsonl
+    """
+
+    def __init__(
+        self,
+        session_id: str,
+        project_path: str | Path | None = None,
+        base_dir: str | Path | None = None,
+    ):
+        """Initialize the JSONL session.
+
+        Args:
+            session_id: Unique identifier for the session
+            project_path: Project path to canonicalize (defaults to cwd)
+            base_dir: Base directory for sessions (defaults to ~/.vibecore)
+        """
+        self.session_id = session_id
+
+        # Set default project path to current working directory
+        if project_path is None:
+            self.project_path = Path.cwd()
+        else:
+            self.project_path = Path(project_path)
+
+        # Set default base directory
+        if base_dir is None:
+            self.base_dir = Path.home() / ".vibecore"
+        else:
+            self.base_dir = Path(base_dir)
+
+        # Get the full path to the session file
+        self.file_path = get_session_file_path(
+            self.session_id,
+            self.project_path,
+            self.base_dir,
+        )
+
+        # Ensure the parent directory exists
+        self.file_path.parent.mkdir(parents=True, exist_ok=True)
+
+        logger.debug(f"Initialized JSONLSession for {self.session_id} at {self.file_path}")
+
+    async def get_items(self, limit: int | None = None) -> list["TResponseInputItem"]:
+        """Retrieve the conversation history for this session.
+
+        Args:
+            limit: Maximum number of items to retrieve. If None, retrieves all items.
+                When specified, returns the latest N items in chronological order.
+
+        Returns:
+            List of input items representing the conversation history
+        """
+        # If file doesn't exist, return empty list
+        if not self.file_path.exists():
+            return []
+
+        items: list[TResponseInputItem] = []
+
+        async with acquire_file_lock(self.file_path, exclusive=False):
+            try:
+                if limit is None:
+                    # Read entire file sequentially
+                    with open(self.file_path, encoding="utf-8") as f:
+                        for line in f:
+                            line = line.strip()
+                            if not line:
+                                continue
+                            try:
+                                item = json.loads(line)
+                                items.append(item)
+                            except json.JSONDecodeError as e:
+                                logger.warning(f"Skipping invalid JSON line in {self.file_path}: {e}")
+                else:
+                    # Read file backwards to get last N items efficiently
+                    # For now, read all and slice (optimize in Phase 2)
+                    all_items = []
+                    with open(self.file_path, encoding="utf-8") as f:
+                        for line in f:
+                            line = line.strip()
+                            if not line:
+                                continue
+                            try:
+                                item = json.loads(line)
+                                all_items.append(item)
+                            except json.JSONDecodeError as e:
+                                logger.warning(f"Skipping invalid JSON line in {self.file_path}: {e}")
+
+                    # Return the last N items
+                    items = all_items[-limit:] if len(all_items) > limit else all_items
+
+            except FileNotFoundError:
+                # File was deleted between existence check and read
+                return []
+            except Exception as e:
+                logger.error(f"Error reading session file {self.file_path}: {e}")
+                raise
+
+        return items
+
+    async def add_items(self, items: list["TResponseInputItem"]) -> None:
+        """Add new items to the conversation history.
+
+        Args:
+            items: List of input items to add to the history
+        """
+        if not items:
+            return
+
+        # Ensure parent directory exists
+        self.file_path.parent.mkdir(parents=True, exist_ok=True)
+
+        async with acquire_file_lock(self.file_path, exclusive=True):
+            try:
+                # Open file in append mode
+                with open(self.file_path, "a", encoding="utf-8") as f:
+                    for item in items:
+                        # Write each item as a JSON line
+                        json_line = json.dumps(item, ensure_ascii=False, separators=(",", ":"))
+                        f.write(json_line + "\n")
+
+                    # Ensure data is written to disk
+                    f.flush()
+
+                logger.debug(f"Added {len(items)} items to session {self.session_id}")
+
+            except Exception as e:
+                logger.error(f"Error adding items to session file {self.file_path}: {e}")
+                raise
+
+    async def pop_item(self) -> "TResponseInputItem | None":
+        """Remove and return the most recent item from the session.
+
+        Returns:
+            The most recent item if it exists, None if the session is empty
+        """
+        if not self.file_path.exists():
+            return None
+
+        async with acquire_file_lock(self.file_path, exclusive=True):
+            try:
+                # Read all lines
+                with open(self.file_path, encoding="utf-8") as f:
+                    lines = f.readlines()
+
+                if not lines:
+                    return None
+
+                # Find the last non-empty line
+                last_item = None
+                last_item_index = -1
+
+                for i in range(len(lines) - 1, -1, -1):
+                    line = lines[i].strip()
+                    if line:
+                        try:
+                            last_item = json.loads(line)
+                            last_item_index = i
+                            break
+                        except json.JSONDecodeError as e:
+                            logger.warning(f"Skipping invalid JSON line in {self.file_path}: {e}")
+
+                if last_item is None:
+                    return None
+
+                # Remove the last item and write back the rest
+                remaining_lines = lines[:last_item_index]
+
+                # Write atomically by writing to a temp file and renaming
+                temp_file = self.file_path.with_suffix(".tmp")
+                try:
+                    with open(temp_file, "w", encoding="utf-8") as f:
+                        f.writelines(remaining_lines)
+
+                    # Atomically replace the original file
+                    temp_file.replace(self.file_path)
+
+                except Exception:
+                    # Clean up temp file if something went wrong
+                    if temp_file.exists():
+                        temp_file.unlink()
+                    raise
+
+                logger.debug(f"Popped item from session {self.session_id}")
+                return last_item
+
+            except FileNotFoundError:
+                # File was deleted between existence check and read
+                return None
+            except Exception as e:
+                logger.error(f"Error popping item from session file {self.file_path}: {e}")
+                raise
+
+    async def clear_session(self) -> None:
+        """Clear all items for this session."""
+        if not self.file_path.exists():
+            return
+
+        async with acquire_file_lock(self.file_path, exclusive=True):
+            try:
+                # Delete the file
+                self.file_path.unlink()
+
+                # Clean up the lock for this file
+                cleanup_file_lock(self.file_path)
+
+                logger.debug(f"Cleared session {self.session_id}")
+
+            except FileNotFoundError:
+                # File was already deleted
+                pass
+            except Exception as e:
+                logger.error(f"Error clearing session file {self.file_path}: {e}")
+                raise
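A minimal, hypothetical usage sketch of this class (not shipped in the package): it drives the session from plain asyncio, and the item dicts are placeholders rather than a documented schema.

import asyncio

from vibecore.session.jsonl_session import JSONLSession


async def main() -> None:
    # With the defaults, the file lands under
    # ~/.vibecore/projects/<canonicalized cwd>/demo-session.jsonl
    session = JSONLSession(session_id="demo-session")

    # Each item becomes one JSON line appended under an exclusive file lock.
    await session.add_items([
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there"},
    ])

    # Read back only the most recent item (chronological order is preserved).
    print(await session.get_items(limit=1))


asyncio.run(main())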
vibecore/session/loader.py
ADDED

@@ -0,0 +1,193 @@
+"""Session loading functionality for vibecore."""
+
+from typing import Any
+
+from openai.types.responses import (
+    ResponseFunctionToolCall,
+    ResponseInputItemParam,
+    ResponseOutputItem,
+    ResponseOutputMessage,
+    ResponseReasoningItem,
+)
+from pydantic import TypeAdapter
+from textual import log
+
+from vibecore.session.jsonl_session import JSONLSession
+from vibecore.utils.text import TextExtractor
+from vibecore.widgets.messages import (
+    AgentMessage,
+    BaseMessage,
+    MessageStatus,
+    ReasoningMessage,
+    UserMessage,
+)
+from vibecore.widgets.tool_message_factory import create_tool_message
+from vibecore.widgets.tool_messages import BaseToolMessage
+
+
+class SessionLoader:
+    """Loads and parses session history into message widgets."""
+
+    def __init__(self, session: JSONLSession):
+        """Initialize SessionLoader with a session.
+
+        Args:
+            session: The JSONL session to load from
+        """
+        self.session = session
+        self.adapter = TypeAdapter(ResponseOutputItem)
+        self.tool_calls_pending: dict[str, tuple[str, str]] = {}
+
+    async def load_history(self) -> list[BaseMessage]:
+        """Load all session items and convert to message widgets.
+
+        Returns:
+            List of message widgets from the session history
+
+        Raises:
+            RuntimeError: If there are pending tool calls without outputs
+        """
+        session_items = await self.session.get_items()
+        messages = []
+
+        for item in session_items:
+            if message := self.parse_session_item(item):
+                messages.append(message)
+
+        self._validate_no_pending_calls()
+        return messages
+
+    def parse_session_item(self, item: ResponseInputItemParam) -> BaseMessage | None:
+        """Parse a single session item into a message widget.
+
+        Args:
+            item: Raw session item from the session
+
+        Returns:
+            A message widget or None if item cannot be parsed
+        """
+        # Try to parse as output item first
+        if output_item := self._parse_output_item(item):
+            return output_item
+
+        # Otherwise try to parse as input item
+        return self._parse_input_item(item)
+
+    def _parse_output_item(self, item: Any) -> BaseMessage | None:
+        """Try to parse item as ResponseOutputItem.
+
+        Args:
+            item: Raw item dict
+
+        Returns:
+            A message widget or None if not a valid output item
+        """
+        try:
+            output_item = self.adapter.validate_python(item)
+
+            match output_item:
+                case ResponseOutputMessage(role="user", content=content):
+                    # User message
+                    text_content = TextExtractor.extract_from_content(content)
+                    return UserMessage(text_content)
+
+                case ResponseReasoningItem(summary=summary):
+                    # assert len(summary) == 1, "Summary must contain exactly one item"
+                    summary_merged = "\n\n".join(item.text for item in summary) if summary else "Thinking..."
+                    return ReasoningMessage(summary_merged, status=MessageStatus.IDLE)
+
+                case ResponseOutputMessage(role="assistant", content=content):
+                    # Handle assistant messages
+                    text_content = TextExtractor.extract_from_content(content)
+                    if text_content:
+                        # If the agent decides to immediately call a tool, there is often no text content
+                        return AgentMessage(text_content, status=MessageStatus.IDLE)
+                    return None
+
+                case ResponseFunctionToolCall(call_id=call_id, name=name, arguments=arguments) if call_id:
+                    log(f"Tool call: {name} with arguments: {arguments}")
+                    # Tool call - store for matching with output
+                    self._handle_tool_call(call_id, name, str(arguments))
+                    return None
+
+                case _:
+                    # Log unknown output item types for debugging
+                    log(f"Unknown output item type: {type(output_item).__name__}")
+                    return None
+
+        except Exception:
+            # Not a valid output item
+            return None
+
+    def _parse_input_item(self, item: Any) -> BaseMessage | None:
+        """Parse items that are input-only (not valid output items).
+
+        Args:
+            item: Raw item
+
+        Returns:
+            A message widget or None if item cannot be parsed
+        """
+        if not isinstance(item, dict):
+            return None
+
+        match item:
+            case {"role": "user", "content": content}:
+                # User message input (EasyInputMessageParam is not convertible to ResponseOutputMessage)
+                text_content = str(content) if isinstance(content, list) else content
+                return UserMessage(text_content)
+
+            case {"type": "function_call_output", "call_id": call_id, "output": output}:
+                # Tool output - check if we have a pending call
+                return self._create_tool_message(call_id, output)
+
+            case _:
+                # Log unhandled input items
+                log(f"Unhandled session item: {item}")
+                return None
+
+    def _handle_tool_call(self, call_id: str, name: str, arguments: str) -> None:
+        """Track pending tool calls for matching with outputs.
+
+        Args:
+            call_id: The tool call ID
+            name: The tool name
+            arguments: The tool arguments as a string
+        """
+        self.tool_calls_pending[call_id] = (name, arguments)
+
+    def _create_tool_message(self, call_id: str, output: str) -> BaseToolMessage | None:
+        """Create tool message by matching call_id with pending calls.
+
+        Args:
+            call_id: The tool call ID
+            output: The tool output
+
+        Returns:
+            A BaseToolMessage widget, or None if no matching call found
+        """
+        if not call_id or call_id not in self.tool_calls_pending:
+            return None
+
+        tool_name, command = self.tool_calls_pending.pop(call_id)
+
+        # Determine status based on output
+        output_str = str(output) if output else ""
+        status = MessageStatus.SUCCESS
+
+        # Use factory to create the appropriate tool message with output
+        return create_tool_message(
+            tool_name=tool_name,
+            arguments=command,
+            output=output_str,
+            status=status,
+        )
+
+    def _validate_no_pending_calls(self) -> None:
+        """Validate that there are no pending tool calls without outputs.
+
+        Raises:
+            RuntimeError: If there are pending tool calls
+        """
+        if self.tool_calls_pending:
+            raise RuntimeError(f"Pending tool calls without outputs found: {self.tool_calls_pending}")
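The heart of the loader is call/output pairing: a ResponseFunctionToolCall is parked in tool_calls_pending until a function_call_output with the same call_id arrives, and load_history() fails if anything is left unmatched. A standalone, illustrative sketch of that bookkeeping, independent of the widget classes:

# Illustrative only: mirrors SessionLoader's pending-call bookkeeping without widgets.
pending: dict[str, tuple[str, str]] = {}


def on_tool_call(call_id: str, name: str, arguments: str) -> None:
    # Equivalent of _handle_tool_call(): remember the call until its output arrives.
    pending[call_id] = (name, arguments)


def on_tool_output(call_id: str, output: str) -> tuple[str, str, str] | None:
    # Equivalent of _create_tool_message(): only emit when a matching call is pending.
    if call_id not in pending:
        return None
    name, arguments = pending.pop(call_id)
    return (name, arguments, output)


on_tool_call("call_1", "read_file", '{"path": "README.md"}')
print(on_tool_output("call_1", "# vibecore"))  # ('read_file', '{"path": "README.md"}', '# vibecore')
assert not pending  # nothing dangling, mirroring _validate_no_pending_calls()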
vibecore/session/path_utils.py
ADDED

@@ -0,0 +1,81 @@
+"""Path utilities for session file management."""
+
+from pathlib import Path
+
+
+def canonicalize_path(path: Path) -> str:
+    """Convert a path to a safe directory name.
+
+    Converts absolute paths to a safe format suitable for use as a directory name.
+    Replaces path separators with hyphens to create a flat namespace.
+
+    Args:
+        path: The path to canonicalize
+
+    Returns:
+        A canonicalized string safe for use as a directory name
+
+    Example:
+        >>> canonicalize_path(Path("/Users/serialx/workspace/vibecore"))
+        '-Users-serialx-workspace-vibecore'
+    """
+    # Resolve to absolute path and convert to string
+    absolute_path = path.resolve()
+    path_str = str(absolute_path)
+
+    # Replace path separators with hyphens
+    # This creates a flat namespace while preserving path uniqueness
+    canonicalized = path_str.replace("/", "-")
+
+    # Handle Windows paths (replace backslashes and colons)
+    canonicalized = canonicalized.replace("\\", "-")
+    canonicalized = canonicalized.replace(":", "")
+
+    # Remove any leading/trailing hyphens that might occur
+    canonicalized = canonicalized.strip("-")
+
+    # Ensure we always have a non-empty result
+    if not canonicalized:
+        canonicalized = "root"
+
+    return canonicalized
+
+
+def get_session_file_path(
+    session_id: str,
+    project_path: Path,
+    base_dir: Path,
+) -> Path:
+    """Construct the full path to a session file.
+
+    Creates a path structure like:
+    {base_dir}/projects/{canonicalized_project_path}/{session_id}.jsonl
+
+    Args:
+        session_id: Unique identifier for the session
+        project_path: Project path to canonicalize
+        base_dir: Base directory for sessions (e.g., ~/.vibecore)
+
+    Returns:
+        Full path to the session file
+
+    Example:
+        >>> get_session_file_path(
+        ...     "chat-2024-01-15",
+        ...     Path("/Users/serialx/workspace/vibecore"),
+        ...     Path.home() / ".vibecore"
+        ... )
+        PosixPath('/Users/serialx/.vibecore/projects/-Users-serialx-workspace-vibecore/chat-2024-01-15.jsonl')
+    """
+    # Validate session_id to prevent directory traversal
+    if "/" in session_id or "\\" in session_id or ".." in session_id:
+        raise ValueError(f"Invalid session_id: {session_id}")
+
+    # Canonicalize the project path
+    canonicalized_project = canonicalize_path(project_path)
+
+    # Build the full path
+    session_dir = base_dir / "projects" / canonicalized_project
+    session_file = session_dir / f"{session_id}.jsonl"
+
+    return session_file
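A small, hypothetical check of this mapping (assuming the package is importable). Note that the strip("-") call above removes the leading hyphen shown in the docstring examples:

from pathlib import Path

from vibecore.session.path_utils import canonicalize_path, get_session_file_path

# Hypothetical project path; resolve() may expand symlinks (e.g. /tmp -> /private/tmp on macOS).
project = Path("/home/alice/work/demo")

print(canonicalize_path(project))
# expected: 'home-alice-work-demo' (leading hyphen dropped by strip("-"))

print(get_session_file_path("chat-2024-01-15", project, Path.home() / ".vibecore"))
# expected: ~/.vibecore/projects/home-alice-work-demo/chat-2024-01-15.jsonl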
vibecore/settings.py
ADDED

@@ -0,0 +1,161 @@
+"""Settings configuration for Vibecore application."""
+
+import os
+from pathlib import Path
+from typing import Literal
+
+from agents import Model, OpenAIChatCompletionsModel
+from agents.models.multi_provider import MultiProvider
+from pydantic import BaseModel, Field
+from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, SettingsConfigDict, YamlConfigSettingsSource
+
+from vibecore.models import AnthropicModel
+
+
+class SessionSettings(BaseModel):
+    """Configuration for session storage."""
+
+    storage_type: Literal["jsonl", "sqlite"] = Field(
+        default="jsonl",
+        description="Type of storage backend for sessions",
+    )
+    base_dir: Path = Field(
+        default=Path.home() / ".vibecore",
+        description="Base directory for session storage",
+    )
+
+
+class MCPServerConfig(BaseModel):
+    """Configuration for an MCP server."""
+
+    name: str = Field(
+        description="Unique name for this MCP server",
+    )
+    type: Literal["stdio", "sse", "http"] = Field(
+        description="Type of MCP server connection",
+    )
+
+    # For stdio servers
+    command: str | None = Field(
+        default=None,
+        description="Command to run for stdio servers (e.g., 'node /path/to/server.js')",
+    )
+    args: list[str] = Field(
+        default_factory=list,
+        description="Arguments for the stdio command",
+    )
+    env: dict[str, str] = Field(
+        default_factory=dict,
+        description="Environment variables for stdio servers",
+    )
+
+    # For SSE/HTTP servers
+    url: str | None = Field(
+        default=None,
+        description="URL for SSE or HTTP servers",
+    )
+
+    # Tool filtering
+    allowed_tools: list[str] | None = Field(
+        default=None,
+        description="List of allowed tool names (whitelist)",
+    )
+    blocked_tools: list[str] | None = Field(
+        default=None,
+        description="List of blocked tool names (blacklist)",
+    )
+
+    # Other options
+    cache_tools: bool = Field(
+        default=True,
+        description="Whether to cache the tool list",
+    )
+    timeout_seconds: float | None = Field(
+        default=30.0,
+        description="Timeout for server operations",
+    )
+
+
+class Settings(BaseSettings):
+    """Application settings with environment variable support."""
+
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        env_file_encoding="utf-8",
+        env_prefix="VIBECORE_",
+        yaml_file=["config.yaml"],
+        yaml_file_encoding="utf-8",
+        case_sensitive=False,
+    )
+
+    # Model configuration
+    default_model: str = Field(
+        # default="o3",
+        # default="gpt-4.1",
+        # default="qwen3-30b-a3b-mlx@8bit",
+        # default="mistralai/devstral-small-2507",
+        default="anthropic/claude-sonnet-4-20250514",
+        # default="anthropic/claude-3-5-haiku-20241022",
+        # default="litellm/deepseek/deepseek-chat",
+        description="Default model to use for agents (e.g., 'gpt-4.1', 'o3-mini', 'anthropic/claude-sonnet-4')",
+    )
+
+    # Agent configuration
+    max_turns: int = Field(
+        default=200,
+        description="Maximum number of turns for agent conversation",
+    )
+    reasoning_effort: Literal["minimal", "low", "medium", "high"] | None = Field(
+        default=None,
+        description="Default reasoning effort level for agents (null, 'minimal', 'low', 'medium', 'high')",
+    )
+
+    # Session configuration
+    session: SessionSettings = Field(
+        default_factory=SessionSettings,
+        description="Session storage configuration",
+    )
+
+    # MCP server configuration
+    mcp_servers: list[MCPServerConfig] = Field(
+        default_factory=list,
+        description="List of MCP servers to connect to",
+    )
+
+    @property
+    def model(self) -> str | Model:
+        """Get the configured model.
+
+        Returns an AnthropicModel instance if the model name starts with 'anthropic/',
+        returns an OpenAIChatCompletionsModel instance if there is a custom base URL set,
+        otherwise returns the model name as a plain string (for OpenAI/LiteLLM models).
+        """
+        custom_base = "OPENAI_BASE_URL" in os.environ
+        if self.default_model.startswith("anthropic/"):
+            return AnthropicModel(self.default_model)
+        elif custom_base:
+            openai_provider = MultiProvider().openai_provider
+            return OpenAIChatCompletionsModel(self.default_model, openai_provider._get_client())
+        return self.default_model
+
+    @classmethod
+    def settings_customise_sources(
+        cls,
+        settings_cls: type[BaseSettings],
+        init_settings: PydanticBaseSettingsSource,
+        env_settings: PydanticBaseSettingsSource,
+        dotenv_settings: PydanticBaseSettingsSource,
+        file_secret_settings: PydanticBaseSettingsSource,
+    ) -> tuple[PydanticBaseSettingsSource, ...]:
+        """Configure settings sources to include YAML support."""
+        return (
+            init_settings,
+            env_settings,
+            dotenv_settings,
+            YamlConfigSettingsSource(settings_cls),
+            file_secret_settings,
+        )
+
+
+# Create a singleton settings instance
+settings = Settings()
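A hypothetical override sketch showing how these sources compose (assuming vibecore and its dependencies are installed; the environment variable names follow the VIBECORE_ prefix declared in model_config, and init arguments take precedence over environment, .env, and YAML values per the source order above):

import os

from vibecore.settings import Settings

# Environment variables use the VIBECORE_ prefix declared in model_config.
os.environ["VIBECORE_DEFAULT_MODEL"] = "gpt-4.1"
os.environ["VIBECORE_MAX_TURNS"] = "50"

s = Settings()
print(s.default_model, s.max_turns)  # expected: gpt-4.1 50

# The model property then resolves the provider: "anthropic/..." names become
# AnthropicModel instances, otherwise the plain model name string is returned
# (or an OpenAIChatCompletionsModel when OPENAI_BASE_URL is set).
print(type(s.model))

# MCP servers can also be supplied programmatically; dicts are coerced to MCPServerConfig.
s2 = Settings(mcp_servers=[{"name": "docs", "type": "http", "url": "http://localhost:8080/mcp"}])
print(s2.mcp_servers[0].name)  # expected: docs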
vibecore/tools/__init__.py
ADDED

@@ -0,0 +1 @@
+"""Tools for the Vibecore agents."""
vibecore/tools/base.py
ADDED

@@ -0,0 +1,27 @@
+"""Base utilities for tools."""
+
+from typing import Any, Protocol
+
+from rich.console import Console
+
+
+class ToolRenderer(Protocol):
+    """Protocol for tool-specific renderers."""
+
+    def render_call(self, tool_name: str, args: dict[str, Any]) -> None:
+        """Render a tool call."""
+        ...
+
+    def render_output(self, tool_name: str, output: Any) -> None:
+        """Render tool output."""
+        ...
+
+
+def create_console() -> Console:
+    """Create a configured console instance."""
+    return Console()
+
+
+def render_error(console: Console, error: str) -> None:
+    """Render an error message."""
+    console.print(f"[red]Error:[/red] {error}")
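Because ToolRenderer is a Protocol, any class with matching method signatures satisfies it structurally, with no inheritance required. A hypothetical sketch (PlainRenderer is not part of the package):

from typing import Any

from rich.console import Console

from vibecore.tools.base import ToolRenderer, create_console, render_error


class PlainRenderer:
    """Hypothetical renderer; matching method signatures are all the Protocol needs."""

    def __init__(self, console: Console) -> None:
        self.console = console

    def render_call(self, tool_name: str, args: dict[str, Any]) -> None:
        self.console.print(f"[bold]{tool_name}[/bold] called with {args}")

    def render_output(self, tool_name: str, output: Any) -> None:
        self.console.print(f"{tool_name} -> {output}")


console = create_console()
renderer: ToolRenderer = PlainRenderer(console)  # structural typing, no subclassing
renderer.render_call("read_file", {"path": "README.md"})
renderer.render_output("read_file", "# vibecore")
render_error(console, "file not found")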