vibecore 0.3.0-py3-none-any.whl → 0.6.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vibecore/agents/default.py +3 -3
- vibecore/agents/task.py +3 -3
- vibecore/cli.py +67 -43
- vibecore/context.py +74 -11
- vibecore/flow.py +335 -73
- vibecore/handlers/stream_handler.py +35 -56
- vibecore/main.py +70 -272
- vibecore/session/jsonl_session.py +3 -1
- vibecore/session/loader.py +2 -2
- vibecore/settings.py +48 -1
- vibecore/tools/file/executor.py +59 -13
- vibecore/tools/file/tools.py +9 -9
- vibecore/tools/path_validator.py +251 -0
- vibecore/tools/python/helpers.py +2 -2
- vibecore/tools/python/tools.py +2 -2
- vibecore/tools/shell/executor.py +63 -7
- vibecore/tools/shell/tools.py +9 -9
- vibecore/tools/task/executor.py +2 -2
- vibecore/tools/task/tools.py +2 -2
- vibecore/tools/todo/manager.py +2 -10
- vibecore/tools/todo/models.py +5 -14
- vibecore/tools/todo/tools.py +5 -5
- vibecore/tools/webfetch/tools.py +1 -4
- vibecore/tools/websearch/ddgs/backend.py +1 -1
- vibecore/tools/websearch/tools.py +1 -4
- vibecore/widgets/core.py +3 -17
- vibecore/widgets/feedback.py +164 -0
- vibecore/widgets/feedback.tcss +121 -0
- vibecore/widgets/messages.py +22 -2
- vibecore/widgets/messages.tcss +28 -0
- vibecore/widgets/tool_messages.py +19 -4
- vibecore/widgets/tool_messages.tcss +23 -0
- {vibecore-0.3.0.dist-info → vibecore-0.6.2.dist-info}/METADATA +122 -29
- {vibecore-0.3.0.dist-info → vibecore-0.6.2.dist-info}/RECORD +37 -34
- {vibecore-0.3.0.dist-info → vibecore-0.6.2.dist-info}/WHEEL +0 -0
- {vibecore-0.3.0.dist-info → vibecore-0.6.2.dist-info}/entry_points.txt +0 -0
- {vibecore-0.3.0.dist-info → vibecore-0.6.2.dist-info}/licenses/LICENSE +0 -0
vibecore/flow.py
CHANGED
@@ -1,105 +1,367 @@
 import asyncio
+import contextlib
+import datetime
+import sys
 import threading
 from collections.abc import Callable, Coroutine
-from typing import
+from typing import Any, Concatenate, Generic, TypeAlias, cast, overload

-from agents import
+from agents import (
+    Agent,
+    RunConfig,
+    RunHooks,
+    Runner,
+    Session,
+    TResponseInputItem,
+)
+from agents.result import RunResultBase
+from agents.run import DEFAULT_MAX_TURNS
 from textual.pilot import Pilot
+from typing_extensions import TypeVar

-from vibecore.context import
+from vibecore.context import AppAwareContext
 from vibecore.main import AppIsExiting, VibecoreApp
+from vibecore.session import JSONLSession
+from vibecore.settings import settings
 from vibecore.widgets.core import MyTextArea
 from vibecore.widgets.messages import SystemMessage


-class
-    """
+class NoUserInputLeft(Exception):
+    """Raised when no more user inputs are available in static runner."""

-
-    """Get user input with optional prompt message.
+    pass

-    Args:
-        prompt: Optional prompt to display before getting input.

-
-
-
-
+TContext = TypeVar("TContext", default=None)
+TWorkflowReturn = TypeVar("TWorkflowReturn", default=RunResultBase)
+
+
+class VibecoreRunner(Generic[TContext, TWorkflowReturn]):
+    def __init__(
+        self,
+        vibecore: "Vibecore[TContext, TWorkflowReturn]",
+        context: TContext | None = None,
+        session: Session | None = None,
+    ) -> None:
+        self.vibecore = vibecore
+        self.context = context
+
+        if session is None:
+            session_id = f"chat-{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
+            self._session = JSONLSession(
+                session_id=session_id,
+                project_path=None,  # Will use current working directory
+                base_dir=settings.session.base_dir,
+            )
+        else:
+            self._session = session
+
+    @property
+    def session(self) -> Session:
+        return self._session
+
+    async def print(self, message: str) -> None:
+        print(message, file=sys.stderr)
+
+    async def run_agent(
+        self,
+        starting_agent: Agent[TContext],
+        input: str | list[TResponseInputItem],
+        *,
+        context: TContext | None = None,
+        max_turns: int = DEFAULT_MAX_TURNS,
+        hooks: RunHooks[TContext] | None = None,
+        run_config: RunConfig | None = None,
+        previous_response_id: str | None = None,
+        session: Session | None = None,
+    ) -> RunResultBase:
+        result = await Runner.run(
+            starting_agent=starting_agent,
+            input=input,  # Pass string directly when using session
+            context=context,
+            max_turns=max_turns,
+            hooks=hooks,
+            run_config=run_config,
+            previous_response_id=previous_response_id,
+            session=session,
+        )
+        return result
+
+
+class VibecoreCliRunner(VibecoreRunner[TContext, TWorkflowReturn]):
+    def __init__(
+        self,
+        vibecore: "Vibecore[TContext, TWorkflowReturn]",
+        context: TContext | None = None,
+        session: Session | None = None,
+    ) -> None:
+        super().__init__(vibecore, context=context, session=session)
+
+    async def _user_input(self, prompt: str = "") -> str:
+        return input(prompt)
+
+    async def run(self) -> TWorkflowReturn:
+        assert self.vibecore.workflow_logic is not None, (
+            "Workflow logic not defined. Please use the @vibecore.workflow() decorator."
+        )
+        result = None
+        while user_message := await self._user_input():
+            result = await self.vibecore.workflow_logic(self, user_message)

+        assert result, "No result available after inputs exhausted."
+        return result

-async def flow(
-    agent: Agent,
-    logic: Callable[[VibecoreApp, VibecoreContext, UserInputFunc], Coroutine],
-    headless: bool = False,
-    shutdown: bool = False,
-    disable_user_input: bool = True,
-):
-    ctx = VibecoreContext()
-    app = VibecoreApp(ctx, agent, show_welcome=False)

-
+class VibecoreStaticRunner(VibecoreRunner[TContext, TWorkflowReturn]):
+    def __init__(
+        self,
+        vibecore: "Vibecore[TContext, TWorkflowReturn]",
+        context: TContext | None = None,
+        session: Session | None = None,
+    ) -> None:
+        super().__init__(vibecore, context=context, session=session)
+        self.inputs: list[str] = []
+        self.prints: list[str] = []

-    def
+    async def print(self, message: str) -> None:
+        # Capture printed messages instead of displaying them
+        self.prints.append(message)
+
+    async def run(self, inputs: list[str] | None = None) -> TWorkflowReturn:
+        if inputs is None:
+            inputs = []
+        assert self.vibecore.workflow_logic is not None, (
+            "Workflow logic not defined. Please use the @vibecore.workflow() decorator."
+        )
+        result = None
+        for user_message in inputs:
+            result = await self.vibecore.workflow_logic(self, user_message)
+
+        assert result, "No result available after inputs exhausted."
+        return result
+
+
+class VibecoreTextualRunner(VibecoreRunner[AppAwareContext, TWorkflowReturn]):
+    def __init__(
+        self,
+        vibecore: "Vibecore[AppAwareContext, TWorkflowReturn]",
+        context: AppAwareContext | None = None,
+        session: Session | None = None,
+    ) -> None:
+        super().__init__(vibecore, context=context, session=session)
+        self.app = VibecoreApp(
+            self,
+            show_welcome=False,
+        )
+        self.app_ready_event = asyncio.Event()
+
+    async def _user_input(self, prompt: str = "") -> str:
+        if prompt:
+            await self.print(prompt)
+        self.app.query_one(MyTextArea).disabled = False
+        self.app.query_one(MyTextArea).focus()
+        user_input = await self.app.wait_for_user_input()
+        if self.vibecore.disable_user_input:
+            self.app.query_one(MyTextArea).disabled = True
+        return user_input
+
+    async def print(self, message: str) -> None:
+        await self.app.add_message(SystemMessage(message))
+
+    async def run_agent(
+        self,
+        starting_agent: Agent[AppAwareContext],
+        input: str | list[TResponseInputItem],
+        *,
+        context: AppAwareContext | None = None,
+        max_turns: int = DEFAULT_MAX_TURNS,
+        hooks: RunHooks[AppAwareContext] | None = None,
+        run_config: RunConfig | None = None,
+        previous_response_id: str | None = None,
+        session: Session | None = None,
+    ) -> RunResultBase:
+        result = Runner.run_streamed(
+            starting_agent=starting_agent,
+            input=input,  # Pass string directly when using session
+            context=context,
+            max_turns=max_turns,
+            hooks=hooks,
+            run_config=run_config,
+            previous_response_id=previous_response_id,
+            session=session,
+        )
+
+        self.app.current_worker = self.app.handle_streamed_response(result)
+        await self.app.current_worker.wait()
+        return result
+
+    def on_app_ready(self) -> None:
         """Called when app is ready to process events."""
-        app_ready_event.set()
+        self.app_ready_event.set()

-    async def
+    async def _run_app(self) -> None:
         """Run the apps message loop.

         Args:
             app: App to run.
         """
-
-        with app._context():
+        with self.app._context():
             try:
-                app._loop = asyncio.get_running_loop()
-                app._thread_id = threading.get_ident()
-                await app._process_messages(
-                    ready_callback=on_app_ready,
-                    headless=
+                self.app._loop = asyncio.get_running_loop()
+                self.app._thread_id = threading.get_ident()
+                await self.app._process_messages(
+                    ready_callback=self.on_app_ready,
+                    headless=False,
                 )
             finally:
-                app_ready_event.set()
+                self.app_ready_event.set()

-    async def
-
-
-
-
-
-
-
-        return
+    async def _run_logic(self) -> TWorkflowReturn:
+        assert self.vibecore.workflow_logic is not None, (
+            "Workflow logic not defined. Please use the @vibecore.workflow() decorator."
+        )
+        while True:
+            user_message = await self._user_input()
+            result = await self.vibecore.workflow_logic(self, user_message)
+            assert result, "No result available after inputs exhausted."
+            return result
+
+    async def run(self, inputs: list[str] | None = None, shutdown: bool = False) -> TWorkflowReturn:
+        if inputs:
+            self.app.message_queue.extend(inputs)
+        app_task = asyncio.create_task(self._run_app(), name=f"run_app({self.app})")
+        await self.app_ready_event.wait()
+
+        await self.app.load_session_history(self.session)
+        pilot = Pilot(self.app)
+        logic_task: asyncio.Task[TWorkflowReturn] | None = None
+
+        await pilot._wait_for_screen()
+        if self.vibecore.disable_user_input:
+            self.app.query_one(MyTextArea).disabled = True
+        logic_task = asyncio.create_task(self._run_logic(), name="logic_task")
+        done, pending = await asyncio.wait([logic_task, app_task], return_when=asyncio.FIRST_COMPLETED)

-
-
-
-
-
-
-
-
-
-
-
-    await pilot._wait_for_screen()
-    if disable_user_input:
-        app.query_one(MyTextArea).disabled = True
-    logic_task = asyncio.create_task(run_logic(app, ctx, user_input), name="logic_task")
-    done, pending = await asyncio.wait([logic_task, app_task], return_when=asyncio.FIRST_COMPLETED)
-
-    # If app has exited and logic is still running, cancel logic
-    if app_task in done and logic_task in pending:
-        logic_task.cancel()
-    # If logic is finished and app is still running
-    elif logic_task in done and app_task in pending:
-        if shutdown:
-            if not headless:
+        # If app has exited and logic is still running, cancel logic
+        if app_task in done and logic_task in pending:
+            logic_task.cancel()
+            with contextlib.suppress(asyncio.CancelledError):
+                await logic_task
+            raise AppIsExiting()
+        # If logic is finished and app is still running
+        elif logic_task in done and app_task in pending:
+            result = logic_task.result()
+            if shutdown:
                 await pilot._wait_for_screen()
                 await asyncio.sleep(1.0)
-
-
-
-
-
-
+                self.app.exit()
+                await app_task
+            else:
+                await self.print("Workflow complete. Press Ctrl-Q to exit.")
+                # Enable text input so users can interact freely
+                self.app.query_one(MyTextArea).disabled = False
+                # Wait until app is exited
+                await app_task
+            return result
+        raise AssertionError(f"Unexpected state: done={done}, pending={pending}")
+
+
+WorkflowLogic: TypeAlias = Callable[
+    Concatenate[VibecoreRunner[TContext, TWorkflowReturn], str, ...],
+    Coroutine[Any, Any, TWorkflowReturn],
+]
+
+
+class Vibecore(Generic[TContext, TWorkflowReturn]):
+    def __init__(self, disable_user_input: bool = True) -> None:
+        self.workflow_logic: WorkflowLogic[TContext, TWorkflowReturn] | None = None
+        self.disable_user_input = disable_user_input
+
+    def workflow(
+        self,
+    ) -> Callable[[WorkflowLogic[TContext, TWorkflowReturn]], WorkflowLogic[TContext, TWorkflowReturn]]:
+        """Decorator to define the workflow logic for the app.
+
+        Returns:
+            A decorator that wraps the workflow logic function.
+        """
+
+        def decorator(
+            func: WorkflowLogic[TContext, TWorkflowReturn],
+        ) -> WorkflowLogic[TContext, TWorkflowReturn]:
+            self.workflow_logic = func
+            return func
+
+        return decorator
+
+    @overload
+    async def run_textual(
+        self,
+        inputs: str | None = None,
+        context: AppAwareContext | None = None,
+        session: Session | None = None,
+        shutdown: bool = False,
+    ) -> TWorkflowReturn: ...
+    @overload
+    async def run_textual(
+        self,
+        inputs: list[str] | None = None,
+        context: AppAwareContext | None = None,
+        session: Session | None = None,
+        shutdown: bool = False,
+    ) -> TWorkflowReturn: ...
+
+    async def run_textual(
+        self,
+        inputs: str | list[str] | None = None,
+        context: AppAwareContext | None = None,
+        session: Session | None = None,
+        shutdown: bool = False,
+    ) -> TWorkflowReturn:
+        if isinstance(inputs, str):
+            inputs = [inputs]
+
+        if self.workflow_logic is None:
+            raise ValueError("Workflow logic not defined. Please use the @vibecore.workflow() decorator.")
+
+        assert isinstance(context, AppAwareContext) or context is None, (
+            "Textual runner requires AppAwareContext or None."
+        )
+        # Type checker needs help: after the assertion, we know context is AppAwareContext | None
+        # and this Vibecore instance can be treated as Vibecore[AppAwareContext, TWorkflowReturn]
+        runner = VibecoreTextualRunner(
+            cast("Vibecore[AppAwareContext, TWorkflowReturn]", self),
+            context=context,
+            session=session,
+        )
+        return await runner.run(inputs=inputs, shutdown=shutdown)
+
+    async def run_cli(self, context: TContext | None = None, session: Session | None = None) -> TWorkflowReturn:
+        if self.workflow_logic is None:
+            raise ValueError("Workflow logic not defined. Please use the @vibecore.workflow() decorator.")
+
+        runner = VibecoreCliRunner(self, context=context, session=session)
+        return await runner.run()
+
+    @overload
+    async def run(
+        self, inputs: str, context: TContext | None = None, session: Session | None = None
+    ) -> TWorkflowReturn: ...
+
+    @overload
+    async def run(
+        self, inputs: list[str], context: TContext | None = None, session: Session | None = None
+    ) -> TWorkflowReturn: ...
+
+    async def run(
+        self, inputs: str | list[str], context: TContext | None = None, session: Session | None = None
+    ) -> TWorkflowReturn:
+        if isinstance(inputs, str):
+            inputs = [inputs]
+
+        if self.workflow_logic is None:
+            raise ValueError("Workflow logic not defined. Please use the @vibecore.workflow() decorator.")
+
+        runner = VibecoreStaticRunner(self, context=context, session=session)
+        return await runner.run(inputs=inputs)
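The rewritten `flow.py` above replaces the old `flow()` helper with a `Vibecore` class and interchangeable runner backends (`VibecoreCliRunner`, `VibecoreStaticRunner`, `VibecoreTextualRunner`). A minimal usage sketch inferred from the added code; the `Agent` construction is an assumption and is not part of this diff:

```python
# Sketch only: based on the Vibecore API added in flow.py above.
import asyncio

from agents import Agent  # assumed agent setup; not shown in this diff
from vibecore.flow import Vibecore, VibecoreRunner

vibecore: Vibecore = Vibecore(disable_user_input=True)
agent = Agent(name="Assistant", instructions="Be concise.")


@vibecore.workflow()
async def logic(runner: VibecoreRunner, user_message: str):
    # The runner supplies print(), run_agent(), and a shared session.
    await runner.print(f"Running agent on: {user_message!r}")
    return await runner.run_agent(agent, user_message, session=runner.session)


# Static runner: scripted inputs, prints captured in runner.prints.
result = asyncio.run(vibecore.run(["Hello!"]))

# Textual TUI runner (interactive); shutdown=True exits the app when the
# workflow finishes:
# result = asyncio.run(vibecore.run_textual("Hello!", shutdown=True))
```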
vibecore/handlers/stream_handler.py
CHANGED
@@ -4,8 +4,6 @@ import json
 from typing import Protocol

 from agents import (
-    Agent,
-    AgentUpdatedStreamEvent,
     MessageOutputItem,
     RawResponsesStreamEvent,
     RunItemStreamEvent,
@@ -15,15 +13,12 @@ from agents import (
     ToolCallOutputItem,
 )
 from openai.types.responses import (
-    ResponseCompletedEvent,
     ResponseFunctionToolCall,
     ResponseOutputItemAddedEvent,
     ResponseOutputItemDoneEvent,
     ResponseReasoningItem,
-    ResponseReasoningSummaryPartAddedEvent,
-    ResponseReasoningSummaryTextDeltaEvent,
-    ResponseReasoningSummaryTextDoneEvent,
     ResponseTextDeltaEvent,
+    ResponseTextDoneEvent,
 )

 from vibecore.widgets.messages import (
@@ -43,8 +38,8 @@ class MessageHandler(Protocol):
         """Add a message to the widget's message list."""
         ...

-    async def
-        """
+    async def handle_agent_message_update(self, message: BaseMessage) -> None:
+        """Message in the widget's message list is updated with new delta or status"""
         ...

     async def handle_agent_error(self, error: Exception) -> None:
@@ -82,7 +77,15 @@ class AgentStreamHandler:
             self.agent_message = AgentMessage(self.message_content, status=MessageStatus.EXECUTING)
             await self.message_handler.handle_agent_message(self.agent_message)
         else:
-
+            # When content is short, we update more frequently for better UX
+            content_is_short_and_semantically_should_update = len(self.message_content) < 1000 and (
+                self.message_content.endswith(".") or "\n" in delta
+            )
+            # Else when content is long, we update less frequently to avoid UI lag
+            should_update_bulk_delta = len(self.message_content) - len(self.agent_message.text) > 200
+            if content_is_short_and_semantically_should_update or should_update_bulk_delta:
+                self.agent_message.update(self.message_content)
+                await self.message_handler.handle_agent_message_update(self.agent_message)

     async def handle_tool_call(self, tool_name: str, arguments: str, call_id: str) -> None:
         """Create and display tool message when tool is invoked.
@@ -108,11 +111,19 @@ class AgentStreamHandler:
         if call_id in self.tool_messages:
             tool_message = self.tool_messages[call_id]
             tool_message.update(MessageStatus.SUCCESS, str(output))
+            await self.message_handler.handle_agent_message_update(tool_message)

     async def handle_message_complete(self) -> None:
         """Finalize agent message when complete."""
+        if self.tool_messages:
+            # Some tool messages such as transfer_to_* may still be in executing status
+            # since it never gets a tool output event. We mark them as success here.
+            for tool_message in self.tool_messages.values():
+                if tool_message.status == MessageStatus.EXECUTING:
+                    tool_message.status = MessageStatus.SUCCESS
         if self.agent_message:
             self.agent_message.update(self.message_content, status=MessageStatus.IDLE)
+            await self.message_handler.handle_agent_message_update(self.agent_message)
         self.agent_message = None
         self.message_content = ""

@@ -132,42 +143,25 @@ class AgentStreamHandler:
                 self.reasoning_messages[reasoning_id] = reasoning_message
                 await self.message_handler.handle_agent_message(reasoning_message)

-            case ResponseReasoningSummaryPartAddedEvent() as e:
-                reasoning_id = e.item_id
-                reasoning_message = self.reasoning_messages[reasoning_id]
-                assert reasoning_message, f"Reasoning message with ID {reasoning_id} not found"
-                updated = reasoning_message.text + "\n\n" if reasoning_message.text != "Thinking..." else ""
-                reasoning_message.update(updated, status=MessageStatus.EXECUTING)
-
-            case ResponseReasoningSummaryTextDeltaEvent(item_id=reasoning_id, delta=delta):
-                reasoning_message = self.reasoning_messages[reasoning_id]
-                assert reasoning_message, f"Reasoning message with ID {reasoning_id} not found"
-                updated = reasoning_message.text + delta
-                reasoning_message.update(updated, status=MessageStatus.EXECUTING)
-
-            case ResponseReasoningSummaryTextDoneEvent(item_id=reasoning_id) as e:
-                pass
-
             case ResponseOutputItemDoneEvent(item=ResponseReasoningItem() as item):
                 reasoning_id = item.id
                 reasoning_message = self.reasoning_messages[reasoning_id]
                 assert reasoning_message, f"Reasoning message with ID {reasoning_id} not found"
-
-
-
-                # TODO(serialx): We should move all tool call lifecycle start to here
-                # ResponseOutputItemDoneEvent can have race conditions in case of task tool calls
-                # Because we stream sub-agent events to our method: handle_task_tool_event()
-                # XXX(serialx): Just handle task tool call lifecycle here for now
-                # I'm deferring this because `arguments` is not available here... need to refactor
-                if tool_name == "task":
-                    await self.handle_tool_call(tool_name, "{}", call_id)
+                text = "\n\n".join(summary.text for summary in item.summary)
+                reasoning_message.update(text, status=MessageStatus.IDLE)
+                await self.message_handler.handle_agent_message_update(reasoning_message)

             case ResponseTextDeltaEvent(delta=delta) if delta:
                 await self.handle_text_delta(delta)

-            case
-
+            case ResponseTextDoneEvent() as e:
+                self.agent_message = AgentMessage(e.text, status=MessageStatus.IDLE)
+                await self.message_handler.handle_agent_message(self.agent_message)
+
+            case (
+                ResponseOutputItemDoneEvent(
+                    item=ResponseFunctionToolCall(name=tool_name, arguments=arguments, call_id=call_id)
+                ) as e
             ):
                 # XXX(serialx): See above comments
                 if tool_name == "task":
@@ -182,33 +176,18 @@ class AgentStreamHandler:
                 else:
                     await self.handle_tool_call(tool_name, arguments, call_id)

-            case ResponseCompletedEvent():
-                # When in agent handoff or stop at tool situations, the tools should be in executing status.
-                # We find all the executing status tool messages and mark them as success.
-                for tool_message in self.tool_messages.values():
-                    if tool_message.status == MessageStatus.EXECUTING:
-                        tool_message.status = MessageStatus.SUCCESS
-
             case RunItemStreamEvent(item=item):
                 # log(f"RunItemStreamEvent item: {item.type}")
                 match item:
-                    case ToolCallItem():
-
-                    case ToolCallOutputItem(
+                    case ToolCallItem(call_output=ResponseFunctionToolCall() as call):
+                        await self.handle_tool_call(call.name, call.arguments, call.call_id)
+                    case ToolCallOutputItem(raw_item=raw_item, output=output):
                         # Find the corresponding tool message by call_id
-                        if
-                            isinstance(raw_item, dict)
-                            and "call_id" in raw_item
-                            and raw_item["call_id"] in self.tool_messages
-                        ):
+                        if raw_item["type"] == "function_call_output" and raw_item["call_id"] in self.tool_messages:
                             await self.handle_tool_output(output, raw_item["call_id"])
                     case MessageOutputItem():
                         await self.handle_message_complete()

-            case AgentUpdatedStreamEvent(new_agent=new_agent):
-                # log(f"AgentUpdatedStreamEvent new_agent: {new_agent.name}")
-                await self.message_handler.handle_agent_update(new_agent)
-
     async def handle_task_tool_event(self, tool_name: str, tool_call_id: str, event: StreamEvent) -> None:
         """Handle streaming events from task tool sub-agents.
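The new text-delta handling in `AgentStreamHandler.handle_text_delta` above throttles UI updates: short messages refresh on sentence ends or newlines, long ones only after roughly 200 unrendered characters accumulate. A standalone sketch of that decision rule, with illustrative names (the package keeps this logic inline rather than in a helper):

```python
def should_emit_update(full_content: str, rendered_text: str, delta: str) -> bool:
    """Mirror of the throttling rule added to handle_text_delta above.

    full_content: accumulated message text so far
    rendered_text: text currently shown in the AgentMessage widget
    delta: the newly received text chunk
    """
    # Short content: update on sentence ends or newlines for responsive UX.
    short_and_meaningful = len(full_content) < 1000 and (
        full_content.endswith(".") or "\n" in delta
    )
    # Long content: update only once ~200 unrendered characters accumulate,
    # to avoid UI lag from re-rendering on every delta.
    bulk_update_due = len(full_content) - len(rendered_text) > 200
    return short_and_meaningful or bulk_update_due
```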