kader 0.1.5__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cli/app.py +98 -61
- cli/app.tcss +27 -382
- cli/utils.py +1 -6
- cli/widgets/conversation.py +50 -4
- kader/__init__.py +2 -0
- kader/agent/agents.py +8 -0
- kader/agent/base.py +68 -5
- kader/memory/types.py +60 -0
- kader/prompts/__init__.py +9 -1
- kader/prompts/agent_prompts.py +28 -0
- kader/prompts/templates/executor_agent.j2 +70 -0
- kader/prompts/templates/kader_planner.j2 +71 -0
- kader/providers/ollama.py +2 -2
- kader/tools/__init__.py +26 -0
- kader/tools/agent.py +452 -0
- kader/tools/filesys.py +1 -1
- kader/tools/todo.py +43 -2
- kader/utils/__init__.py +10 -0
- kader/utils/checkpointer.py +371 -0
- kader/utils/context_aggregator.py +347 -0
- kader/workflows/__init__.py +13 -0
- kader/workflows/base.py +71 -0
- kader/workflows/planner_executor.py +251 -0
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/METADATA +38 -1
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/RECORD +27 -18
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/WHEEL +0 -0
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,347 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Context Aggregator module for aggregating sub-agent checkpoint contexts.
|
|
3
|
+
|
|
4
|
+
Aggregates checkpoint.md files from sub-agents into a unified context file
|
|
5
|
+
in the main session's executors directory.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from kader.memory.types import aread_text, awrite_text, get_default_memory_dir
|
|
11
|
+
from kader.providers.base import Message
|
|
12
|
+
from kader.providers.ollama import OllamaProvider
|
|
13
|
+
|
|
14
|
+
AGGREGATOR_SYSTEM_PROMPT = """You are an assistant that aggregates and merges checkpoint summaries from multiple sub-agents.
|
|
15
|
+
Given checkpoint summaries from different sub-agents, create a unified summary that combines all information.
|
|
16
|
+
|
|
17
|
+
Your merged summary MUST include the following sections:
|
|
18
|
+
|
|
19
|
+
## Directory Structure
|
|
20
|
+
Merge all directory structures from sub-agents into a unified tree.
|
|
21
|
+
Use a tree-like format:
|
|
22
|
+
```
|
|
23
|
+
project/
|
|
24
|
+
├── src/
|
|
25
|
+
│ └── main.py
|
|
26
|
+
└── README.md
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
## Actions Performed
|
|
30
|
+
Summarize the main accomplishments and significant actions taken by all sub-agents.
|
|
31
|
+
Focus on high-level outcomes, not individual steps. For example:
|
|
32
|
+
- "Implemented user authentication module with login/logout functionality"
|
|
33
|
+
- "Fixed database connection issues and added retry logic"
|
|
34
|
+
- "Created REST API endpoints for user management"
|
|
35
|
+
|
|
36
|
+
Do NOT list every single action (like reading files, running commands, etc.).
|
|
37
|
+
Only mention the meaningful outcomes and key decisions.
|
|
38
|
+
|
|
39
|
+
If a section has no relevant content, write "None" under that section.
|
|
40
|
+
|
|
41
|
+
IMPORTANT: Remove duplicates and merge similar items. Keep the summary organized and clean.
|
|
42
|
+
"""
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class ContextAggregator:
|
|
46
|
+
"""
|
|
47
|
+
Aggregates checkpoint contexts from sub-agents.
|
|
48
|
+
|
|
49
|
+
Reads checkpoint.md files from sub-agent directories and merges them
|
|
50
|
+
into a unified checkpoint.md in the executors directory.
|
|
51
|
+
|
|
52
|
+
Example:
|
|
53
|
+
aggregator = ContextAggregator(session_id="main-session-id")
|
|
54
|
+
md_path = aggregator.aggregate("/path/to/subagent/checkpoint.md")
|
|
55
|
+
print(f"Aggregated checkpoint saved to: {md_path}")
|
|
56
|
+
"""
|
|
57
|
+
|
|
58
|
+
def __init__(
|
|
59
|
+
self,
|
|
60
|
+
session_id: str,
|
|
61
|
+
model: str = "gpt-oss:120b-cloud",
|
|
62
|
+
host: str | None = None,
|
|
63
|
+
) -> None:
|
|
64
|
+
"""
|
|
65
|
+
Initialize the ContextAggregator.
|
|
66
|
+
|
|
67
|
+
Args:
|
|
68
|
+
session_id: The main session ID for the executors directory
|
|
69
|
+
model: Ollama model identifier (default: "gpt-oss:120b-cloud")
|
|
70
|
+
host: Optional Ollama server host
|
|
71
|
+
"""
|
|
72
|
+
self._session_id = session_id
|
|
73
|
+
self._provider = OllamaProvider(model=model, host=host)
|
|
74
|
+
|
|
75
|
+
def _get_executors_dir(self) -> Path:
|
|
76
|
+
"""
|
|
77
|
+
Get the executors directory path for the main session.
|
|
78
|
+
|
|
79
|
+
Returns:
|
|
80
|
+
Path to ~/.kader/memory/sessions/<session-id>/executors/
|
|
81
|
+
"""
|
|
82
|
+
return get_default_memory_dir() / "sessions" / self._session_id / "executors"
|
|
83
|
+
|
|
84
|
+
def _get_aggregated_checkpoint_path(self) -> Path:
|
|
85
|
+
"""
|
|
86
|
+
Get the path for the aggregated checkpoint file.
|
|
87
|
+
|
|
88
|
+
Returns:
|
|
89
|
+
Path to the aggregated checkpoint.md in executors directory
|
|
90
|
+
"""
|
|
91
|
+
return self._get_executors_dir() / "checkpoint.md"
|
|
92
|
+
|
|
93
|
+
def _load_existing_aggregated(self) -> str | None:
|
|
94
|
+
"""
|
|
95
|
+
Load existing aggregated checkpoint if it exists.
|
|
96
|
+
|
|
97
|
+
Returns:
|
|
98
|
+
Content of existing aggregated checkpoint, or None if not exists
|
|
99
|
+
"""
|
|
100
|
+
checkpoint_path = self._get_aggregated_checkpoint_path()
|
|
101
|
+
if checkpoint_path.exists():
|
|
102
|
+
try:
|
|
103
|
+
return checkpoint_path.read_text(encoding="utf-8")
|
|
104
|
+
except Exception:
|
|
105
|
+
return None
|
|
106
|
+
return None
|
|
107
|
+
|
|
108
|
+
async def _aload_existing_aggregated(self) -> str | None:
|
|
109
|
+
"""
|
|
110
|
+
Asynchronously load existing aggregated checkpoint if it exists.
|
|
111
|
+
|
|
112
|
+
Returns:
|
|
113
|
+
Content of existing aggregated checkpoint, or None if not exists
|
|
114
|
+
"""
|
|
115
|
+
checkpoint_path = self._get_aggregated_checkpoint_path()
|
|
116
|
+
if checkpoint_path.exists():
|
|
117
|
+
try:
|
|
118
|
+
return await aread_text(checkpoint_path)
|
|
119
|
+
except Exception:
|
|
120
|
+
return None
|
|
121
|
+
return None
|
|
122
|
+
|
|
123
|
+
def _load_subagent_checkpoint(self, checkpoint_path: str | Path) -> str | None:
|
|
124
|
+
"""
|
|
125
|
+
Load a sub-agent's checkpoint content.
|
|
126
|
+
|
|
127
|
+
Args:
|
|
128
|
+
checkpoint_path: Path to the sub-agent's checkpoint.md file
|
|
129
|
+
|
|
130
|
+
Returns:
|
|
131
|
+
Content of the checkpoint file, or None if not found
|
|
132
|
+
"""
|
|
133
|
+
checkpoint_path = self._get_executors_dir() / checkpoint_path
|
|
134
|
+
path = Path(checkpoint_path)
|
|
135
|
+
if path.exists():
|
|
136
|
+
try:
|
|
137
|
+
return path.read_text(encoding="utf-8")
|
|
138
|
+
except Exception:
|
|
139
|
+
return None
|
|
140
|
+
return None
|
|
141
|
+
|
|
142
|
+
async def _aload_subagent_checkpoint(
|
|
143
|
+
self, checkpoint_path: str | Path
|
|
144
|
+
) -> str | None:
|
|
145
|
+
"""
|
|
146
|
+
Asynchronously load a sub-agent's checkpoint content.
|
|
147
|
+
|
|
148
|
+
Args:
|
|
149
|
+
checkpoint_path: Path to the sub-agent's checkpoint.md file
|
|
150
|
+
|
|
151
|
+
Returns:
|
|
152
|
+
Content of the checkpoint file, or None if not found
|
|
153
|
+
"""
|
|
154
|
+
full_path = self._get_executors_dir() / checkpoint_path
|
|
155
|
+
path = Path(full_path)
|
|
156
|
+
if path.exists():
|
|
157
|
+
try:
|
|
158
|
+
return await aread_text(path)
|
|
159
|
+
except Exception:
|
|
160
|
+
return None
|
|
161
|
+
return None
|
|
162
|
+
|
|
163
|
+
def _merge_checkpoints(
|
|
164
|
+
self,
|
|
165
|
+
existing_aggregated: str | None,
|
|
166
|
+
new_checkpoint: str,
|
|
167
|
+
subagent_name: str | None = None,
|
|
168
|
+
) -> str:
|
|
169
|
+
"""
|
|
170
|
+
Merge a new sub-agent checkpoint into the existing aggregated checkpoint.
|
|
171
|
+
|
|
172
|
+
Args:
|
|
173
|
+
existing_aggregated: Content of existing aggregated checkpoint
|
|
174
|
+
new_checkpoint: Content of the new sub-agent checkpoint
|
|
175
|
+
subagent_name: Optional name of the sub-agent for labeling
|
|
176
|
+
|
|
177
|
+
Returns:
|
|
178
|
+
Merged checkpoint content
|
|
179
|
+
"""
|
|
180
|
+
# If no existing aggregated checkpoint, use the new checkpoint directly
|
|
181
|
+
if not existing_aggregated:
|
|
182
|
+
return new_checkpoint
|
|
183
|
+
|
|
184
|
+
user_prompt = f"""Here is the existing aggregated checkpoint from previous sub-agents:
|
|
185
|
+
|
|
186
|
+
---
|
|
187
|
+
{existing_aggregated}
|
|
188
|
+
---
|
|
189
|
+
|
|
190
|
+
Here is the new checkpoint from sub-agent{f' "{subagent_name}"' if subagent_name else ""}:
|
|
191
|
+
|
|
192
|
+
---
|
|
193
|
+
{new_checkpoint}
|
|
194
|
+
---
|
|
195
|
+
|
|
196
|
+
Merge the new checkpoint into the existing aggregated checkpoint.
|
|
197
|
+
Combine items into the appropriate sections, remove duplicates, and keep everything organized."""
|
|
198
|
+
|
|
199
|
+
messages = [
|
|
200
|
+
Message.system(AGGREGATOR_SYSTEM_PROMPT),
|
|
201
|
+
Message.user(user_prompt),
|
|
202
|
+
]
|
|
203
|
+
|
|
204
|
+
response = self._provider.invoke(messages)
|
|
205
|
+
return response.content
|
|
206
|
+
|
|
207
|
+
async def _amerge_checkpoints(
|
|
208
|
+
self,
|
|
209
|
+
existing_aggregated: str | None,
|
|
210
|
+
new_checkpoint: str,
|
|
211
|
+
subagent_name: str | None = None,
|
|
212
|
+
) -> str:
|
|
213
|
+
"""
|
|
214
|
+
Merge a new sub-agent checkpoint into the existing aggregated checkpoint (async).
|
|
215
|
+
|
|
216
|
+
Args:
|
|
217
|
+
existing_aggregated: Content of existing aggregated checkpoint
|
|
218
|
+
new_checkpoint: Content of the new sub-agent checkpoint
|
|
219
|
+
subagent_name: Optional name of the sub-agent for labeling
|
|
220
|
+
|
|
221
|
+
Returns:
|
|
222
|
+
Merged checkpoint content
|
|
223
|
+
"""
|
|
224
|
+
# If no existing aggregated checkpoint, use the new checkpoint directly
|
|
225
|
+
if not existing_aggregated:
|
|
226
|
+
return new_checkpoint
|
|
227
|
+
|
|
228
|
+
user_prompt = f"""Here is the existing aggregated checkpoint from previous sub-agents:
|
|
229
|
+
|
|
230
|
+
---
|
|
231
|
+
{existing_aggregated}
|
|
232
|
+
---
|
|
233
|
+
|
|
234
|
+
Here is the new checkpoint from sub-agent{f' "{subagent_name}"' if subagent_name else ""}:
|
|
235
|
+
|
|
236
|
+
---
|
|
237
|
+
{new_checkpoint}
|
|
238
|
+
---
|
|
239
|
+
|
|
240
|
+
Merge the new checkpoint into the existing aggregated checkpoint.
|
|
241
|
+
Combine items into the appropriate sections, remove duplicates, and keep everything organized."""
|
|
242
|
+
|
|
243
|
+
messages = [
|
|
244
|
+
Message.system(AGGREGATOR_SYSTEM_PROMPT),
|
|
245
|
+
Message.user(user_prompt),
|
|
246
|
+
]
|
|
247
|
+
|
|
248
|
+
response = await self._provider.ainvoke(messages)
|
|
249
|
+
return response.content
|
|
250
|
+
|
|
251
|
+
def aggregate(
|
|
252
|
+
self, subagent_checkpoint_path: str, subagent_name: str | None = None
|
|
253
|
+
) -> str:
|
|
254
|
+
"""
|
|
255
|
+
Aggregate a sub-agent's checkpoint into the main executors checkpoint.
|
|
256
|
+
|
|
257
|
+
If checkpoint.md exists in the executors directory, it will be updated
|
|
258
|
+
with the new sub-agent's checkpoint. Otherwise, a new aggregated file is created.
|
|
259
|
+
|
|
260
|
+
Args:
|
|
261
|
+
subagent_checkpoint_path: Path to the sub-agent's checkpoint.md file
|
|
262
|
+
subagent_name: Optional name of the sub-agent for labeling in the summary
|
|
263
|
+
|
|
264
|
+
Returns:
|
|
265
|
+
Absolute path to the aggregated checkpoint file
|
|
266
|
+
|
|
267
|
+
Raises:
|
|
268
|
+
FileNotFoundError: If the sub-agent checkpoint file doesn't exist
|
|
269
|
+
ValueError: If the sub-agent checkpoint is empty
|
|
270
|
+
"""
|
|
271
|
+
# Load the sub-agent's checkpoint
|
|
272
|
+
new_checkpoint = self._load_subagent_checkpoint(subagent_checkpoint_path)
|
|
273
|
+
if not new_checkpoint:
|
|
274
|
+
raise FileNotFoundError(
|
|
275
|
+
f"Sub-agent checkpoint not found: {subagent_checkpoint_path}"
|
|
276
|
+
)
|
|
277
|
+
|
|
278
|
+
if not new_checkpoint.strip():
|
|
279
|
+
raise ValueError(
|
|
280
|
+
f"Sub-agent checkpoint is empty: {subagent_checkpoint_path}"
|
|
281
|
+
)
|
|
282
|
+
|
|
283
|
+
# Load existing aggregated checkpoint if it exists
|
|
284
|
+
existing_aggregated = self._load_existing_aggregated()
|
|
285
|
+
|
|
286
|
+
# Merge the checkpoints
|
|
287
|
+
merged_content = self._merge_checkpoints(
|
|
288
|
+
existing_aggregated, new_checkpoint, subagent_name
|
|
289
|
+
)
|
|
290
|
+
|
|
291
|
+
# Ensure executors directory exists
|
|
292
|
+
aggregated_path = self._get_aggregated_checkpoint_path()
|
|
293
|
+
aggregated_path.parent.mkdir(parents=True, exist_ok=True)
|
|
294
|
+
|
|
295
|
+
# Save the aggregated checkpoint
|
|
296
|
+
aggregated_path.write_text(merged_content, encoding="utf-8")
|
|
297
|
+
|
|
298
|
+
return str(aggregated_path)
|
|
299
|
+
|
|
300
|
+
async def aaggregate(
|
|
301
|
+
self, subagent_checkpoint_path: str, subagent_name: str | None = None
|
|
302
|
+
) -> str:
|
|
303
|
+
"""
|
|
304
|
+
Aggregate a sub-agent's checkpoint into the main executors checkpoint (async).
|
|
305
|
+
|
|
306
|
+
If checkpoint.md exists in the executors directory, it will be updated
|
|
307
|
+
with the new sub-agent's checkpoint. Otherwise, a new aggregated file is created.
|
|
308
|
+
|
|
309
|
+
Args:
|
|
310
|
+
subagent_checkpoint_path: Path to the sub-agent's checkpoint.md file
|
|
311
|
+
subagent_name: Optional name of the sub-agent for labeling in the summary
|
|
312
|
+
|
|
313
|
+
Returns:
|
|
314
|
+
Absolute path to the aggregated checkpoint file
|
|
315
|
+
|
|
316
|
+
Raises:
|
|
317
|
+
FileNotFoundError: If the sub-agent checkpoint file doesn't exist
|
|
318
|
+
ValueError: If the sub-agent checkpoint is empty
|
|
319
|
+
"""
|
|
320
|
+
# Load the sub-agent's checkpoint (async)
|
|
321
|
+
new_checkpoint = await self._aload_subagent_checkpoint(subagent_checkpoint_path)
|
|
322
|
+
if not new_checkpoint:
|
|
323
|
+
raise FileNotFoundError(
|
|
324
|
+
f"Sub-agent checkpoint not found: {subagent_checkpoint_path}"
|
|
325
|
+
)
|
|
326
|
+
|
|
327
|
+
if not new_checkpoint.strip():
|
|
328
|
+
raise ValueError(
|
|
329
|
+
f"Sub-agent checkpoint is empty: {subagent_checkpoint_path}"
|
|
330
|
+
)
|
|
331
|
+
|
|
332
|
+
# Load existing aggregated checkpoint if it exists (async)
|
|
333
|
+
existing_aggregated = await self._aload_existing_aggregated()
|
|
334
|
+
|
|
335
|
+
# Merge the checkpoints
|
|
336
|
+
merged_content = await self._amerge_checkpoints(
|
|
337
|
+
existing_aggregated, new_checkpoint, subagent_name
|
|
338
|
+
)
|
|
339
|
+
|
|
340
|
+
# Ensure executors directory exists
|
|
341
|
+
aggregated_path = self._get_aggregated_checkpoint_path()
|
|
342
|
+
aggregated_path.parent.mkdir(parents=True, exist_ok=True)
|
|
343
|
+
|
|
344
|
+
# Save the aggregated checkpoint (async)
|
|
345
|
+
await awrite_text(aggregated_path, merged_content)
|
|
346
|
+
|
|
347
|
+
return str(aggregated_path)
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Kader Workflows Module.
|
|
3
|
+
|
|
4
|
+
Provides workflow implementations for orchestrating agents in complex task flows.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from kader.workflows.base import BaseWorkflow
|
|
8
|
+
from kader.workflows.planner_executor import PlannerExecutorWorkflow
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"BaseWorkflow",
|
|
12
|
+
"PlannerExecutorWorkflow",
|
|
13
|
+
]
|
kader/workflows/base.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Base Workflow Class.
|
|
3
|
+
|
|
4
|
+
Abstract base class for all workflow implementations.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from abc import ABC, abstractmethod
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
from kader.providers.base import BaseLLMProvider
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BaseWorkflow(ABC):
    """
    Abstract base class for workflow implementations.

    A workflow orchestrates one or more agents (or agent interactions) to
    carry out a complex task, giving a structured way to compose agent
    behaviors and manage how information flows between them.

    Subclasses must implement:
        - run: Synchronous workflow execution
        - arun: Asynchronous workflow execution
    """

    def __init__(
        self,
        name: str,
        provider: Optional[BaseLLMProvider] = None,
        model_name: str = "qwen3-coder:480b-cloud",
        interrupt_before_tool: bool = True,
    ) -> None:
        """
        Initialize the base workflow.

        Args:
            name: Name of the workflow instance.
            provider: LLM provider for agents in the workflow.
            model_name: Model to use if no provider specified.
            interrupt_before_tool: Whether to pause before tool execution.
        """
        self.name = name
        self.provider = provider
        self.model_name = model_name
        self.interrupt_before_tool = interrupt_before_tool

    @abstractmethod
    def run(self, task: str) -> str:
        """
        Execute the workflow synchronously.

        Args:
            task: The task or goal to accomplish.

        Returns:
            A summary of the workflow execution and results.
        """

    @abstractmethod
    async def arun(self, task: str) -> str:
        """
        Execute the workflow asynchronously.

        Args:
            task: The task or goal to accomplish.

        Returns:
            A summary of the workflow execution and results.
        """
|
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Planner Executor Workflow.
|
|
3
|
+
|
|
4
|
+
Orchestrates a PlanningAgent with TodoTool and AgentTool to break down tasks
|
|
5
|
+
and delegate sub-tasks to executor agents.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import uuid
|
|
9
|
+
from typing import Callable, Optional, Tuple
|
|
10
|
+
|
|
11
|
+
from kader.agent.agents import PlanningAgent
|
|
12
|
+
from kader.memory import SlidingWindowConversationManager
|
|
13
|
+
from kader.memory.types import get_default_memory_dir
|
|
14
|
+
from kader.prompts import KaderPlannerPrompt
|
|
15
|
+
from kader.providers.base import BaseLLMProvider, Message
|
|
16
|
+
from kader.tools import AgentTool, TodoTool, ToolRegistry
|
|
17
|
+
from kader.utils import Checkpointer
|
|
18
|
+
|
|
19
|
+
from .base import BaseWorkflow
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class PlannerExecutorWorkflow(BaseWorkflow):
    """
    Planner-Executor Workflow using PlanningAgent with sub-agent delegation.

    This workflow:
    1. Accepts a user task
    2. PlanningAgent creates a plan using TodoTool
    3. For each sub-task, PlanningAgent can delegate to AgentTool (executor)
    4. Executor outputs are added to planner's memory as assistant messages
    5. PlanningAgent updates TodoTool status and continues until complete

    Example:
        workflow = PlannerExecutorWorkflow(name="project_workflow")
        result = workflow.run("Create a Python project with README and tests")
    """

    def __init__(
        self,
        name: str,
        provider: Optional[BaseLLMProvider] = None,
        model_name: str = "qwen3-coder:480b-cloud",
        interrupt_before_tool: bool = True,
        tool_confirmation_callback: Optional[
            Callable[..., Tuple[bool, Optional[str]]]
        ] = None,
        direct_execution_callback: Optional[Callable[..., None]] = None,
        tool_execution_result_callback: Optional[Callable[..., None]] = None,
        use_persistence: bool = False,
        session_id: Optional[str] = None,
        executor_names: Optional[list[str]] = None,
    ) -> None:
        """
        Initialize the Planner-Executor workflow.

        Args:
            name: Name of the workflow instance.
            provider: LLM provider for agents.
            model_name: Model to use if no provider specified.
            interrupt_before_tool: Whether sub-agents pause before tool execution
                (the planner itself always runs uninterrupted).
            tool_confirmation_callback: Callback for tool confirmation UI.
            direct_execution_callback: Callback invoked when a tool is executed
                directly (no confirmation step).
            tool_execution_result_callback: Callback invoked with tool results.
            use_persistence: Enable session persistence.
            session_id: Optional session ID to resume; a fresh UUID is generated
                when omitted.
            executor_names: Names for executor sub-agents (default: ["executor"]).
        """
        super().__init__(
            name=name,
            provider=provider,
            model_name=model_name,
            interrupt_before_tool=interrupt_before_tool,
        )
        self.tool_confirmation_callback = tool_confirmation_callback
        self.direct_execution_callback = direct_execution_callback
        self.tool_execution_result_callback = tool_execution_result_callback
        self.use_persistence = use_persistence
        self.session_id = session_id if session_id else str(uuid.uuid4())
        self.executor_names = executor_names or ["executor"]

        # Build the planner agent with tools
        self._planner = self._build_planner()

    def _load_checkpoint_context(self) -> Optional[str]:
        """
        Load checkpoint context from the session directory if it exists.

        Returns:
            The checkpoint markdown content if file exists, None otherwise
            (read failures are swallowed as a best-effort load).
        """
        if not self.session_id:
            return None

        checkpoint_path = (
            get_default_memory_dir() / "sessions" / self.session_id / "checkpoint.md"
        )

        if checkpoint_path.exists():
            try:
                return checkpoint_path.read_text(encoding="utf-8")
            except Exception:
                return None

        return None

    def _build_planner(self) -> PlanningAgent:
        """Build the PlanningAgent with TodoTool and AgentTool(s)."""
        registry = ToolRegistry()

        # TodoTool is added implicitly by PlanningAgent, but we ensure it's there
        registry.register(TodoTool())

        # Add AgentTool(s) for sub-task delegation
        for executor_name in self.executor_names:
            agent_tool = AgentTool(
                name=executor_name,
                description=(
                    f"Delegate a sub-task to the '{executor_name}' agent. "
                    "Use this when a specific task needs to be executed by a "
                    "specialized worker agent. Provide a clear task description."
                ),
                provider=self.provider,
                model_name=self.model_name,
                interrupt_before_tool=self.interrupt_before_tool,
                tool_confirmation_callback=self.tool_confirmation_callback,
                direct_execution_callback=self.direct_execution_callback,
                tool_execution_result_callback=self.tool_execution_result_callback,
            )
            registry.register(agent_tool)

        # Create memory for the planner
        memory = SlidingWindowConversationManager(window_size=20)

        # Load checkpoint context if it exists from previous iterations
        checkpoint_context = self._load_checkpoint_context()

        # Create the Kader system prompt with tool descriptions and context
        system_prompt = KaderPlannerPrompt(
            tools=registry.tools,
            context=checkpoint_context,
        )

        # Build the PlanningAgent
        # Note: The planner itself runs without interruption (TodoTool, AgentTool execute directly)
        # Sub-agents inside AgentTool can still have their own interrupt settings
        planner = PlanningAgent(
            name=f"{self.name}_planner",
            tools=registry,
            system_prompt=system_prompt,
            provider=self.provider,
            memory=memory,
            model_name=self.model_name,
            session_id=self.session_id,
            use_persistence=self.use_persistence,
            interrupt_before_tool=False,  # Planner executes TodoTool/AgentTool directly
            tool_confirmation_callback=self.tool_confirmation_callback,
            direct_execution_callback=self.direct_execution_callback,
            tool_execution_result_callback=self.tool_execution_result_callback,
        )

        return planner

    def _add_executor_output_to_memory(self, executor_name: str, output: str) -> None:
        """
        Add executor agent output to planner's memory as assistant message.

        Args:
            executor_name: Name of the executor that produced the output.
            output: The output/result from the executor.
        """
        # Format the executor output as an assistant message
        formatted_output = f"[{executor_name} completed]: {output}"
        self._planner.memory.add_message(Message.assistant(formatted_output))

    def _create_checkpoint(self) -> Optional[str]:
        """
        Create a checkpoint of the current session using the Checkpointer.

        Returns:
            Path to the checkpoint file if created, None otherwise.
        """
        if not self.session_id or not self.use_persistence:
            return None

        try:
            checkpointer = Checkpointer()
            memory_path = f"{self.session_id}/conversation.json"
            checkpoint_path = checkpointer.generate_checkpoint(memory_path)
            return checkpoint_path
        except Exception:
            # Silently fail if checkpointing fails - don't interrupt workflow
            return None

    @staticmethod
    def _extract_content(response: object) -> str:
        """
        Normalize an agent response into plain text.

        Handles Message-like objects (with a ``content`` attribute), dicts
        (preferring the "content" key), and anything else via ``str()``.
        Shared by run() and arun() so the extraction logic stays in sync.
        """
        if hasattr(response, "content"):
            return str(response.content)
        if isinstance(response, dict):
            return str(response.get("content", str(response)))
        return str(response)

    def run(self, task: str) -> str:
        """
        Execute the planner-executor workflow synchronously.

        Args:
            task: The main task to accomplish.

        Returns:
            Final response from the planner summarizing completed work.
        """
        # Invoke the planner with the task
        # The PlanningAgent will:
        # 1. Create a plan using TodoTool
        # 2. Delegate sub-tasks using AgentTool when needed
        # 3. Update todo status as tasks complete
        # 4. Continue until all tasks are done
        response = self._planner.invoke(task)

        # Create checkpoint after execution completes
        self._create_checkpoint()

        # Extract content from response
        return self._extract_content(response)

    async def arun(self, task: str) -> str:
        """
        Execute the planner-executor workflow asynchronously.

        Args:
            task: The main task to accomplish.

        Returns:
            Final response from the planner summarizing completed work.
        """
        response = await self._planner.ainvoke(task)

        # Create checkpoint after execution completes
        self._create_checkpoint()

        return self._extract_content(response)

    @property
    def planner(self) -> PlanningAgent:
        """Get the underlying PlanningAgent instance."""
        return self._planner

    def reset(self) -> None:
        """Reset the workflow by rebuilding the planner with fresh memory."""
        self._planner = self._build_planner()
|