kader 0.1.5__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cli/app.py +98 -61
- cli/app.tcss +27 -382
- cli/utils.py +1 -6
- cli/widgets/conversation.py +50 -4
- kader/__init__.py +2 -0
- kader/agent/agents.py +8 -0
- kader/agent/base.py +68 -5
- kader/memory/types.py +60 -0
- kader/prompts/__init__.py +9 -1
- kader/prompts/agent_prompts.py +28 -0
- kader/prompts/templates/executor_agent.j2 +70 -0
- kader/prompts/templates/kader_planner.j2 +71 -0
- kader/providers/ollama.py +2 -2
- kader/tools/__init__.py +26 -0
- kader/tools/agent.py +452 -0
- kader/tools/filesys.py +1 -1
- kader/tools/todo.py +43 -2
- kader/utils/__init__.py +10 -0
- kader/utils/checkpointer.py +371 -0
- kader/utils/context_aggregator.py +347 -0
- kader/workflows/__init__.py +13 -0
- kader/workflows/base.py +71 -0
- kader/workflows/planner_executor.py +251 -0
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/METADATA +38 -1
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/RECORD +27 -18
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/WHEEL +0 -0
- {kader-0.1.5.dist-info → kader-1.0.0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,371 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Checkpointer module for generating step-by-step summaries of agent memory.
|
|
3
|
+
|
|
4
|
+
Uses OllamaProvider to analyze conversation history and produce
|
|
5
|
+
human-readable markdown summaries.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from kader.memory.types import (
|
|
12
|
+
aload_json,
|
|
13
|
+
aread_text,
|
|
14
|
+
awrite_text,
|
|
15
|
+
get_default_memory_dir,
|
|
16
|
+
load_json,
|
|
17
|
+
)
|
|
18
|
+
from kader.providers.base import Message
|
|
19
|
+
from kader.providers.ollama import OllamaProvider
|
|
20
|
+
|
|
21
|
+
CHECKPOINT_SYSTEM_PROMPT = """You are an assistant that summarizes agent conversation histories.
|
|
22
|
+
Given a conversation between a user and an AI agent, create a structured summary in markdown format.
|
|
23
|
+
|
|
24
|
+
Your summary MUST include the following sections:
|
|
25
|
+
|
|
26
|
+
## Directory Structure
|
|
27
|
+
List the directory structure of any files/folders created or modified during the conversation.
|
|
28
|
+
Use a tree-like format:
|
|
29
|
+
```
|
|
30
|
+
project/
|
|
31
|
+
├── src/
|
|
32
|
+
│ └── main.py
|
|
33
|
+
└── README.md
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
## Actions Performed
|
|
37
|
+
Summarize the main accomplishments and significant actions taken by the agent.
|
|
38
|
+
Focus on high-level outcomes, not individual steps. For example:
|
|
39
|
+
- "Implemented user authentication module with login/logout functionality"
|
|
40
|
+
- "Fixed database connection issues and added retry logic"
|
|
41
|
+
- "Created REST API endpoints for user management"
|
|
42
|
+
|
|
43
|
+
Do NOT list every single action (like reading files, running commands, etc.).
|
|
44
|
+
Only mention the meaningful outcomes and key decisions.
|
|
45
|
+
|
|
46
|
+
If a section has no relevant content, write "None" under that section.
|
|
47
|
+
"""
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class Checkpointer:
    """
    Generates step-by-step markdown summaries of agent memory.

    Uses OllamaProvider to analyze conversation history from memory files
    and produce human-readable checkpoint summaries saved as
    ``checkpoint.md`` next to the memory file.

    Example:
        checkpointer = Checkpointer()
        md_path = checkpointer.generate_checkpoint("session-id/conversation.json")
        print(f"Checkpoint saved to: {md_path}")
    """

    def __init__(
        self,
        model: str = "gpt-oss:120b-cloud",
        host: str | None = None,
    ) -> None:
        """
        Initialize the Checkpointer.

        Args:
            model: Ollama model identifier (default: "gpt-oss:120b-cloud")
            host: Optional Ollama server host; None lets the provider pick
                its own default endpoint.
        """
        # One provider instance is reused for every summary request.
        self._provider = OllamaProvider(model=model, host=host)
|
|
76
|
+
|
|
77
|
+
def _load_memory(self, memory_path: Path) -> dict[str, Any]:
|
|
78
|
+
"""
|
|
79
|
+
Load memory JSON from the specified path.
|
|
80
|
+
|
|
81
|
+
Args:
|
|
82
|
+
memory_path: Absolute path to the memory JSON file
|
|
83
|
+
|
|
84
|
+
Returns:
|
|
85
|
+
Dictionary containing the memory data
|
|
86
|
+
|
|
87
|
+
Raises:
|
|
88
|
+
FileNotFoundError: If the memory file doesn't exist
|
|
89
|
+
"""
|
|
90
|
+
if not memory_path.exists():
|
|
91
|
+
raise FileNotFoundError(f"Memory file not found: {memory_path}")
|
|
92
|
+
|
|
93
|
+
return load_json(memory_path)
|
|
94
|
+
|
|
95
|
+
def _extract_messages(self, memory_data: dict[str, Any]) -> list[dict[str, Any]]:
|
|
96
|
+
"""
|
|
97
|
+
Extract messages from memory data.
|
|
98
|
+
|
|
99
|
+
Args:
|
|
100
|
+
memory_data: Dictionary containing memory data
|
|
101
|
+
|
|
102
|
+
Returns:
|
|
103
|
+
List of message dictionaries
|
|
104
|
+
"""
|
|
105
|
+
# Handle different memory formats
|
|
106
|
+
if "messages" in memory_data:
|
|
107
|
+
# Standard conversation format
|
|
108
|
+
messages = memory_data["messages"]
|
|
109
|
+
# Extract inner message if wrapped in ConversationMessage format
|
|
110
|
+
return [
|
|
111
|
+
msg.get("message", msg) if isinstance(msg, dict) else msg
|
|
112
|
+
for msg in messages
|
|
113
|
+
]
|
|
114
|
+
elif "conversation" in memory_data:
|
|
115
|
+
# Alternative format
|
|
116
|
+
return memory_data["conversation"]
|
|
117
|
+
else:
|
|
118
|
+
# Return empty if no known format
|
|
119
|
+
return []
|
|
120
|
+
|
|
121
|
+
def _format_conversation_for_prompt(self, messages: list[dict[str, Any]]) -> str:
|
|
122
|
+
"""
|
|
123
|
+
Format messages into a readable string for the LLM prompt.
|
|
124
|
+
|
|
125
|
+
Args:
|
|
126
|
+
messages: List of message dictionaries
|
|
127
|
+
|
|
128
|
+
Returns:
|
|
129
|
+
Formatted string representation of the conversation
|
|
130
|
+
"""
|
|
131
|
+
lines = []
|
|
132
|
+
for i, msg in enumerate(messages, 1):
|
|
133
|
+
role = msg.get("role", "unknown").upper()
|
|
134
|
+
content = msg.get("content", "")
|
|
135
|
+
|
|
136
|
+
# Handle tool calls
|
|
137
|
+
tool_calls = msg.get("tool_calls", [])
|
|
138
|
+
if tool_calls:
|
|
139
|
+
lines.append(f"[{i}] {role}: (calling tools)")
|
|
140
|
+
for tc in tool_calls:
|
|
141
|
+
func = tc.get("function", {})
|
|
142
|
+
name = func.get("name", "unknown")
|
|
143
|
+
args = func.get("arguments", {})
|
|
144
|
+
lines.append(f" -> Tool: {name}")
|
|
145
|
+
lines.append(f" Args: {args}")
|
|
146
|
+
elif content:
|
|
147
|
+
# Truncate very long content
|
|
148
|
+
if len(content) > 1000:
|
|
149
|
+
content = content[:1000] + "... [truncated]"
|
|
150
|
+
lines.append(f"[{i}] {role}: {content}")
|
|
151
|
+
|
|
152
|
+
# Handle tool call ID (tool results)
|
|
153
|
+
tool_call_id = msg.get("tool_call_id")
|
|
154
|
+
if tool_call_id:
|
|
155
|
+
lines.append(f" (tool result for: {tool_call_id})")
|
|
156
|
+
|
|
157
|
+
return "\n".join(lines)
|
|
158
|
+
|
|
159
|
+
def _generate_summary(
|
|
160
|
+
self, conversation_text: str, existing_checkpoint: str | None = None
|
|
161
|
+
) -> str:
|
|
162
|
+
"""
|
|
163
|
+
Generate a step-by-step summary using the LLM (synchronous).
|
|
164
|
+
|
|
165
|
+
Args:
|
|
166
|
+
conversation_text: Formatted conversation text
|
|
167
|
+
existing_checkpoint: Existing checkpoint content to update, if any
|
|
168
|
+
|
|
169
|
+
Returns:
|
|
170
|
+
Markdown summary of the conversation
|
|
171
|
+
"""
|
|
172
|
+
if existing_checkpoint:
|
|
173
|
+
user_prompt = f"""Here is the existing checkpoint from previous iterations:
|
|
174
|
+
|
|
175
|
+
---
|
|
176
|
+
{existing_checkpoint}
|
|
177
|
+
---
|
|
178
|
+
|
|
179
|
+
Here is the new conversation to incorporate:
|
|
180
|
+
|
|
181
|
+
---
|
|
182
|
+
{conversation_text}
|
|
183
|
+
---
|
|
184
|
+
|
|
185
|
+
Update the existing checkpoint by incorporating the new information. Merge new items into the existing sections.
|
|
186
|
+
Keep all previously documented content and add new content from this iteration."""
|
|
187
|
+
else:
|
|
188
|
+
user_prompt = f"""Please analyze this agent conversation and create a checkpoint summary:
|
|
189
|
+
|
|
190
|
+
---
|
|
191
|
+
{conversation_text}
|
|
192
|
+
---
|
|
193
|
+
|
|
194
|
+
Create a structured summary following the format specified."""
|
|
195
|
+
|
|
196
|
+
messages = [
|
|
197
|
+
Message.system(CHECKPOINT_SYSTEM_PROMPT),
|
|
198
|
+
Message.user(user_prompt),
|
|
199
|
+
]
|
|
200
|
+
|
|
201
|
+
response = self._provider.invoke(messages)
|
|
202
|
+
return response.content
|
|
203
|
+
|
|
204
|
+
async def _agenerate_summary(
|
|
205
|
+
self, conversation_text: str, existing_checkpoint: str | None = None
|
|
206
|
+
) -> str:
|
|
207
|
+
"""
|
|
208
|
+
Generate a step-by-step summary using the LLM (asynchronous).
|
|
209
|
+
|
|
210
|
+
Args:
|
|
211
|
+
conversation_text: Formatted conversation text
|
|
212
|
+
existing_checkpoint: Existing checkpoint content to update, if any
|
|
213
|
+
|
|
214
|
+
Returns:
|
|
215
|
+
Markdown summary of the conversation
|
|
216
|
+
"""
|
|
217
|
+
if existing_checkpoint:
|
|
218
|
+
user_prompt = f"""Here is the existing checkpoint from previous iterations:
|
|
219
|
+
|
|
220
|
+
---
|
|
221
|
+
{existing_checkpoint}
|
|
222
|
+
---
|
|
223
|
+
|
|
224
|
+
Here is the new conversation to incorporate:
|
|
225
|
+
|
|
226
|
+
---
|
|
227
|
+
{conversation_text}
|
|
228
|
+
---
|
|
229
|
+
|
|
230
|
+
Update the existing checkpoint by incorporating the new information. Merge new items into the existing sections.
|
|
231
|
+
Keep all previously documented content and add new content from this iteration."""
|
|
232
|
+
else:
|
|
233
|
+
user_prompt = f"""Please analyze this agent conversation and create a checkpoint summary:
|
|
234
|
+
|
|
235
|
+
---
|
|
236
|
+
{conversation_text}
|
|
237
|
+
---
|
|
238
|
+
|
|
239
|
+
Create a structured summary following the format specified."""
|
|
240
|
+
|
|
241
|
+
messages = [
|
|
242
|
+
Message.system(CHECKPOINT_SYSTEM_PROMPT),
|
|
243
|
+
Message.user(user_prompt),
|
|
244
|
+
]
|
|
245
|
+
|
|
246
|
+
response = await self._provider.ainvoke(messages)
|
|
247
|
+
return response.content
|
|
248
|
+
|
|
249
|
+
def _load_existing_checkpoint(self, checkpoint_path: Path) -> str | None:
|
|
250
|
+
"""
|
|
251
|
+
Load existing checkpoint content if it exists.
|
|
252
|
+
|
|
253
|
+
Args:
|
|
254
|
+
checkpoint_path: Path to the checkpoint file
|
|
255
|
+
|
|
256
|
+
Returns:
|
|
257
|
+
Checkpoint content if exists, None otherwise
|
|
258
|
+
"""
|
|
259
|
+
if checkpoint_path.exists():
|
|
260
|
+
try:
|
|
261
|
+
return checkpoint_path.read_text(encoding="utf-8")
|
|
262
|
+
except Exception:
|
|
263
|
+
return None
|
|
264
|
+
return None
|
|
265
|
+
|
|
266
|
+
    async def _aload_existing_checkpoint(self, checkpoint_path: Path) -> str | None:
        """
        Asynchronously load existing checkpoint content if it exists.

        Args:
            checkpoint_path: Path to the checkpoint file

        Returns:
            Checkpoint content if exists, None otherwise
        """
        if checkpoint_path.exists():
            try:
                return await aread_text(checkpoint_path)
            except Exception:
                # Best-effort read: any failure is treated as "no checkpoint"
                # so generation starts fresh. Kept broad because aread_text's
                # exception surface is defined elsewhere — NOTE(review):
                # consider narrowing once its failure modes are confirmed.
                return None
        return None
|
|
282
|
+
|
|
283
|
+
def generate_checkpoint(self, memory_path: str) -> str:
|
|
284
|
+
"""
|
|
285
|
+
Generate a checkpoint markdown file from an agent's memory (synchronous).
|
|
286
|
+
|
|
287
|
+
If a checkpoint already exists, it will be updated instead of overwritten.
|
|
288
|
+
|
|
289
|
+
Args:
|
|
290
|
+
memory_path: Relative path within ~/.kader/memory/sessions/
|
|
291
|
+
(e.g., "session-id/conversation.json")
|
|
292
|
+
Or absolute path to the memory JSON file.
|
|
293
|
+
|
|
294
|
+
Returns:
|
|
295
|
+
Absolute path to the generated markdown file
|
|
296
|
+
|
|
297
|
+
Raises:
|
|
298
|
+
FileNotFoundError: If the memory file doesn't exist
|
|
299
|
+
ValueError: If no messages found in memory
|
|
300
|
+
"""
|
|
301
|
+
# Resolve path
|
|
302
|
+
path = Path(memory_path)
|
|
303
|
+
if not path.is_absolute():
|
|
304
|
+
base_dir = get_default_memory_dir() / "sessions"
|
|
305
|
+
path = base_dir / memory_path
|
|
306
|
+
|
|
307
|
+
# Load and parse memory
|
|
308
|
+
memory_data = self._load_memory(path)
|
|
309
|
+
messages = self._extract_messages(memory_data)
|
|
310
|
+
|
|
311
|
+
if not messages:
|
|
312
|
+
raise ValueError(f"No messages found in memory file: {path}")
|
|
313
|
+
|
|
314
|
+
# Check for existing checkpoint
|
|
315
|
+
checkpoint_path = path.parent / "checkpoint.md"
|
|
316
|
+
existing_checkpoint = self._load_existing_checkpoint(checkpoint_path)
|
|
317
|
+
|
|
318
|
+
# Format and generate summary
|
|
319
|
+
conversation_text = self._format_conversation_for_prompt(messages)
|
|
320
|
+
summary = self._generate_summary(conversation_text, existing_checkpoint)
|
|
321
|
+
|
|
322
|
+
# Save checkpoint markdown
|
|
323
|
+
checkpoint_path.write_text(summary, encoding="utf-8")
|
|
324
|
+
|
|
325
|
+
return str(checkpoint_path)
|
|
326
|
+
|
|
327
|
+
async def agenerate_checkpoint(self, memory_path: str) -> str:
|
|
328
|
+
"""
|
|
329
|
+
Generate a checkpoint markdown file from an agent's memory (asynchronous).
|
|
330
|
+
|
|
331
|
+
If a checkpoint already exists, it will be updated instead of overwritten.
|
|
332
|
+
|
|
333
|
+
Args:
|
|
334
|
+
memory_path: Relative path within ~/.kader/memory/sessions/
|
|
335
|
+
(e.g., "session-id/conversation.json")
|
|
336
|
+
Or absolute path to the memory JSON file.
|
|
337
|
+
|
|
338
|
+
Returns:
|
|
339
|
+
Absolute path to the generated markdown file
|
|
340
|
+
|
|
341
|
+
Raises:
|
|
342
|
+
FileNotFoundError: If the memory file doesn't exist
|
|
343
|
+
ValueError: If no messages found in memory
|
|
344
|
+
"""
|
|
345
|
+
# Resolve path
|
|
346
|
+
path = Path(memory_path)
|
|
347
|
+
if not path.is_absolute():
|
|
348
|
+
base_dir = get_default_memory_dir() / "sessions"
|
|
349
|
+
path = base_dir / memory_path
|
|
350
|
+
|
|
351
|
+
# Load and parse memory (async)
|
|
352
|
+
if not path.exists():
|
|
353
|
+
raise FileNotFoundError(f"Memory file not found: {path}")
|
|
354
|
+
memory_data = await aload_json(path)
|
|
355
|
+
messages = self._extract_messages(memory_data)
|
|
356
|
+
|
|
357
|
+
if not messages:
|
|
358
|
+
raise ValueError(f"No messages found in memory file: {path}")
|
|
359
|
+
|
|
360
|
+
# Check for existing checkpoint (async)
|
|
361
|
+
checkpoint_path = path.parent / "checkpoint.md"
|
|
362
|
+
existing_checkpoint = await self._aload_existing_checkpoint(checkpoint_path)
|
|
363
|
+
|
|
364
|
+
# Format and generate summary
|
|
365
|
+
conversation_text = self._format_conversation_for_prompt(messages)
|
|
366
|
+
summary = await self._agenerate_summary(conversation_text, existing_checkpoint)
|
|
367
|
+
|
|
368
|
+
# Save checkpoint markdown (async)
|
|
369
|
+
await awrite_text(checkpoint_path, summary)
|
|
370
|
+
|
|
371
|
+
return str(checkpoint_path)
|