emdash-core 0.1.7__py3-none-any.whl → 0.1.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emdash_core/__init__.py +6 -1
- emdash_core/agent/__init__.py +4 -0
- emdash_core/agent/events.py +52 -1
- emdash_core/agent/inprocess_subagent.py +123 -10
- emdash_core/agent/prompts/__init__.py +6 -0
- emdash_core/agent/prompts/main_agent.py +53 -3
- emdash_core/agent/prompts/plan_mode.py +255 -0
- emdash_core/agent/prompts/subagents.py +84 -16
- emdash_core/agent/prompts/workflow.py +270 -56
- emdash_core/agent/providers/base.py +4 -0
- emdash_core/agent/providers/factory.py +2 -2
- emdash_core/agent/providers/models.py +7 -0
- emdash_core/agent/providers/openai_provider.py +137 -13
- emdash_core/agent/runner/__init__.py +49 -0
- emdash_core/agent/runner/agent_runner.py +753 -0
- emdash_core/agent/runner/context.py +451 -0
- emdash_core/agent/runner/factory.py +108 -0
- emdash_core/agent/runner/plan.py +217 -0
- emdash_core/agent/runner/sdk_runner.py +324 -0
- emdash_core/agent/runner/utils.py +67 -0
- emdash_core/agent/skills.py +358 -0
- emdash_core/agent/toolkit.py +85 -5
- emdash_core/agent/toolkits/plan.py +9 -11
- emdash_core/agent/tools/__init__.py +3 -2
- emdash_core/agent/tools/coding.py +48 -4
- emdash_core/agent/tools/modes.py +207 -55
- emdash_core/agent/tools/search.py +4 -0
- emdash_core/agent/tools/skill.py +193 -0
- emdash_core/agent/tools/spec.py +61 -94
- emdash_core/agent/tools/task.py +41 -2
- emdash_core/agent/tools/tasks.py +15 -78
- emdash_core/api/agent.py +562 -8
- emdash_core/api/index.py +1 -1
- emdash_core/api/projectmd.py +4 -2
- emdash_core/api/router.py +2 -0
- emdash_core/api/skills.py +241 -0
- emdash_core/checkpoint/__init__.py +40 -0
- emdash_core/checkpoint/cli.py +175 -0
- emdash_core/checkpoint/git_operations.py +250 -0
- emdash_core/checkpoint/manager.py +231 -0
- emdash_core/checkpoint/models.py +107 -0
- emdash_core/checkpoint/storage.py +201 -0
- emdash_core/config.py +1 -1
- emdash_core/core/config.py +18 -2
- emdash_core/graph/schema.py +5 -5
- emdash_core/ingestion/orchestrator.py +19 -10
- emdash_core/models/agent.py +1 -1
- emdash_core/server.py +42 -0
- emdash_core/skills/frontend-design/SKILL.md +56 -0
- emdash_core/sse/stream.py +5 -0
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.33.dist-info}/METADATA +2 -2
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.33.dist-info}/RECORD +54 -37
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.33.dist-info}/entry_points.txt +1 -0
- emdash_core/agent/runner.py +0 -601
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.33.dist-info}/WHEEL +0 -0
--- /dev/null
+++ emdash_core/agent/runner/context.py
@@ -0,0 +1,451 @@
+"""Context management functions for the agent runner.
+
+This module contains functions for estimating, compacting, and managing
+conversation context during agent runs.
+"""
+
+from typing import Optional, TYPE_CHECKING
+
+from ...utils.logger import log
+
+if TYPE_CHECKING:
+    from ..toolkit import AgentToolkit
+    from ..events import AgentEventEmitter
+
+
+def estimate_context_tokens(messages: list[dict], system_prompt: Optional[str] = None) -> int:
+    """Estimate the current context window size in tokens.
+
+    Args:
+        messages: Conversation messages
+        system_prompt: Optional system prompt to include in estimation
+
+    Returns:
+        Estimated token count for the context
+    """
+    total_chars = 0
+
+    # Count characters in all messages
+    for msg in messages:
+        content = msg.get("content", "")
+        if isinstance(content, str):
+            total_chars += len(content)
+        elif isinstance(content, list):
+            # Handle multi-part messages (e.g., with images)
+            for part in content:
+                if isinstance(part, dict) and "text" in part:
+                    total_chars += len(part["text"])
+
+        # Add role overhead (~4 tokens per message for role/structure)
+        total_chars += 16
+
+    # Also count system prompt
+    if system_prompt:
+        total_chars += len(system_prompt)
+
+    # Estimate: ~4 characters per token
+    return total_chars // 4
+
+
+def get_context_breakdown(
+    messages: list[dict],
+    system_prompt: Optional[str] = None,
+) -> tuple[dict, list[dict]]:
+    """Get breakdown of context usage by message type.
+
+    Args:
+        messages: Conversation messages
+        system_prompt: Optional system prompt
+
+    Returns:
+        Tuple of (breakdown dict, list of largest messages)
+    """
+    breakdown = {
+        "system_prompt": len(system_prompt) // 4 if system_prompt else 0,
+        "user": 0,
+        "assistant": 0,
+        "tool_results": 0,
+    }
+
+    # Track individual message sizes for finding largest
+    message_sizes = []
+
+    for i, msg in enumerate(messages):
+        role = msg.get("role", "unknown")
+        content = msg.get("content", "")
+
+        # Calculate content size
+        if isinstance(content, str):
+            size = len(content)
+        elif isinstance(content, list):
+            size = sum(len(p.get("text", "")) for p in content if isinstance(p, dict))
+        else:
+            size = 0
+
+        tokens = size // 4
+
+        # Categorize
+        if role == "user":
+            breakdown["user"] += tokens
+        elif role == "assistant":
+            breakdown["assistant"] += tokens
+        elif role == "tool":
+            breakdown["tool_results"] += tokens
+
+        # Track for largest messages
+        if tokens > 100:  # Only track substantial messages
+            # Try to get a label for this message
+            label = f"{role}[{i}]"
+            if role == "tool":
+                tool_call_id = msg.get("tool_call_id", "")
+                # Try to find the tool name from previous assistant message
+                for prev_msg in reversed(messages[:i]):
+                    if prev_msg.get("role") == "assistant" and "tool_calls" in prev_msg:
+                        for tc in prev_msg.get("tool_calls", []):
+                            if tc.get("id") == tool_call_id:
+                                label = tc.get("function", {}).get("name", "tool")
+                                break
+                        break
+
+            message_sizes.append({
+                "index": i,
+                "role": role,
+                "label": label,
+                "tokens": tokens,
+                "preview": content[:100] if isinstance(content, str) else str(content)[:100],
+            })
+
+    # Sort by size and get top 5
+    message_sizes.sort(key=lambda x: x["tokens"], reverse=True)
+    largest = message_sizes[:5]
+
+    return breakdown, largest
+
+
+def maybe_compact_context(
+    messages: list[dict],
+    provider: object,
+    emitter: "AgentEventEmitter",
+    system_prompt: Optional[str] = None,
+    threshold: float = 0.8,
+) -> list[dict]:
+    """Proactively compact context if approaching limit.
+
+    Args:
+        messages: Current conversation messages
+        provider: LLM provider instance
+        emitter: Event emitter for notifications
+        system_prompt: System prompt for token estimation
+        threshold: Trigger compaction at this fraction of the context limit (default 0.8)
+
+    Returns:
+        Original or compacted messages
+    """
+    context_tokens = estimate_context_tokens(messages, system_prompt)
+    context_limit = provider.get_context_limit()
+
+    # Check if we need to compact
+    if context_tokens < context_limit * threshold:
+        return messages  # No compaction needed
+
+    log.info(
+        f"Context at {context_tokens:,}/{context_limit:,} tokens "
+        f"({context_tokens/context_limit:.0%}), compacting..."
+    )
+
+    return compact_messages_with_llm(
+        messages, emitter, target_tokens=int(context_limit * 0.5)
+    )
+
+
+def compact_messages_with_llm(
+    messages: list[dict],
+    emitter: "AgentEventEmitter",
+    target_tokens: int,
+) -> list[dict]:
+    """Use a fast LLM to summarize middle messages.
+
+    Preserves:
+    - First message (original user request)
+    - Last 4 messages (recent context)
+    - Summarizes everything in between
+
+    Args:
+        messages: Current conversation messages
+        emitter: Event emitter for notifications
+        target_tokens: Target token count after compaction
+
+    Returns:
+        Compacted messages list
+    """
+    from ..subagent import get_model_for_tier
+    from ..providers import get_provider
+
+    if len(messages) <= 5:
+        return messages  # Too few to compact
+
+    # Split messages
+    first_msg = messages[0]
+    recent_msgs = messages[-4:]
+    middle_msgs = messages[1:-4]
+
+    if not middle_msgs:
+        return messages
+
+    # Build summary prompt
+    middle_content = format_messages_for_summary(middle_msgs)
+
+    prompt = f"""Summarize this conversation history concisely.
+
+PRESERVE (include verbatim if present):
+- Code snippets and file paths
+- Error messages
+- Key decisions made
+- Important tool results (file contents, search results)
+
+CONDENSE:
+- Repetitive searches
+- Verbose tool outputs
+- Intermediate reasoning
+
+CONVERSATION HISTORY:
+{middle_content}
+
+OUTPUT FORMAT:
+Provide a concise summary (max 2000 tokens) that captures the essential context needed to continue this task."""
+
+    # Use fast model for summarization
+    fast_model = get_model_for_tier("fast")
+    fast_provider = get_provider(fast_model)
+
+    try:
+        emitter.emit_thinking("Compacting context with fast model...")
+
+        response = fast_provider.chat(
+            messages=[{"role": "user", "content": prompt}],
+            system="You are a context summarizer. Be concise but preserve code and technical details.",
+        )
+
+        summary = response.content or ""
+
+        log.info(
+            f"Compacted {len(middle_msgs)} messages into summary "
+            f"({len(summary)} chars)"
+        )
+
+        # Build compacted messages
+        return [
+            first_msg,
+            {
+                "role": "assistant",
+                "content": f"[Context Summary]\n{summary}\n[End Summary]",
+            },
+            *recent_msgs,
+        ]
+    except Exception as e:
+        log.warning(f"LLM compaction failed: {e}, falling back to truncation")
+        return [first_msg] + recent_msgs
+
+
+def format_messages_for_summary(messages: list[dict]) -> str:
+    """Format messages for the summarization prompt.
+
+    Args:
+        messages: Messages to format
+
+    Returns:
+        Formatted string for summarization
+    """
+    parts = []
+    for msg in messages:
+        role = msg.get("role", "unknown")
+        content = msg.get("content", "")
+
+        # Handle tool calls in assistant messages
+        if role == "assistant" and "tool_calls" in msg:
+            tool_calls = msg.get("tool_calls", [])
+            tool_info = [
+                f"Called: {tc.get('function', {}).get('name', 'unknown')}"
+                for tc in tool_calls
+            ]
+            content = f"{content}\n[Tools: {', '.join(tool_info)}]" if content else f"[Tools: {', '.join(tool_info)}]"
+
+        # Truncate very long content
+        if len(content) > 4000:
+            content = content[:4000] + "\n[...truncated...]"
+
+        parts.append(f"[{role.upper()}]\n{content}")
+
+    return "\n\n---\n\n".join(parts)
+
+
+def get_reranked_context(
+    toolkit: "AgentToolkit",
+    current_query: str,
+) -> dict:
+    """Get reranked context items based on the current query.
+
+    Args:
+        toolkit: Agent toolkit instance
+        current_query: Current query for relevance ranking
+
+    Returns:
+        Dict with item_count and items list
+    """
+    try:
+        from ...context.service import ContextService
+        from ...context.reranker import rerank_context_items
+
+        # Get exploration steps for context extraction
+        steps = toolkit.get_exploration_steps()
+        if not steps:
+            return {"item_count": 0, "items": []}
+
+        # Use context service to extract context items from exploration
+        service = ContextService(connection=toolkit.connection)
+        terminal_id = service.get_terminal_id()
+
+        # Update context with exploration steps
+        service.update_context(
+            terminal_id=terminal_id,
+            exploration_steps=steps,
+        )
+
+        # Get context items
+        items = service.get_context_items(terminal_id)
+        if not items:
+            return {"item_count": 0, "items": []}
+
+        # Rerank by query relevance
+        if current_query:
+            items = rerank_context_items(
+                items,
+                current_query,
+                top_k=20,
+            )
+
+        # Convert to serializable format
+        result_items = []
+        for item in items[:20]:  # Limit to 20 items
+            result_items.append({
+                "name": item.qualified_name,
+                "type": item.entity_type,
+                "file": item.file_path,
+                "score": round(item.score, 3) if hasattr(item, 'score') else None,
+            })
+
+        return {
+            "item_count": len(result_items),
+            "items": result_items,
+        }
+
+    except Exception as e:
+        log.debug(f"Failed to get reranked context: {e}")
+        return {"item_count": 0, "items": []}
+
+
+def emit_context_frame(
+    toolkit: "AgentToolkit",
+    emitter: "AgentEventEmitter",
+    messages: list[dict],
+    system_prompt: Optional[str],
+    current_query: str,
+    total_input_tokens: int,
+    total_output_tokens: int,
+) -> None:
+    """Emit a context frame event with current exploration state.
+
+    Args:
+        toolkit: Agent toolkit instance
+        emitter: Event emitter
+        messages: Current conversation messages
+        system_prompt: System prompt for estimation
+        current_query: Current query for reranking
+        total_input_tokens: Total input tokens used
+        total_output_tokens: Total output tokens used
+    """
+    # Get exploration steps from toolkit session
+    steps = toolkit.get_exploration_steps()
+
+    # Estimate current context window tokens and get breakdown
+    context_tokens = 0
+    context_breakdown = {}
+    largest_messages = []
+    if messages:
+        context_tokens = estimate_context_tokens(messages, system_prompt)
+        context_breakdown, largest_messages = get_context_breakdown(messages, system_prompt)
+
+    # Summarize exploration by tool
+    tool_counts: dict[str, int] = {}
+    entities_found = 0
+    step_details: list[dict] = []
+
+    for step in steps:
+        tool_name = getattr(step, 'tool', 'unknown')
+        tool_counts[tool_name] = tool_counts.get(tool_name, 0) + 1
+
+        # Count entities from the step
+        step_entities = getattr(step, 'entities_found', [])
+        entities_found += len(step_entities)
+
+        # Collect step details
+        params = getattr(step, 'params', {})
+        summary = getattr(step, 'result_summary', '')
+
+        # Extract meaningful info based on tool type
+        detail = {
+            "tool": tool_name,
+            "summary": summary,
+        }
+
+        # Add relevant params based on tool
+        if tool_name == 'read_file' and 'file_path' in params:
+            detail["file"] = params['file_path']
+        elif tool_name == 'read_file' and 'path' in params:
+            detail["file"] = params['path']
+        elif tool_name in ('grep', 'semantic_search') and 'query' in params:
+            detail["query"] = params['query']
+        elif tool_name == 'glob' and 'pattern' in params:
+            detail["pattern"] = params['pattern']
+        elif tool_name == 'list_files' and 'path' in params:
+            detail["path"] = params['path']
+
+        # Add content preview if available
+        content_preview = getattr(step, 'content_preview', None)
+        if content_preview:
+            detail["content_preview"] = content_preview
+
+        # Add token count if available
+        token_count = getattr(step, 'token_count', 0)
+        if token_count > 0:
+            detail["tokens"] = token_count
+
+        # Add entities if any
+        if step_entities:
+            detail["entities"] = step_entities[:5]  # Limit to 5
+
+        step_details.append(detail)
+
+    exploration_steps = [
+        {"tool": tool, "count": count}
+        for tool, count in tool_counts.items()
+    ]
+
+    # Build context frame data
+    adding = {
+        "exploration_steps": exploration_steps,
+        "entities_found": entities_found,
+        "step_count": len(steps),
+        "details": step_details[-20:],  # Last 20 steps
+        "input_tokens": total_input_tokens,
+        "output_tokens": total_output_tokens,
+        "context_tokens": context_tokens,  # Current context window size
+        "context_breakdown": context_breakdown,  # Tokens by message type
+        "largest_messages": largest_messages,  # Top 5 biggest messages
+    }
+
+    # Get reranked context items
+    reading = get_reranked_context(toolkit, current_query)
+
+    # Emit the context frame
+    emitter.emit_context_frame(adding=adding, reading=reading)
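Taken together, the new `context.py` implements a cheap estimate-then-compact loop: token counts come from a ~4 characters/token heuristic, and compaction triggers at 80% of the provider's context limit. A minimal sketch of how a caller might exercise these helpers; the stub provider and emitter below are hypothetical stand-ins (only the two imported functions come from this diff), and the import path assumes the module is used directly as shipped:

```python
from emdash_core.agent.runner.context import (
    estimate_context_tokens,
    maybe_compact_context,
)


class StubProvider:
    # Hypothetical stand-in; only needs the get_context_limit() this module calls.
    def get_context_limit(self) -> int:
        return 200_000  # illustrative limit, not a real model's


class StubEmitter:
    # Hypothetical stand-in for AgentEventEmitter.
    def emit_thinking(self, text: str) -> None:
        print(f"[thinking] {text}")


messages = [
    {"role": "user", "content": "Trace the ingestion orchestrator"},
    {"role": "assistant", "content": "Reading orchestrator.py..."},
]

# ~4 chars/token, plus 16 chars of per-message role overhead.
print(estimate_context_tokens(messages, system_prompt="You are an agent."))

# Below 80% of the limit this returns messages unchanged; above it, the
# middle of the conversation is summarized by a fast-tier model.
messages = maybe_compact_context(
    messages, StubProvider(), StubEmitter(), system_prompt="You are an agent."
)
```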
--- /dev/null
+++ emdash_core/agent/runner/factory.py
@@ -0,0 +1,108 @@
+"""Runner factory for hybrid SDK/Provider routing.
+
+This module provides factory functions to create the appropriate runner
+based on the model type:
+- Claude models → SDKAgentRunner (uses Anthropic Agent SDK)
+- Other models → AgentRunner (uses OpenAI-compatible API)
+"""
+
+from pathlib import Path
+from typing import Optional, Union, TYPE_CHECKING
+
+from .sdk_runner import SDKAgentRunner, is_claude_model
+from .agent_runner import AgentRunner
+from ...utils.logger import log
+
+if TYPE_CHECKING:
+    from ..events import AgentEventEmitter
+
+
+def get_runner(
+    model: str,
+    cwd: Optional[str] = None,
+    emitter: Optional["AgentEventEmitter"] = None,
+    system_prompt: Optional[str] = None,
+    plan_mode: bool = False,
+    prefer_sdk: bool = True,
+    **kwargs,
+) -> Union[SDKAgentRunner, AgentRunner]:
+    """Get the appropriate runner for the model.
+
+    Routes to SDKAgentRunner for Claude models (uses Anthropic Agent SDK),
+    or AgentRunner for other models (uses OpenAI-compatible API).
+
+    Args:
+        model: Model string (e.g., "claude-sonnet-4", "fireworks:minimax-m2p1")
+        cwd: Working directory
+        emitter: Event emitter for streaming
+        system_prompt: Custom system prompt
+        plan_mode: If True, restrict to read-only tools
+        prefer_sdk: If True, prefer SDK for Claude models (default True)
+        **kwargs: Additional arguments passed to the runner
+
+    Returns:
+        SDKAgentRunner or AgentRunner instance
+
+    Example:
+        # Claude model → uses SDK
+        runner = get_runner("claude-sonnet-4")
+
+        # Fireworks model → uses standard provider
+        runner = get_runner("fireworks:accounts/fireworks/models/minimax-m2p1")
+
+        # Force standard provider even for Claude
+        runner = get_runner("claude-sonnet-4", prefer_sdk=False)
+    """
+    use_sdk = prefer_sdk and is_claude_model(model)
+
+    if use_sdk:
+        log.info(f"Using SDKAgentRunner for Claude model: {model}")
+        return SDKAgentRunner(
+            model=model,
+            cwd=cwd,
+            emitter=emitter,
+            system_prompt=system_prompt,
+            plan_mode=plan_mode,
+        )
+    else:
+        log.info(f"Using AgentRunner for model: {model}")
+        # Import toolkit here to avoid circular imports
+        from ..toolkit import AgentToolkit
+
+        # Generate plan file path when in plan mode
+        plan_file_path = None
+        repo_root = Path(cwd) if cwd else Path.cwd()
+        if plan_mode:
+            plan_file_path = str(repo_root / ".emdash" / "plan.md")
+            # Ensure .emdash directory exists
+            (repo_root / ".emdash").mkdir(exist_ok=True)
+
+        toolkit = AgentToolkit(
+            repo_root=repo_root,
+            plan_mode=plan_mode,
+            plan_file_path=plan_file_path,
+        )
+
+        return AgentRunner(
+            toolkit=toolkit,
+            model=model,
+            emitter=emitter,
+            system_prompt=system_prompt,
+            **kwargs,
+        )
+
+
+def create_hybrid_runner(
+    model: str,
+    **kwargs,
+) -> Union[SDKAgentRunner, AgentRunner]:
+    """Convenience alias for get_runner.
+
+    Args:
+        model: Model string
+        **kwargs: Passed to get_runner
+
+    Returns:
+        Appropriate runner instance
+    """
+    return get_runner(model, **kwargs)
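The factory's routing hinges on two inputs: `is_claude_model(model)` and the `prefer_sdk` flag. A hedged sketch restating the docstring examples as calls (model strings are illustrative, and actually constructing a runner may require a real repository and provider credentials):

```python
from emdash_core.agent.runner.factory import get_runner

# Claude models route to SDKAgentRunner (Anthropic Agent SDK) by default.
sdk_runner = get_runner("claude-sonnet-4", cwd="/tmp/repo")

# Non-Claude models, or prefer_sdk=False, take the AgentRunner path, which
# builds an AgentToolkit rooted at cwd; with plan_mode=True it also creates
# <repo>/.emdash/ and points the toolkit at .emdash/plan.md.
plain_runner = get_runner(
    "fireworks:accounts/fireworks/models/minimax-m2p1",
    cwd="/tmp/repo",
    plan_mode=True,
)
```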