emdash-core 0.1.7__py3-none-any.whl → 0.1.25__py3-none-any.whl
This diff covers publicly released versions of the package and shows the changes between them as published to the supported registries. It is provided for informational purposes only.
- emdash_core/__init__.py +6 -1
- emdash_core/agent/events.py +29 -0
- emdash_core/agent/prompts/__init__.py +5 -0
- emdash_core/agent/prompts/main_agent.py +22 -2
- emdash_core/agent/prompts/plan_mode.py +126 -0
- emdash_core/agent/prompts/subagents.py +11 -7
- emdash_core/agent/prompts/workflow.py +138 -43
- emdash_core/agent/providers/base.py +4 -0
- emdash_core/agent/providers/models.py +7 -0
- emdash_core/agent/providers/openai_provider.py +74 -2
- emdash_core/agent/runner.py +556 -34
- emdash_core/agent/skills.py +319 -0
- emdash_core/agent/toolkit.py +48 -0
- emdash_core/agent/tools/__init__.py +3 -2
- emdash_core/agent/tools/modes.py +197 -53
- emdash_core/agent/tools/search.py +4 -0
- emdash_core/agent/tools/skill.py +193 -0
- emdash_core/agent/tools/spec.py +61 -94
- emdash_core/agent/tools/tasks.py +15 -78
- emdash_core/api/agent.py +7 -7
- emdash_core/api/index.py +1 -1
- emdash_core/api/projectmd.py +4 -2
- emdash_core/api/router.py +2 -0
- emdash_core/api/skills.py +241 -0
- emdash_core/checkpoint/__init__.py +40 -0
- emdash_core/checkpoint/cli.py +175 -0
- emdash_core/checkpoint/git_operations.py +250 -0
- emdash_core/checkpoint/manager.py +231 -0
- emdash_core/checkpoint/models.py +107 -0
- emdash_core/checkpoint/storage.py +201 -0
- emdash_core/config.py +1 -1
- emdash_core/core/config.py +18 -2
- emdash_core/graph/schema.py +5 -5
- emdash_core/ingestion/orchestrator.py +19 -10
- emdash_core/models/agent.py +1 -1
- emdash_core/server.py +42 -0
- emdash_core/sse/stream.py +1 -0
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.25.dist-info}/METADATA +1 -2
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.25.dist-info}/RECORD +41 -31
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.25.dist-info}/entry_points.txt +1 -0
- {emdash_core-0.1.7.dist-info → emdash_core-0.1.25.dist-info}/WHEEL +0 -0
emdash_core/agent/providers/openai_provider.py

@@ -30,6 +30,9 @@ PROVIDER_CONFIG = {
 # Providers that support the reasoning parameter via extra_body
 REASONING_SUPPORTED_PROVIDERS = {"openai"}
 
+# Providers that support extended thinking
+THINKING_SUPPORTED_PROVIDERS = {"anthropic"}
+
 
 class OpenAIProvider(LLMProvider):
     """
@@ -131,6 +134,8 @@ class OpenAIProvider(LLMProvider):
         )
 
         self._reasoning_override = self._parse_bool_env("EMDASH_LLM_REASONING")
+        self._thinking_override = self._parse_bool_env("EMDASH_LLM_THINKING")
+        self._thinking_budget = int(os.environ.get("EMDASH_THINKING_BUDGET", "10000"))
 
         self.client = OpenAI(
             api_key=api_key,
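As a side note, here is a minimal sketch of how these overrides could be consumed at startup. The variable names come from the diff; the boolean parsing is an assumption, since `_parse_bool_env` itself is not shown in these hunks:

# Illustration only: reading the new environment toggles (parsing rules assumed).
import os
from typing import Optional

def parse_bool_env(name: str) -> Optional[bool]:
    """Return True/False when the variable is set, None when unset (no override)."""
    raw = os.environ.get(name)
    if raw is None:
        return None
    return raw.strip().lower() in {"1", "true", "yes", "on"}

thinking_override = parse_bool_env("EMDASH_LLM_THINKING")                 # force thinking on/off
thinking_budget = int(os.environ.get("EMDASH_THINKING_BUDGET", "10000"))  # thinking token budget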
@@ -191,6 +196,7 @@ class OpenAIProvider(LLMProvider):
         tools: Optional[list[dict]] = None,
         system: Optional[str] = None,
         reasoning: bool = False,
+        thinking: bool = False,
         images: Optional[list[ImageContent]] = None,
     ) -> LLMResponse:
         """
@@ -201,6 +207,7 @@
             tools: Optional list of tool schemas (OpenAI format)
             system: Optional system prompt
             reasoning: Enable reasoning mode (for models that support it)
+            thinking: Enable extended thinking (for Anthropic models)
             images: Optional list of images for vision-capable models
 
         Returns:
@@ -212,6 +219,8 @@
 
         if self._reasoning_override is not None:
             reasoning = self._reasoning_override
+        if self._thinking_override is not None:
+            thinking = self._thinking_override
 
         # Build completion kwargs
         kwargs = {
@@ -229,6 +238,22 @@
         if reasoning and self._provider in REASONING_SUPPORTED_PROVIDERS and not is_custom_api:
             kwargs["extra_body"] = {"reasoning": {"enabled": True}}
 
+        # Add extended thinking for Anthropic models
+        # This uses Anthropic's native thinking parameter
+        if thinking and self._provider in THINKING_SUPPORTED_PROVIDERS and not is_custom_api:
+            extra_body = kwargs.get("extra_body", {})
+            extra_body["thinking"] = {
+                "type": "enabled",
+                "budget_tokens": self._thinking_budget,
+            }
+            kwargs["extra_body"] = extra_body
+            log.info(
+                "Extended thinking enabled provider={} model={} budget={}",
+                self._provider,
+                self.model,
+                self._thinking_budget,
+            )
+
         # Add images if provided (vision support)
         if images:
             log.info(
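To make the request shape concrete, here is a sketch of the completion kwargs this branch produces when thinking is enabled. The model id, message, and max_tokens are placeholders; only the extra_body structure is taken from the diff, and the commented-out call mirrors the standard openai-python chat.completions API that the provider builds on:

# Placeholder values throughout; only the extra_body shape mirrors the diff.
thinking_budget = 10_000

kwargs = {
    "model": "claude-sonnet-4",                                      # placeholder model id
    "messages": [{"role": "user", "content": "Plan the refactor."}],
    "max_tokens": 16_000,
}
kwargs["extra_body"] = {
    "thinking": {
        "type": "enabled",
        "budget_tokens": thinking_budget,   # cap on tokens spent thinking
    }
}
# client.chat.completions.create(**kwargs)  # openai-python client pointed at an Anthropic-compatible endpoint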
@@ -322,8 +347,32 @@
         choice = response.choices[0]
         message = choice.message
 
-        # Extract content
-        content =
+        # Extract content and thinking
+        content = None
+        thinking = None
+
+        # Check if content is a list of content blocks (Anthropic extended thinking)
+        raw_content = message.content
+        if isinstance(raw_content, list):
+            # Content blocks format (Anthropic with extended thinking)
+            text_parts = []
+            thinking_parts = []
+            for block in raw_content:
+                if hasattr(block, "type"):
+                    if block.type == "thinking":
+                        thinking_parts.append(getattr(block, "thinking", ""))
+                    elif block.type == "text":
+                        text_parts.append(getattr(block, "text", ""))
+                elif isinstance(block, dict):
+                    if block.get("type") == "thinking":
+                        thinking_parts.append(block.get("thinking", ""))
+                    elif block.get("type") == "text":
+                        text_parts.append(block.get("text", ""))
+            content = "\n".join(text_parts) if text_parts else None
+            thinking = "\n".join(thinking_parts) if thinking_parts else None
+        else:
+            # Simple string content
+            content = raw_content
 
         # Extract tool calls
         tool_calls = []
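The new parsing handles both attribute-style block objects and plain dicts. A standalone sketch of the same split, with invented sample data:

# Standalone version of the content-block split shown in the diff (illustrative).
from typing import Any, Optional, Tuple

def _field(block: Any, name: str, default: Any = "") -> Any:
    """Read a field from either a dict block or an attribute-style block."""
    if isinstance(block, dict):
        return block.get(name, default)
    return getattr(block, name, default)

def split_blocks(raw_content: Any) -> Tuple[Optional[str], Optional[str]]:
    """Return (content, thinking) from a plain string or a list of content blocks."""
    if not isinstance(raw_content, list):
        return raw_content, None
    text_parts, thinking_parts = [], []
    for block in raw_content:
        block_type = _field(block, "type", None)
        if block_type == "thinking":
            thinking_parts.append(_field(block, "thinking"))
        elif block_type == "text":
            text_parts.append(_field(block, "text"))
    return ("\n".join(text_parts) or None, "\n".join(thinking_parts) or None)

# Invented sample data, dict-shaped blocks:
content, thinking = split_blocks([
    {"type": "thinking", "thinking": "Compare both options first."},
    {"type": "text", "text": "Option A is simpler."},
])
print(content)    # Option A is simpler.
print(thinking)   # Compare both options first.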
@@ -338,17 +387,32 @@
         # Extract token usage if available
         input_tokens = 0
         output_tokens = 0
+        thinking_tokens = 0
         if hasattr(response, "usage") and response.usage:
             input_tokens = getattr(response.usage, "prompt_tokens", 0) or 0
             output_tokens = getattr(response.usage, "completion_tokens", 0) or 0
+            # Anthropic returns thinking tokens in cache_creation_input_tokens or similar
+            # For now, estimate from the thinking content length
+            if thinking:
+                thinking_tokens = len(thinking) // 4  # Rough estimate
+
+        if thinking:
+            log.info(
+                "Extended thinking captured provider={} model={} thinking_len={}",
+                self._provider,
+                self.model,
+                len(thinking),
+            )
 
         return LLMResponse(
             content=content,
+            thinking=thinking,
             tool_calls=tool_calls,
             raw=response,
             stop_reason=choice.finish_reason,
             input_tokens=input_tokens,
             output_tokens=output_tokens,
+            thinking_tokens=thinking_tokens,
         )
 
     def get_context_limit(self) -> int:
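Note that thinking_tokens is a heuristic rather than a value read from the usage object: the diff approximates roughly four characters per token. For example:

# Heuristic from the diff: roughly 4 characters per token.
thinking = "x" * 2_000            # stand-in for a 2,000-character thinking trace
thinking_tokens = len(thinking) // 4
print(thinking_tokens)            # 500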
@@ -373,6 +437,14 @@
         # For unknown models, assume no vision support
         return False
 
+    def supports_thinking(self) -> bool:
+        """Check if this model supports extended thinking."""
+        if self.chat_model:
+            return self.chat_model.spec.supports_thinking
+
+        # For unknown models, check if provider supports thinking
+        return self._provider in THINKING_SUPPORTED_PROVIDERS
+
     def _format_image_for_api(self, image: ImageContent) -> dict:
         """Format an image for OpenAI/Anthropic API.
 
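The fallback mirrors the module-level allow-list added at the top of the file. A standalone sketch of that capability check; the function and its arguments here are illustrative, not emdash-core's API:

# Illustrative stand-alone version of the fallback logic in supports_thinking().
from typing import Optional

THINKING_SUPPORTED_PROVIDERS = {"anthropic"}

def supports_thinking(provider: str, spec_supports_thinking: Optional[bool] = None) -> bool:
    """Prefer the model spec when known; otherwise fall back to the provider allow-list."""
    if spec_supports_thinking is not None:
        return spec_supports_thinking
    return provider in THINKING_SUPPORTED_PROVIDERS

print(supports_thinking("anthropic"))       # True  (provider fallback)
print(supports_thinking("openai"))          # False
print(supports_thinking("openai", True))    # True  (model spec wins)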