skillengine 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- skillengine/__init__.py +267 -0
- skillengine/adapters/__init__.py +26 -0
- skillengine/adapters/anthropic.py +347 -0
- skillengine/adapters/base.py +345 -0
- skillengine/adapters/openai.py +312 -0
- skillengine/adapters/registry.py +299 -0
- skillengine/adapters/transform.py +120 -0
- skillengine/agent.py +2224 -0
- skillengine/cache.py +44 -0
- skillengine/cli.py +725 -0
- skillengine/commands.py +333 -0
- skillengine/config.py +163 -0
- skillengine/context.py +262 -0
- skillengine/context_files.py +72 -0
- skillengine/engine.py +580 -0
- skillengine/events.py +455 -0
- skillengine/extensions/__init__.py +40 -0
- skillengine/extensions/api.py +105 -0
- skillengine/extensions/manager.py +339 -0
- skillengine/extensions/models.py +63 -0
- skillengine/filters/__init__.py +8 -0
- skillengine/filters/base.py +78 -0
- skillengine/filters/default.py +152 -0
- skillengine/loaders/__init__.py +8 -0
- skillengine/loaders/base.py +93 -0
- skillengine/loaders/markdown.py +290 -0
- skillengine/logging.py +114 -0
- skillengine/memory/__init__.py +11 -0
- skillengine/memory/client.py +205 -0
- skillengine/memory/config.py +28 -0
- skillengine/memory/extension.py +93 -0
- skillengine/memory/hooks.py +108 -0
- skillengine/memory/tools.py +245 -0
- skillengine/model_registry.py +346 -0
- skillengine/models.py +237 -0
- skillengine/models_catalog.py +188 -0
- skillengine/modes/__init__.py +8 -0
- skillengine/modes/interactive.py +126 -0
- skillengine/modes/json_mode.py +44 -0
- skillengine/modes/rpc_mode.py +241 -0
- skillengine/packages/__init__.py +22 -0
- skillengine/packages/manager.py +282 -0
- skillengine/packages/models.py +75 -0
- skillengine/packages/source.py +45 -0
- skillengine/prompts.py +147 -0
- skillengine/runtime/__init__.py +17 -0
- skillengine/runtime/base.py +129 -0
- skillengine/runtime/bash.py +188 -0
- skillengine/runtime/boxlite.py +438 -0
- skillengine/runtime/code_mode.py +682 -0
- skillengine/runtime/subprocess_streaming.py +147 -0
- skillengine/sandbox/__init__.py +11 -0
- skillengine/sandbox/runner.py +91 -0
- skillengine/session/__init__.py +67 -0
- skillengine/session/manager.py +478 -0
- skillengine/session/models.py +158 -0
- skillengine/session/store.py +190 -0
- skillengine/session/tree.py +142 -0
- skillengine/tools/__init__.py +61 -0
- skillengine/tools/bash.py +140 -0
- skillengine/tools/edit.py +185 -0
- skillengine/tools/find.py +202 -0
- skillengine/tools/grep.py +307 -0
- skillengine/tools/ls.py +268 -0
- skillengine/tools/read.py +188 -0
- skillengine/tools/registry.py +80 -0
- skillengine/tools/write.py +87 -0
- skillengine/transports/__init__.py +18 -0
- skillengine/transports/auto.py +42 -0
- skillengine/transports/base.py +42 -0
- skillengine/transports/sse.py +29 -0
- skillengine/transports/websocket.py +94 -0
- skillengine/tui/__init__.py +56 -0
- skillengine/tui/ansi.py +271 -0
- skillengine/tui/autocomplete.py +327 -0
- skillengine/tui/component.py +109 -0
- skillengine/tui/container.py +166 -0
- skillengine/tui/editor_widget.py +340 -0
- skillengine/tui/input_widget.py +329 -0
- skillengine/tui/keybindings.py +221 -0
- skillengine/tui/keys.py +341 -0
- skillengine/tui/markdown_widget.py +129 -0
- skillengine/tui/overlay.py +198 -0
- skillengine/tui/renderer.py +199 -0
- skillengine/tui/select_list.py +304 -0
- skillengine/tui/theme/__init__.py +20 -0
- skillengine/tui/theme/defaults.py +110 -0
- skillengine/tui/theme/loader.py +70 -0
- skillengine/tui/theme/models.py +154 -0
- skillengine/tui/theme/schema.py +73 -0
- skillengine/utils/__init__.py +1 -0
- skillengine/utils/json_parse.py +34 -0
- skillengine/web/__init__.py +5 -0
- skillengine/web/server.py +191 -0
- skillengine/web/static/app.js +459 -0
- skillengine/web/static/index.html +72 -0
- skillengine/web/static/style.css +533 -0
- skillengine/web/storage.py +129 -0
- skillengine-0.1.0.dist-info/METADATA +442 -0
- skillengine-0.1.0.dist-info/RECORD +102 -0
- skillengine-0.1.0.dist-info/WHEEL +4 -0
- skillengine-0.1.0.dist-info/entry_points.txt +2 -0
"""
SkillEngine - A standalone skills execution engine for LLM agents.

This library provides a framework for defining, loading, filtering, and executing
skills in LLM-based agent systems. It is designed to be framework-agnostic and
can be integrated with any LLM provider (OpenAI, Anthropic, etc.).

Example:
    from skillengine import SkillsEngine, SkillsConfig

    # Initialize engine
    engine = SkillsEngine(
        config=SkillsConfig(
            skill_dirs=["./skills", "~/.agent/skills"],
            watch=True,
        )
    )

    # Load and filter skills
    skills = engine.load_skills()
    eligible = engine.filter_skills(skills)

    # Generate prompt for LLM
    prompt = engine.format_prompt(eligible)

    # Execute a skill
    result = await engine.execute("github", args={"action": "list-prs"})
"""

from skillengine.adapters.registry import AdapterFactory, AdapterRegistry
from skillengine.agent import (
    AgentAbortedError,
    AgentConfig,
    AgentMessage,
    AgentRunner,
    create_agent,
)
from skillengine.cache import (
    get_cache_config_openai,
    get_cache_control_anthropic,
)
from skillengine.commands import CommandRegistry, CommandResult
from skillengine.config import CacheRetention, SkillEntryConfig, SkillsConfig
from skillengine.context import (
    ContextCompactor,
    ContextManager,
    SlidingWindowCompactor,
    TokenBudgetCompactor,
    estimate_message_tokens,
    estimate_messages_tokens,
    estimate_tokens,
)
from skillengine.context_files import ContextFile, load_context_files
from skillengine.engine import SkillsEngine
from skillengine.events import (
    AFTER_TOOL_RESULT,
    AGENT_END,
    AGENT_START,
    BEFORE_TOOL_CALL,
    COMPACTION,
    CONTEXT_TRANSFORM,
    INPUT,
    MODEL_CHANGE,
    SESSION_END,
    SESSION_START,
    TOOL_EXECUTION_UPDATE,
    TURN_END,
    TURN_START,
    AfterToolResultEvent,
    AgentEndEvent,
    AgentStartEvent,
    BeforeToolCallEvent,
    CompactionEvent,
    ContextTransformEvent,
    ContextTransformEventResult,
    EventBus,
    InputEvent,
    InputEventResult,
    ModelChangeEvent,
    SessionEndEvent,
    SessionStartEvent,
    StreamEvent,
    ToolCallEventResult,
    ToolExecutionUpdateEvent,
    ToolResultEventResult,
    TurnEndEvent,
    TurnStartEvent,
)
from skillengine.extensions import (
    CommandInfo,
    ExtensionAPI,
    ExtensionInfo,
    ExtensionManager,
    ToolInfo,
)
from skillengine.filters import DefaultSkillFilter, SkillFilter
from skillengine.loaders import MarkdownSkillLoader, SkillLoader
from skillengine.model_registry import (
    DEFAULT_THINKING_BUDGETS,
    CostBreakdown,
    ModelCost,
    ModelDefinition,
    ModelRegistry,
    ThinkingLevel,
    TokenUsage,
    Transport,
    adjust_max_tokens_for_thinking,
    map_thinking_level_to_anthropic_effort,
    map_thinking_level_to_openai_effort,
)
from skillengine.models import (
    ImageContent,
    MessageContent,
    Skill,
    SkillAction,
    SkillActionParam,
    SkillEntry,
    SkillInstallSpec,
    SkillInvocationPolicy,
    SkillMetadata,
    SkillRequirements,
    SkillSnapshot,
    TextContent,
)
from skillengine.prompts import PromptTemplate, PromptTemplateLoader
from skillengine.runtime import BashRuntime, CodeModeRuntime, SkillRuntime

# Names from optional extras are only advertised in ``__all__`` when their
# import actually succeeded. Listing them unconditionally would make
# ``from skillengine import *`` raise AttributeError whenever an extra
# (boxlite / sandbox / memory) is not installed.
_OPTIONAL_EXPORTS: list[str] = []

# Optional: BoxLite sandbox runtime
try:
    from skillengine.runtime.boxlite import BoxLiteRuntime, SecurityLevel

    _OPTIONAL_EXPORTS += ["BoxLiteRuntime", "SecurityLevel"]
except ImportError:
    pass

# Optional: Sandbox module (requires BoxLite)
try:
    from skillengine.sandbox import SandboxedAgentRunner

    _OPTIONAL_EXPORTS.append("SandboxedAgentRunner")
except ImportError:
    pass

# Optional: memory module
try:
    from skillengine.memory import MemoryConfig, OpenVikingClient, setup_memory

    _OPTIONAL_EXPORTS += ["MemoryConfig", "OpenVikingClient", "setup_memory"]
except ImportError:
    pass

__version__ = "0.1.0"

__all__ = [
    # Core models
    "Skill",
    "SkillMetadata",
    "SkillRequirements",
    "SkillSnapshot",
    "SkillEntry",
    "SkillInvocationPolicy",
    "SkillInstallSpec",
    "SkillAction",
    "SkillActionParam",
    # Content types (multi-modal)
    "TextContent",
    "ImageContent",
    "MessageContent",
    # Config
    "SkillsConfig",
    "SkillEntryConfig",
    "CacheRetention",
    # Engine
    "SkillsEngine",
    # Agent
    "AgentRunner",
    "AgentConfig",
    "AgentMessage",
    "AgentAbortedError",
    "create_agent",
    # Events
    "EventBus",
    "AGENT_START",
    "AGENT_END",
    "TURN_START",
    "TURN_END",
    "BEFORE_TOOL_CALL",
    "AFTER_TOOL_RESULT",
    "CONTEXT_TRANSFORM",
    "INPUT",
    "TOOL_EXECUTION_UPDATE",
    "SESSION_START",
    "SESSION_END",
    "MODEL_CHANGE",
    "COMPACTION",
    "ToolExecutionUpdateEvent",
    "AgentStartEvent",
    "AgentEndEvent",
    "TurnStartEvent",
    "TurnEndEvent",
    "BeforeToolCallEvent",
    "ToolCallEventResult",
    "AfterToolResultEvent",
    "ToolResultEventResult",
    "ContextTransformEvent",
    "ContextTransformEventResult",
    "InputEvent",
    "InputEventResult",
    "StreamEvent",
    "SessionStartEvent",
    "SessionEndEvent",
    "ModelChangeEvent",
    "CompactionEvent",
    # Model Registry
    "ModelDefinition",
    "ModelCost",
    "ModelRegistry",
    "TokenUsage",
    "CostBreakdown",
    # Thinking & Transport
    "ThinkingLevel",
    "Transport",
    "DEFAULT_THINKING_BUDGETS",
    "adjust_max_tokens_for_thinking",
    "map_thinking_level_to_anthropic_effort",
    "map_thinking_level_to_openai_effort",
    # Context Management
    "ContextManager",
    "ContextCompactor",
    "TokenBudgetCompactor",
    "SlidingWindowCompactor",
    "estimate_tokens",
    "estimate_message_tokens",
    "estimate_messages_tokens",
    # Context Files
    "ContextFile",
    "load_context_files",
    # Cache
    "get_cache_control_anthropic",
    "get_cache_config_openai",
    # Loaders
    "SkillLoader",
    "MarkdownSkillLoader",
    # Filters
    "SkillFilter",
    "DefaultSkillFilter",
    # Runtime
    "SkillRuntime",
    "BashRuntime",
    "CodeModeRuntime",
    # Adapters
    "AdapterRegistry",
    "AdapterFactory",
    # Extensions
    "ExtensionAPI",
    "ExtensionManager",
    "ExtensionInfo",
    "CommandInfo",
    "ToolInfo",
    # Commands
    "CommandRegistry",
    "CommandResult",
    # Prompts
    "PromptTemplate",
    "PromptTemplateLoader",
] + _OPTIONAL_EXPORTS  # BoxLite / sandbox / memory names, when installed
|
"""
LLM provider adapters.

These adapters integrate the skills engine with different LLM providers,
making it easy to use skills in agent workflows.
"""

from skillengine.adapters.base import LLMAdapter
from skillengine.adapters.registry import AdapterFactory, AdapterRegistry

# Always-available public API; provider-specific adapters are appended
# below only when their optional SDK dependency is installed.
__all__ = ["LLMAdapter", "AdapterRegistry", "AdapterFactory"]

# Optional imports for specific providers. Each provider module raises
# ImportError when its third-party SDK is missing, so a failed import here
# simply leaves that adapter out of the package namespace and __all__.
try:
    from skillengine.adapters.openai import OpenAIAdapter  # noqa: F401

    __all__.append("OpenAIAdapter")
except ImportError:
    pass

try:
    from skillengine.adapters.anthropic import AnthropicAdapter  # noqa: F401

    __all__.append("AnthropicAdapter")
except ImportError:
    pass
"""
Anthropic adapter for the skills engine.

Requires the 'anthropic' extra: pip install skillengine[anthropic]
"""

from __future__ import annotations

from collections.abc import AsyncIterator
from typing import Any, TypedDict

try:
    from anthropic import AsyncAnthropic  # type: ignore[import-not-found]
except ImportError as err:
    # Chain the original error so the underlying import failure (e.g. a
    # broken partial install) stays visible in the traceback.
    raise ImportError(
        "Anthropic adapter requires the 'anthropic' package. "
        "Install with: pip install skillengine[anthropic]"
    ) from err

from skillengine.adapters.base import AgentResponse, LLMAdapter, Message
from skillengine.engine import SkillsEngine
from skillengine.events import StreamEvent
from skillengine.model_registry import (
    ThinkingLevel,
    TokenUsage,
    adjust_max_tokens_for_thinking,
    map_thinking_level_to_anthropic_effort,
    supports_adaptive_thinking,
)
|
|
31
|
+
|
|
32
|
+
# Anthropic tool input schema (JSON-Schema-shaped dict). Functional
# TypedDict syntax is used because "type" shadows the builtin name.
AnthropicInputSchema = TypedDict(
    "AnthropicInputSchema",
    {
        "type": str,
        "properties": dict[str, Any],
        "required": list[str],
    },
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# Anthropic tool definition as sent in the "tools" request field.
AnthropicTool = TypedDict(
    "AnthropicTool",
    {
        "name": str,
        "description": str,
        "input_schema": AnthropicInputSchema,
    },
)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class AnthropicMessage(TypedDict, total=False):
|
|
49
|
+
"""Anthropic message format."""
|
|
50
|
+
|
|
51
|
+
role: str
|
|
52
|
+
content: str | list[dict[str, Any]]
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class AnthropicAdapter(LLMAdapter):
    """
    Anthropic adapter for the skills engine.

    Example:
        from anthropic import AsyncAnthropic
        from skillengine import SkillsEngine
        from skillengine.adapters import AnthropicAdapter

        engine = SkillsEngine(config=...)
        client = AsyncAnthropic()
        adapter = AnthropicAdapter(engine, client)

        response = await adapter.chat([
            Message(role="user", content="List my GitHub PRs")
        ])
    """

    # Context-window cap handed to adjust_max_tokens_for_thinking when the
    # model does not support adaptive thinking.
    _THINKING_CONTEXT_WINDOW = 128_000

    def __init__(
        self,
        engine: SkillsEngine,
        client: AsyncAnthropic | None = None,
        model: str = "claude-3-5-sonnet-20241022",
        max_tokens: int = 4096,
        enable_tools: bool = True,
    ) -> None:
        """
        Args:
            engine: Skills engine providing prompts and tool definitions.
            client: Pre-configured AsyncAnthropic client; a default one is
                created when omitted.
            model: Anthropic model identifier.
            max_tokens: Default completion token budget per request.
            enable_tools: When True, skill tools are attached to requests.
        """
        super().__init__(engine)
        self.client = client or AsyncAnthropic()
        self.model = model
        self.max_tokens = max_tokens
        self.enable_tools = enable_tools

    def _get_anthropic_tools(self) -> list[AnthropicTool]:
        """Convert the engine's tool definitions to Anthropic format."""
        tool_defs = self.get_tool_definitions()
        return [
            {
                "name": tool["name"],
                "description": tool["description"],
                "input_schema": {
                    "type": tool["parameters"]["type"],
                    "properties": tool["parameters"]["properties"],
                    "required": tool["parameters"]["required"],
                },
            }
            for tool in tool_defs
        ]

    def _build_anthropic_messages(
        self,
        messages: list[Message],
    ) -> list[dict[str, Any]]:
        """Build Anthropic-format messages (excluding system role).

        Anthropic takes the system prompt as a separate request field, so
        any "system" messages in the history are dropped here.
        """
        anthropic_messages: list[dict[str, Any]] = []
        for msg in messages:
            if msg.role == "system":
                continue
            anthropic_messages.append({"role": msg.role, "content": msg.content})
        return anthropic_messages

    def _apply_thinking_config(
        self,
        request_kwargs: dict[str, Any],
        thinking_level: ThinkingLevel | None,
    ) -> None:
        """Mutate *request_kwargs* in place with thinking configuration.

        A None level means "off" (no thinking fields added). Models with
        adaptive thinking get an effort hint; others get an explicit token
        budget, which may also raise "max_tokens" to leave room for it.
        """
        level = thinking_level or "off"
        if level == "off":
            return
        if supports_adaptive_thinking(self.model):
            effort = map_thinking_level_to_anthropic_effort(level)
            request_kwargs["thinking"] = {"type": "adaptive"}
            request_kwargs["output_config"] = {"effort": effort}
        else:
            max_tokens, thinking_budget = adjust_max_tokens_for_thinking(
                self.max_tokens, self._THINKING_CONTEXT_WINDOW, level
            )
            request_kwargs["max_tokens"] = max_tokens
            request_kwargs["thinking"] = {
                "type": "enabled",
                "budget_tokens": thinking_budget,
            }

    def _build_request_kwargs(
        self,
        messages: list[Message],
        system_prompt: str | None,
        thinking_level: ThinkingLevel | None,
    ) -> dict[str, Any]:
        """Assemble the kwargs for messages.create / messages.stream.

        Shared by chat() and chat_stream_events() so both code paths stay
        consistent (system prompt, tools, and thinking configuration).
        """
        request_kwargs: dict[str, Any] = {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "messages": self._build_anthropic_messages(messages),
        }

        # System prompt = caller's prompt merged with the skills prompt.
        full_system = self.build_system_prompt(system_prompt or "")
        if full_system:
            request_kwargs["system"] = full_system

        if self.enable_tools:
            tools = self._get_anthropic_tools()
            if tools:
                request_kwargs["tools"] = tools

        self._apply_thinking_config(request_kwargs, thinking_level)
        return request_kwargs

    async def chat(
        self,
        messages: list[Message],
        system_prompt: str | None = None,
        thinking_level: ThinkingLevel | None = None,
    ) -> AgentResponse:
        """Send a non-streaming chat request to Anthropic.

        Args:
            messages: Conversation history ("system" messages are dropped;
                Anthropic takes the system prompt separately).
            system_prompt: Extra system text merged with the skills prompt.
            thinking_level: Optional thinking level; None means off.

        Returns:
            AgentResponse with concatenated text, tool calls, the stop
            reason, and token usage.
        """
        request_kwargs = self._build_request_kwargs(
            messages, system_prompt, thinking_level
        )

        response = await self.client.messages.create(**request_kwargs)

        # Collect text and tool-use blocks from the response content.
        content = ""
        tool_calls: list[dict[str, Any]] = []
        for block in response.content:
            if block.type == "text":
                content += block.text
            elif block.type == "tool_use":
                tool_calls.append(
                    {
                        "id": block.id,
                        "name": block.name,
                        "arguments": block.input,
                    }
                )

        # thinking_tokens is not present on all SDK/usage versions.
        thinking_tokens = getattr(response.usage, "thinking_tokens", None) or 0

        token_usage = TokenUsage(
            input_tokens=response.usage.input_tokens,
            output_tokens=response.usage.output_tokens,
            thinking_tokens=thinking_tokens,
        )

        return AgentResponse(
            content=content,
            tool_calls=tool_calls,
            finish_reason=response.stop_reason,
            usage={
                "prompt_tokens": response.usage.input_tokens,
                "completion_tokens": response.usage.output_tokens,
            },
            token_usage=token_usage,
        )

    async def chat_stream(
        self,
        messages: list[Message],
        system_prompt: str | None = None,
        thinking_level: ThinkingLevel | None = None,
    ) -> AsyncIterator[str]:
        """Stream a chat response from Anthropic (text deltas only)."""
        async for event in self.chat_stream_events(
            messages, system_prompt, thinking_level=thinking_level
        ):
            if event.type == "text_delta":
                yield event.content

    async def chat_stream_events(
        self,
        messages: list[Message],
        system_prompt: str | None = None,
        thinking_level: ThinkingLevel | None = None,
    ) -> AsyncIterator[StreamEvent]:
        """
        Stream structured events from Anthropic.

        Maps Anthropic streaming events to StreamEvent types:
        - content_block_start (text) → text_start
        - content_block_delta (text_delta) → text_delta
        - content_block_stop (text) → text_end
        - content_block_start (thinking) → thinking_start
        - content_block_delta (thinking_delta) → thinking_delta
        - content_block_stop (thinking) → thinking_end
        - content_block_start (tool_use) → tool_call_start
        - content_block_delta (input_json_delta) → tool_call_delta
        - content_block_stop (tool_use) → tool_call_end
        - message_stop → done
        """
        request_kwargs = self._build_request_kwargs(
            messages, system_prompt, thinking_level
        )

        # Track open blocks so stop/delta events can be mapped back:
        # block_index -> {"type": "text"|"thinking"|"tool_use", "id", "name"}
        active_blocks: dict[int, dict[str, str]] = {}

        async with self.client.messages.stream(**request_kwargs) as stream:
            async for event in stream:
                event_type = event.type

                if event_type == "content_block_start":
                    block = event.content_block
                    idx = event.index
                    if block.type == "text":
                        active_blocks[idx] = {"type": "text"}
                        yield StreamEvent(type="text_start")
                    elif block.type == "thinking":
                        active_blocks[idx] = {"type": "thinking"}
                        yield StreamEvent(type="thinking_start")
                    elif block.type == "tool_use":
                        tc_id = block.id
                        tc_name = block.name
                        active_blocks[idx] = {
                            "type": "tool_use",
                            "id": tc_id,
                            "name": tc_name,
                        }
                        yield StreamEvent(
                            type="tool_call_start",
                            tool_call_id=tc_id,
                            tool_name=tc_name,
                        )

                elif event_type == "content_block_delta":
                    idx = event.index
                    delta = event.delta
                    block_info = active_blocks.get(idx, {})

                    if delta.type == "text_delta":
                        yield StreamEvent(type="text_delta", content=delta.text)
                    elif delta.type == "thinking_delta":
                        yield StreamEvent(
                            type="thinking_delta",
                            content=delta.thinking,
                        )
                    elif delta.type == "input_json_delta":
                        yield StreamEvent(
                            type="tool_call_delta",
                            tool_call_id=block_info.get("id"),
                            tool_name=block_info.get("name"),
                            args_delta=delta.partial_json,
                        )

                elif event_type == "content_block_stop":
                    idx = event.index
                    block_info = active_blocks.pop(idx, {})
                    btype = block_info.get("type", "")

                    if btype == "text":
                        yield StreamEvent(type="text_end")
                    elif btype == "thinking":
                        yield StreamEvent(type="thinking_end")
                    elif btype == "tool_use":
                        yield StreamEvent(
                            type="tool_call_end",
                            tool_call_id=block_info.get("id"),
                            tool_name=block_info.get("name"),
                        )

                elif event_type == "message_stop":
                    yield StreamEvent(type="done", finish_reason="complete")