kollabor-0.4.9-py3-none-any.whl → kollabor-0.4.15-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +2 -0
- agents/coder/__init__.py +0 -0
- agents/coder/agent.json +4 -0
- agents/coder/api-integration.md +2150 -0
- agents/coder/cli-pretty.md +765 -0
- agents/coder/code-review.md +1092 -0
- agents/coder/database-design.md +1525 -0
- agents/coder/debugging.md +1102 -0
- agents/coder/dependency-management.md +1397 -0
- agents/coder/git-workflow.md +1099 -0
- agents/coder/refactoring.md +1454 -0
- agents/coder/security-hardening.md +1732 -0
- agents/coder/system_prompt.md +1448 -0
- agents/coder/tdd.md +1367 -0
- agents/creative-writer/__init__.py +0 -0
- agents/creative-writer/agent.json +4 -0
- agents/creative-writer/character-development.md +1852 -0
- agents/creative-writer/dialogue-craft.md +1122 -0
- agents/creative-writer/plot-structure.md +1073 -0
- agents/creative-writer/revision-editing.md +1484 -0
- agents/creative-writer/system_prompt.md +690 -0
- agents/creative-writer/worldbuilding.md +2049 -0
- agents/data-analyst/__init__.py +30 -0
- agents/data-analyst/agent.json +4 -0
- agents/data-analyst/data-visualization.md +992 -0
- agents/data-analyst/exploratory-data-analysis.md +1110 -0
- agents/data-analyst/pandas-data-manipulation.md +1081 -0
- agents/data-analyst/sql-query-optimization.md +881 -0
- agents/data-analyst/statistical-analysis.md +1118 -0
- agents/data-analyst/system_prompt.md +928 -0
- agents/default/__init__.py +0 -0
- agents/default/agent.json +4 -0
- agents/default/dead-code.md +794 -0
- agents/default/explore-agent-system.md +585 -0
- agents/default/system_prompt.md +1448 -0
- agents/kollabor/__init__.py +0 -0
- agents/kollabor/analyze-plugin-lifecycle.md +175 -0
- agents/kollabor/analyze-terminal-rendering.md +388 -0
- agents/kollabor/code-review.md +1092 -0
- agents/kollabor/debug-mcp-integration.md +521 -0
- agents/kollabor/debug-plugin-hooks.md +547 -0
- agents/kollabor/debugging.md +1102 -0
- agents/kollabor/dependency-management.md +1397 -0
- agents/kollabor/git-workflow.md +1099 -0
- agents/kollabor/inspect-llm-conversation.md +148 -0
- agents/kollabor/monitor-event-bus.md +558 -0
- agents/kollabor/profile-performance.md +576 -0
- agents/kollabor/refactoring.md +1454 -0
- agents/kollabor/system_prompt copy.md +1448 -0
- agents/kollabor/system_prompt.md +757 -0
- agents/kollabor/trace-command-execution.md +178 -0
- agents/kollabor/validate-config.md +879 -0
- agents/research/__init__.py +0 -0
- agents/research/agent.json +4 -0
- agents/research/architecture-mapping.md +1099 -0
- agents/research/codebase-analysis.md +1077 -0
- agents/research/dependency-audit.md +1027 -0
- agents/research/performance-profiling.md +1047 -0
- agents/research/security-review.md +1359 -0
- agents/research/system_prompt.md +492 -0
- agents/technical-writer/__init__.py +0 -0
- agents/technical-writer/agent.json +4 -0
- agents/technical-writer/api-documentation.md +2328 -0
- agents/technical-writer/changelog-management.md +1181 -0
- agents/technical-writer/readme-writing.md +1360 -0
- agents/technical-writer/style-guide.md +1410 -0
- agents/technical-writer/system_prompt.md +653 -0
- agents/technical-writer/tutorial-creation.md +1448 -0
- core/__init__.py +0 -2
- core/application.py +343 -88
- core/cli.py +229 -10
- core/commands/menu_renderer.py +463 -59
- core/commands/registry.py +14 -9
- core/commands/system_commands.py +2461 -14
- core/config/loader.py +151 -37
- core/config/service.py +18 -6
- core/events/bus.py +29 -9
- core/events/executor.py +205 -75
- core/events/models.py +27 -8
- core/fullscreen/command_integration.py +20 -24
- core/fullscreen/components/__init__.py +10 -1
- core/fullscreen/components/matrix_components.py +1 -2
- core/fullscreen/components/space_shooter_components.py +654 -0
- core/fullscreen/plugin.py +5 -0
- core/fullscreen/renderer.py +52 -13
- core/fullscreen/session.py +52 -15
- core/io/__init__.py +29 -5
- core/io/buffer_manager.py +6 -1
- core/io/config_status_view.py +7 -29
- core/io/core_status_views.py +267 -347
- core/io/input/__init__.py +25 -0
- core/io/input/command_mode_handler.py +711 -0
- core/io/input/display_controller.py +128 -0
- core/io/input/hook_registrar.py +286 -0
- core/io/input/input_loop_manager.py +421 -0
- core/io/input/key_press_handler.py +502 -0
- core/io/input/modal_controller.py +1011 -0
- core/io/input/paste_processor.py +339 -0
- core/io/input/status_modal_renderer.py +184 -0
- core/io/input_errors.py +5 -1
- core/io/input_handler.py +211 -2452
- core/io/key_parser.py +7 -0
- core/io/layout.py +15 -3
- core/io/message_coordinator.py +111 -2
- core/io/message_renderer.py +129 -4
- core/io/status_renderer.py +147 -607
- core/io/terminal_renderer.py +97 -51
- core/io/terminal_state.py +21 -4
- core/io/visual_effects.py +816 -165
- core/llm/agent_manager.py +1063 -0
- core/llm/api_adapters/__init__.py +44 -0
- core/llm/api_adapters/anthropic_adapter.py +432 -0
- core/llm/api_adapters/base.py +241 -0
- core/llm/api_adapters/openai_adapter.py +326 -0
- core/llm/api_communication_service.py +167 -113
- core/llm/conversation_logger.py +322 -16
- core/llm/conversation_manager.py +556 -30
- core/llm/file_operations_executor.py +84 -32
- core/llm/llm_service.py +934 -103
- core/llm/mcp_integration.py +541 -57
- core/llm/message_display_service.py +135 -18
- core/llm/plugin_sdk.py +1 -2
- core/llm/profile_manager.py +1183 -0
- core/llm/response_parser.py +274 -56
- core/llm/response_processor.py +16 -3
- core/llm/tool_executor.py +6 -1
- core/logging/__init__.py +2 -0
- core/logging/setup.py +34 -6
- core/models/resume.py +54 -0
- core/plugins/__init__.py +4 -2
- core/plugins/base.py +127 -0
- core/plugins/collector.py +23 -161
- core/plugins/discovery.py +37 -3
- core/plugins/factory.py +6 -12
- core/plugins/registry.py +5 -17
- core/ui/config_widgets.py +128 -28
- core/ui/live_modal_renderer.py +2 -1
- core/ui/modal_actions.py +5 -0
- core/ui/modal_overlay_renderer.py +0 -60
- core/ui/modal_renderer.py +268 -7
- core/ui/modal_state_manager.py +29 -4
- core/ui/widgets/base_widget.py +7 -0
- core/updates/__init__.py +10 -0
- core/updates/version_check_service.py +348 -0
- core/updates/version_comparator.py +103 -0
- core/utils/config_utils.py +685 -526
- core/utils/plugin_utils.py +1 -1
- core/utils/session_naming.py +111 -0
- fonts/LICENSE +21 -0
- fonts/README.md +46 -0
- fonts/SymbolsNerdFont-Regular.ttf +0 -0
- fonts/SymbolsNerdFontMono-Regular.ttf +0 -0
- fonts/__init__.py +44 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/METADATA +54 -4
- kollabor-0.4.15.dist-info/RECORD +228 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/top_level.txt +2 -0
- plugins/agent_orchestrator/__init__.py +39 -0
- plugins/agent_orchestrator/activity_monitor.py +181 -0
- plugins/agent_orchestrator/file_attacher.py +77 -0
- plugins/agent_orchestrator/message_injector.py +135 -0
- plugins/agent_orchestrator/models.py +48 -0
- plugins/agent_orchestrator/orchestrator.py +403 -0
- plugins/agent_orchestrator/plugin.py +976 -0
- plugins/agent_orchestrator/xml_parser.py +191 -0
- plugins/agent_orchestrator_plugin.py +9 -0
- plugins/enhanced_input/box_styles.py +1 -0
- plugins/enhanced_input/color_engine.py +19 -4
- plugins/enhanced_input/config.py +2 -2
- plugins/enhanced_input_plugin.py +61 -11
- plugins/fullscreen/__init__.py +6 -2
- plugins/fullscreen/example_plugin.py +1035 -222
- plugins/fullscreen/setup_wizard_plugin.py +592 -0
- plugins/fullscreen/space_shooter_plugin.py +131 -0
- plugins/hook_monitoring_plugin.py +436 -78
- plugins/query_enhancer_plugin.py +66 -30
- plugins/resume_conversation_plugin.py +1494 -0
- plugins/save_conversation_plugin.py +98 -32
- plugins/system_commands_plugin.py +70 -56
- plugins/tmux_plugin.py +154 -78
- plugins/workflow_enforcement_plugin.py +94 -92
- system_prompt/default.md +952 -886
- core/io/input_mode_manager.py +0 -402
- core/io/modal_interaction_handler.py +0 -315
- core/io/raw_input_processor.py +0 -946
- core/storage/__init__.py +0 -5
- core/storage/state_manager.py +0 -84
- core/ui/widget_integration.py +0 -222
- core/utils/key_reader.py +0 -171
- kollabor-0.4.9.dist-info/RECORD +0 -128
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/WHEEL +0 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/entry_points.txt +0 -0
- {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/licenses/LICENSE +0 -0
core/llm/api_adapters/__init__.py
@@ -0,0 +1,44 @@
+"""
+API Adapters for different LLM providers.
+
+This module provides adapters that handle the differences between
+OpenAI and Anthropic API formats for:
+- Request formatting (tool definitions, messages)
+- Response parsing (tool calls, content)
+- Tool result formatting
+"""
+
+from .base import (
+    BaseAPIAdapter,
+    AdapterResponse,
+    ToolCallResult,
+    ToolCallingFormat,
+)
+from .openai_adapter import OpenAIAdapter
+from .anthropic_adapter import AnthropicAdapter
+
+__all__ = [
+    "BaseAPIAdapter",
+    "AdapterResponse",
+    "ToolCallResult",
+    "ToolCallingFormat",
+    "OpenAIAdapter",
+    "AnthropicAdapter",
+]
+
+
+def get_adapter(tool_format: str, base_url: str = "") -> BaseAPIAdapter:
+    """
+    Factory function to get the appropriate adapter.
+
+    Args:
+        tool_format: "openai" or "anthropic"
+        base_url: Base URL for the API endpoint
+
+    Returns:
+        Configured API adapter instance
+    """
+    if tool_format == "anthropic":
+        return AnthropicAdapter(base_url=base_url or "https://api.anthropic.com")
+    else:
+        return OpenAIAdapter(base_url=base_url or "http://localhost:1234")
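A minimal usage sketch of the factory added above (not part of the packaged files; the model name, placeholder API key, and message content are illustrative):

```python
# Sketch: pick an adapter from a profile's tool_format setting and build a
# request payload for it. Assumes the package layout shown in this diff.
from core.llm.api_adapters import get_adapter

adapter = get_adapter("anthropic")  # falls back to https://api.anthropic.com
payload = adapter.format_request(
    messages=[
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Hello"},
    ],
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
)
headers = adapter.get_headers(api_token="<ANTHROPIC_API_KEY>")  # placeholder key
# payload and headers would then be POSTed to adapter.api_endpoint
```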
core/llm/api_adapters/anthropic_adapter.py
@@ -0,0 +1,432 @@
+"""
+Anthropic Claude API Adapter.
+
+Handles the Anthropic API format:
+- Endpoint: /v1/messages
+- Tool definitions use "input_schema" key
+- Responses have "tool_use" content blocks
+- Tool results use role="user" with tool_result content block
+- System message is separate from messages array
+"""
+
+import json
+import logging
+from typing import Any, Dict, List, Optional
+
+from .base import (
+    BaseAPIAdapter,
+    AdapterResponse,
+    ToolCallResult,
+    ToolCallingFormat,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class AnthropicAdapter(BaseAPIAdapter):
+    """
+    Adapter for Anthropic Claude API.
+
+    Key differences from OpenAI:
+    - System prompt is a separate field, not in messages
+    - Tool definitions use "input_schema" instead of "parameters"
+    - Tool calls are "tool_use" content blocks
+    - Tool results are "tool_result" content blocks in user messages
+    """
+
+    # Anthropic API version header
+    ANTHROPIC_VERSION = "2023-06-01"
+
+    def __init__(self, base_url: str = "https://api.anthropic.com"):
+        """
+        Initialize Anthropic adapter.
+
+        Args:
+            base_url: Base URL for the API (default: api.anthropic.com)
+        """
+        super().__init__(base_url)
+
+    @property
+    def provider_name(self) -> str:
+        return "anthropic"
+
+    @property
+    def api_endpoint(self) -> str:
+        return f"{self._base_url}/v1/messages"
+
+    @property
+    def tool_format(self) -> ToolCallingFormat:
+        return ToolCallingFormat.ANTHROPIC
+
+    def format_request(
+        self,
+        messages: List[Dict[str, Any]],
+        tools: Optional[List[Dict[str, Any]]] = None,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """
+        Format request for Anthropic API.
+
+        Key differences:
+        - System message is hoisted to separate "system" field
+        - max_tokens is required (not optional)
+
+        Args:
+            messages: Conversation messages
+            tools: Tool definitions (optional)
+            **kwargs: model, temperature, max_tokens, stream
+
+        Returns:
+            Anthropic-formatted request payload
+        """
+        self.validate_messages(messages)
+
+        # Separate system message from conversation
+        system_content, conversation_messages = self._separate_system_message(messages)
+
+        payload: Dict[str, Any] = {
+            "model": kwargs.get("model", "claude-sonnet-4-20250514"),
+            "max_tokens": kwargs.get("max_tokens", 4096),  # Required for Anthropic
+            "messages": conversation_messages,
+        }
+
+        # Add system prompt if present
+        if system_content:
+            payload["system"] = system_content
+
+        # Optional parameters
+        if "temperature" in kwargs:
+            payload["temperature"] = kwargs["temperature"]
+
+        if kwargs.get("stream", False):
+            payload["stream"] = True
+
+        # Tool configuration
+        if tools:
+            payload["tools"] = self.format_tool_definitions(tools)
+            # Anthropic tool_choice format is different
+            tool_choice = kwargs.get("tool_choice", "auto")
+            if tool_choice == "auto":
+                payload["tool_choice"] = {"type": "auto"}
+            elif tool_choice == "any":
+                payload["tool_choice"] = {"type": "any"}
+            elif tool_choice == "none":
+                # Don't include tool_choice for "none"
+                pass
+            elif isinstance(tool_choice, dict):
+                payload["tool_choice"] = tool_choice
+            else:
+                # Specific tool name
+                payload["tool_choice"] = {"type": "tool", "name": tool_choice}
+
+        return payload
+
+    def _separate_system_message(
+        self, messages: List[Dict[str, Any]]
+    ) -> tuple[str, List[Dict[str, Any]]]:
+        """
+        Separate system messages from conversation.
+
+        Anthropic requires system message as separate field.
+        Multiple system messages are concatenated.
+
+        Args:
+            messages: All messages including system
+
+        Returns:
+            Tuple of (system_content, conversation_messages)
+        """
+        system_parts: List[str] = []
+        conversation: List[Dict[str, Any]] = []
+
+        for msg in messages:
+            # Validate message is a dict
+            if not isinstance(msg, dict):
+                logger.warning(f"Skipping non-dict message: {type(msg)}")
+                continue
+            role = msg.get("role", "user")
+            content = msg.get("content", "")
+
+            if role == "system":
+                system_parts.append(content)
+            elif role == "user":
+                conversation.append(self._format_user_message(msg))
+            elif role == "assistant":
+                conversation.append(self._format_assistant_message(msg))
+
+        system_content = "\n".join(system_parts) if system_parts else ""
+        return system_content, conversation
+
+    def _format_user_message(self, msg: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Format user message for Anthropic API.
+
+        Handles both simple text and tool_result content.
+
+        Args:
+            msg: User message
+
+        Returns:
+            Anthropic-formatted user message
+        """
+        content = msg.get("content", "")
+
+        # Check if this is a tool result message
+        if "tool_result" in msg:
+            return {
+                "role": "user",
+                "content": msg["tool_result"],  # Already formatted content blocks
+            }
+
+        # Check if content is already a list of content blocks
+        if isinstance(content, list):
+            return {"role": "user", "content": content}
+
+        # Simple text content
+        return {
+            "role": "user",
+            "content": [{"type": "text", "text": content}],
+        }
+
+    def _format_assistant_message(self, msg: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Format assistant message for Anthropic API.
+
+        Handles text and tool_use content blocks.
+
+        Args:
+            msg: Assistant message
+
+        Returns:
+            Anthropic-formatted assistant message
+        """
+        content = msg.get("content", "")
+        tool_uses = msg.get("tool_uses", [])
+
+        content_blocks: List[Dict[str, Any]] = []
+
+        # Add text content if present
+        if content:
+            content_blocks.append({"type": "text", "text": content})
+
+        # Add tool_use blocks
+        for tool_use in tool_uses:
+            content_blocks.append({
+                "type": "tool_use",
+                "id": tool_use.get("id", ""),
+                "name": tool_use.get("name", ""),
+                "input": tool_use.get("input", {}),
+            })
+
+        # If content was already a list of blocks, use that
+        if isinstance(content, list):
+            content_blocks = content
+
+        return {
+            "role": "assistant",
+            "content": content_blocks if content_blocks else [{"type": "text", "text": ""}],
+        }
+
+    def parse_response(self, raw_response: Dict[str, Any]) -> AdapterResponse:
+        """
+        Parse Anthropic API response.
+
+        Args:
+            raw_response: Raw JSON from Anthropic API
+
+        Returns:
+            Unified AdapterResponse
+        """
+        # Handle error responses
+        if "error" in raw_response:
+            error_msg = raw_response["error"].get("message", "Unknown error")
+            logger.error(f"Anthropic API error: {error_msg}")
+            return AdapterResponse(
+                content=f"API Error: {error_msg}",
+                stop_reason="error",
+                raw_response=raw_response,
+            )
+
+        # Check if this looks like an OpenAI response (wrong adapter)
+        if "choices" in raw_response:
+            logger.error("FORMAT MISMATCH: Got OpenAI response but using Anthropic adapter")
+            return AdapterResponse(
+                content="CONFIG ERROR: Your profile has tool_format='anthropic' but the server "
+                "returned an OpenAI-compatible response.\n\n"
+                "FIX: Run /profile, select this profile, press 'e' to edit, "
+                "change Tool Format to 'openai', then Ctrl+S to save.",
+                stop_reason="format_error",
+                raw_response=raw_response,
+            )
+
+        # Extract content blocks
+        content_blocks = raw_response.get("content", [])
+
+        # Validate content_blocks is a list
+        if not isinstance(content_blocks, list):
+            logger.warning(f"Expected content to be a list, got {type(content_blocks)}")
+            content_blocks = []
+
+        # Process content blocks
+        text_parts: List[str] = []
+        tool_calls: List[ToolCallResult] = []
+
+        for block in content_blocks:
+            # Ensure block is a dict before calling .get()
+            if not isinstance(block, dict):
+                logger.warning(f"Skipping non-dict content block: {type(block)}")
+                continue
+            block_type = block.get("type", "")
+
+            if block_type == "text":
+                text_parts.append(block.get("text", ""))
+            elif block_type == "tool_use":
+                tool_calls.append(
+                    ToolCallResult(
+                        tool_id=block.get("id", ""),
+                        tool_name=block.get("name", ""),
+                        arguments=block.get("input", {}),
+                    )
+                )
+
+        # Combine text content
+        content = "\n".join(text_parts)
+
+        # Extract usage
+        usage = raw_response.get("usage", {})
+
+        # Map stop_reason
+        stop_reason = raw_response.get("stop_reason", "unknown")
+        # Anthropic uses "end_turn", "tool_use", "max_tokens" directly
+        # which matches our unified format
+
+        return AdapterResponse(
+            content=content,
+            tool_calls=tool_calls,
+            usage={
+                "prompt_tokens": usage.get("input_tokens", 0),
+                "completion_tokens": usage.get("output_tokens", 0),
+                "total_tokens": usage.get("input_tokens", 0) + usage.get("output_tokens", 0),
+            },
+            stop_reason=stop_reason,
+            raw_response=raw_response,
+            model=raw_response.get("model", ""),
+        )
+
+    def format_tool_definitions(
+        self, tools: List[Dict[str, Any]]
+    ) -> List[Dict[str, Any]]:
+        """
+        Convert tool definitions to Anthropic format.
+
+        Anthropic format:
+        {
+            "name": "...",
+            "description": "...",
+            "input_schema": {...}  # JSON Schema
+        }
+
+        Args:
+            tools: Generic tool definitions
+
+        Returns:
+            Anthropic-formatted tool definitions
+        """
+        formatted = []
+
+        for tool in tools:
+            # Handle both "parameters" and "input_schema" keys
+            input_schema = tool.get("input_schema") or tool.get("parameters", {})
+
+            formatted.append({
+                "name": tool.get("name", ""),
+                "description": tool.get("description", ""),
+                "input_schema": input_schema,
+            })
+
+        return formatted
+
+    def format_tool_result(
+        self, tool_id: str, result: Any, is_error: bool = False
+    ) -> Dict[str, Any]:
+        """
+        Format tool result for Anthropic API.
+
+        Anthropic uses role="user" with tool_result content blocks.
+
+        Args:
+            tool_id: ID of the tool call (tool_use_id)
+            result: Tool execution result
+            is_error: Whether result is an error
+
+        Returns:
+            Anthropic-formatted tool result message
+        """
+        content = result if isinstance(result, str) else json.dumps(result)
+
+        return {
+            "role": "user",
+            "content": [
+                {
+                    "type": "tool_result",
+                    "tool_use_id": tool_id,
+                    "content": content,
+                    "is_error": is_error,
+                }
+            ],
+        }
+
+    def format_multiple_tool_results(
+        self, results: List[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """
+        Format multiple tool results in a single user message.
+
+        For parallel tool calls, all results must be in one message.
+
+        Args:
+            results: List of {tool_id, result, is_error}
+
+        Returns:
+            Single user message with all tool_result blocks
+        """
+        content_blocks = []
+
+        for r in results:
+            tool_id = r.get("tool_id", "")
+            result = r.get("result", "")
+            is_error = r.get("is_error", False)
+
+            content = result if isinstance(result, str) else json.dumps(result)
+
+            content_blocks.append({
+                "type": "tool_result",
+                "tool_use_id": tool_id,
+                "content": content,
+                "is_error": is_error,
+            })
+
+        return {
+            "role": "user",
+            "content": content_blocks,
+        }
+
+    def get_headers(self, api_token: Optional[str] = None) -> Dict[str, str]:
+        """
+        Get headers for Anthropic API requests.
+
+        Includes required anthropic-version header.
+
+        Args:
+            api_token: Anthropic API key
+
+        Returns:
+            HTTP headers dictionary
+        """
+        headers = {
+            "Content-Type": "application/json",
+            "anthropic-version": self.ANTHROPIC_VERSION,
+        }
+        if api_token:
+            headers["x-api-key"] = api_token
+        return headers