kollabor 0.4.9__py3-none-any.whl → 0.4.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. agents/__init__.py +2 -0
  2. agents/coder/__init__.py +0 -0
  3. agents/coder/agent.json +4 -0
  4. agents/coder/api-integration.md +2150 -0
  5. agents/coder/cli-pretty.md +765 -0
  6. agents/coder/code-review.md +1092 -0
  7. agents/coder/database-design.md +1525 -0
  8. agents/coder/debugging.md +1102 -0
  9. agents/coder/dependency-management.md +1397 -0
  10. agents/coder/git-workflow.md +1099 -0
  11. agents/coder/refactoring.md +1454 -0
  12. agents/coder/security-hardening.md +1732 -0
  13. agents/coder/system_prompt.md +1448 -0
  14. agents/coder/tdd.md +1367 -0
  15. agents/creative-writer/__init__.py +0 -0
  16. agents/creative-writer/agent.json +4 -0
  17. agents/creative-writer/character-development.md +1852 -0
  18. agents/creative-writer/dialogue-craft.md +1122 -0
  19. agents/creative-writer/plot-structure.md +1073 -0
  20. agents/creative-writer/revision-editing.md +1484 -0
  21. agents/creative-writer/system_prompt.md +690 -0
  22. agents/creative-writer/worldbuilding.md +2049 -0
  23. agents/data-analyst/__init__.py +30 -0
  24. agents/data-analyst/agent.json +4 -0
  25. agents/data-analyst/data-visualization.md +992 -0
  26. agents/data-analyst/exploratory-data-analysis.md +1110 -0
  27. agents/data-analyst/pandas-data-manipulation.md +1081 -0
  28. agents/data-analyst/sql-query-optimization.md +881 -0
  29. agents/data-analyst/statistical-analysis.md +1118 -0
  30. agents/data-analyst/system_prompt.md +928 -0
  31. agents/default/__init__.py +0 -0
  32. agents/default/agent.json +4 -0
  33. agents/default/dead-code.md +794 -0
  34. agents/default/explore-agent-system.md +585 -0
  35. agents/default/system_prompt.md +1448 -0
  36. agents/kollabor/__init__.py +0 -0
  37. agents/kollabor/analyze-plugin-lifecycle.md +175 -0
  38. agents/kollabor/analyze-terminal-rendering.md +388 -0
  39. agents/kollabor/code-review.md +1092 -0
  40. agents/kollabor/debug-mcp-integration.md +521 -0
  41. agents/kollabor/debug-plugin-hooks.md +547 -0
  42. agents/kollabor/debugging.md +1102 -0
  43. agents/kollabor/dependency-management.md +1397 -0
  44. agents/kollabor/git-workflow.md +1099 -0
  45. agents/kollabor/inspect-llm-conversation.md +148 -0
  46. agents/kollabor/monitor-event-bus.md +558 -0
  47. agents/kollabor/profile-performance.md +576 -0
  48. agents/kollabor/refactoring.md +1454 -0
  49. agents/kollabor/system_prompt copy.md +1448 -0
  50. agents/kollabor/system_prompt.md +757 -0
  51. agents/kollabor/trace-command-execution.md +178 -0
  52. agents/kollabor/validate-config.md +879 -0
  53. agents/research/__init__.py +0 -0
  54. agents/research/agent.json +4 -0
  55. agents/research/architecture-mapping.md +1099 -0
  56. agents/research/codebase-analysis.md +1077 -0
  57. agents/research/dependency-audit.md +1027 -0
  58. agents/research/performance-profiling.md +1047 -0
  59. agents/research/security-review.md +1359 -0
  60. agents/research/system_prompt.md +492 -0
  61. agents/technical-writer/__init__.py +0 -0
  62. agents/technical-writer/agent.json +4 -0
  63. agents/technical-writer/api-documentation.md +2328 -0
  64. agents/technical-writer/changelog-management.md +1181 -0
  65. agents/technical-writer/readme-writing.md +1360 -0
  66. agents/technical-writer/style-guide.md +1410 -0
  67. agents/technical-writer/system_prompt.md +653 -0
  68. agents/technical-writer/tutorial-creation.md +1448 -0
  69. core/__init__.py +0 -2
  70. core/application.py +343 -88
  71. core/cli.py +229 -10
  72. core/commands/menu_renderer.py +463 -59
  73. core/commands/registry.py +14 -9
  74. core/commands/system_commands.py +2461 -14
  75. core/config/loader.py +151 -37
  76. core/config/service.py +18 -6
  77. core/events/bus.py +29 -9
  78. core/events/executor.py +205 -75
  79. core/events/models.py +27 -8
  80. core/fullscreen/command_integration.py +20 -24
  81. core/fullscreen/components/__init__.py +10 -1
  82. core/fullscreen/components/matrix_components.py +1 -2
  83. core/fullscreen/components/space_shooter_components.py +654 -0
  84. core/fullscreen/plugin.py +5 -0
  85. core/fullscreen/renderer.py +52 -13
  86. core/fullscreen/session.py +52 -15
  87. core/io/__init__.py +29 -5
  88. core/io/buffer_manager.py +6 -1
  89. core/io/config_status_view.py +7 -29
  90. core/io/core_status_views.py +267 -347
  91. core/io/input/__init__.py +25 -0
  92. core/io/input/command_mode_handler.py +711 -0
  93. core/io/input/display_controller.py +128 -0
  94. core/io/input/hook_registrar.py +286 -0
  95. core/io/input/input_loop_manager.py +421 -0
  96. core/io/input/key_press_handler.py +502 -0
  97. core/io/input/modal_controller.py +1011 -0
  98. core/io/input/paste_processor.py +339 -0
  99. core/io/input/status_modal_renderer.py +184 -0
  100. core/io/input_errors.py +5 -1
  101. core/io/input_handler.py +211 -2452
  102. core/io/key_parser.py +7 -0
  103. core/io/layout.py +15 -3
  104. core/io/message_coordinator.py +111 -2
  105. core/io/message_renderer.py +129 -4
  106. core/io/status_renderer.py +147 -607
  107. core/io/terminal_renderer.py +97 -51
  108. core/io/terminal_state.py +21 -4
  109. core/io/visual_effects.py +816 -165
  110. core/llm/agent_manager.py +1063 -0
  111. core/llm/api_adapters/__init__.py +44 -0
  112. core/llm/api_adapters/anthropic_adapter.py +432 -0
  113. core/llm/api_adapters/base.py +241 -0
  114. core/llm/api_adapters/openai_adapter.py +326 -0
  115. core/llm/api_communication_service.py +167 -113
  116. core/llm/conversation_logger.py +322 -16
  117. core/llm/conversation_manager.py +556 -30
  118. core/llm/file_operations_executor.py +84 -32
  119. core/llm/llm_service.py +934 -103
  120. core/llm/mcp_integration.py +541 -57
  121. core/llm/message_display_service.py +135 -18
  122. core/llm/plugin_sdk.py +1 -2
  123. core/llm/profile_manager.py +1183 -0
  124. core/llm/response_parser.py +274 -56
  125. core/llm/response_processor.py +16 -3
  126. core/llm/tool_executor.py +6 -1
  127. core/logging/__init__.py +2 -0
  128. core/logging/setup.py +34 -6
  129. core/models/resume.py +54 -0
  130. core/plugins/__init__.py +4 -2
  131. core/plugins/base.py +127 -0
  132. core/plugins/collector.py +23 -161
  133. core/plugins/discovery.py +37 -3
  134. core/plugins/factory.py +6 -12
  135. core/plugins/registry.py +5 -17
  136. core/ui/config_widgets.py +128 -28
  137. core/ui/live_modal_renderer.py +2 -1
  138. core/ui/modal_actions.py +5 -0
  139. core/ui/modal_overlay_renderer.py +0 -60
  140. core/ui/modal_renderer.py +268 -7
  141. core/ui/modal_state_manager.py +29 -4
  142. core/ui/widgets/base_widget.py +7 -0
  143. core/updates/__init__.py +10 -0
  144. core/updates/version_check_service.py +348 -0
  145. core/updates/version_comparator.py +103 -0
  146. core/utils/config_utils.py +685 -526
  147. core/utils/plugin_utils.py +1 -1
  148. core/utils/session_naming.py +111 -0
  149. fonts/LICENSE +21 -0
  150. fonts/README.md +46 -0
  151. fonts/SymbolsNerdFont-Regular.ttf +0 -0
  152. fonts/SymbolsNerdFontMono-Regular.ttf +0 -0
  153. fonts/__init__.py +44 -0
  154. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/METADATA +54 -4
  155. kollabor-0.4.15.dist-info/RECORD +228 -0
  156. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/top_level.txt +2 -0
  157. plugins/agent_orchestrator/__init__.py +39 -0
  158. plugins/agent_orchestrator/activity_monitor.py +181 -0
  159. plugins/agent_orchestrator/file_attacher.py +77 -0
  160. plugins/agent_orchestrator/message_injector.py +135 -0
  161. plugins/agent_orchestrator/models.py +48 -0
  162. plugins/agent_orchestrator/orchestrator.py +403 -0
  163. plugins/agent_orchestrator/plugin.py +976 -0
  164. plugins/agent_orchestrator/xml_parser.py +191 -0
  165. plugins/agent_orchestrator_plugin.py +9 -0
  166. plugins/enhanced_input/box_styles.py +1 -0
  167. plugins/enhanced_input/color_engine.py +19 -4
  168. plugins/enhanced_input/config.py +2 -2
  169. plugins/enhanced_input_plugin.py +61 -11
  170. plugins/fullscreen/__init__.py +6 -2
  171. plugins/fullscreen/example_plugin.py +1035 -222
  172. plugins/fullscreen/setup_wizard_plugin.py +592 -0
  173. plugins/fullscreen/space_shooter_plugin.py +131 -0
  174. plugins/hook_monitoring_plugin.py +436 -78
  175. plugins/query_enhancer_plugin.py +66 -30
  176. plugins/resume_conversation_plugin.py +1494 -0
  177. plugins/save_conversation_plugin.py +98 -32
  178. plugins/system_commands_plugin.py +70 -56
  179. plugins/tmux_plugin.py +154 -78
  180. plugins/workflow_enforcement_plugin.py +94 -92
  181. system_prompt/default.md +952 -886
  182. core/io/input_mode_manager.py +0 -402
  183. core/io/modal_interaction_handler.py +0 -315
  184. core/io/raw_input_processor.py +0 -946
  185. core/storage/__init__.py +0 -5
  186. core/storage/state_manager.py +0 -84
  187. core/ui/widget_integration.py +0 -222
  188. core/utils/key_reader.py +0 -171
  189. kollabor-0.4.9.dist-info/RECORD +0 -128
  190. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/WHEEL +0 -0
  191. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/entry_points.txt +0 -0
  192. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,241 @@
1
+ """
2
+ Base API Adapter interface for LLM providers.
3
+
4
+ Provides abstract base class and data structures for adapting
5
+ between different LLM API formats (OpenAI, Anthropic, etc.).
6
+ """
7
+
8
+ from abc import ABC, abstractmethod
9
+ from dataclasses import dataclass, field
10
+ from enum import Enum
11
+ from typing import Any, Dict, List, Optional
12
+ import logging
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
class ToolCallingFormat(Enum):
    """Identifies which wire format an adapter speaks for tool calling."""

    # /v1/chat/completions endpoint; "parameters" schema key; "tool_calls" array
    OPENAI = "openai"
    # /v1/messages endpoint; "input_schema" schema key; "tool_use" blocks
    ANTHROPIC = "anthropic"
22
+
23
+
24
@dataclass
class ToolCallResult:
    """A single tool invocation requested by the LLM, normalized across providers.

    Attributes:
        tool_id: Unique identifier for this tool call.
        tool_name: Name of the tool being called.
        arguments: Keyword arguments supplied for the tool.
    """

    tool_id: str
    tool_name: str
    arguments: Dict[str, Any]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this tool call as a plain dictionary."""
        return {
            "tool_id": self.tool_id,
            "tool_name": self.tool_name,
            "arguments": self.arguments,
        }


@dataclass
class AdapterResponse:
    """Provider-agnostic response produced by any LLM API adapter.

    Attributes:
        content: Text content of the response.
        tool_calls: Tool invocations the LLM requested, if any.
        usage: Token accounting (prompt_tokens, completion_tokens, total_tokens).
        stop_reason: Normalized termination reason (end_turn, tool_use, max_tokens).
        raw_response: The untouched JSON payload returned by the API.
        model: Identifier of the model that produced the response.
    """

    content: str
    tool_calls: List[ToolCallResult] = field(default_factory=list)
    usage: Dict[str, int] = field(default_factory=dict)
    stop_reason: str = "unknown"
    raw_response: Dict[str, Any] = field(default_factory=dict)
    model: str = ""

    @property
    def has_tool_calls(self) -> bool:
        """True when the LLM asked for at least one tool invocation."""
        return bool(self.tool_calls)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the response (raw payload excluded) as a plain dictionary."""
        return dict(
            content=self.content,
            tool_calls=[call.to_dict() for call in self.tool_calls],
            usage=self.usage,
            stop_reason=self.stop_reason,
            model=self.model,
        )
83
+
84
+
85
class BaseAPIAdapter(ABC):
    """
    Abstract base class for LLM API adapters.

    Adapters absorb the differences between provider wire formats:
    - OpenAI: /v1/chat/completions with "parameters" and "tool_calls"
    - Anthropic: /v1/messages with "input_schema" and "tool_use"

    Concrete adapters implement request formatting (messages, tools),
    response parsing, and tool-result formatting.
    """

    def __init__(self, base_url: str = ""):
        """
        Initialize the adapter.

        Args:
            base_url: Base URL for the API endpoint; a trailing slash is stripped.
        """
        # Normalize so api_endpoint can safely append paths.
        self._base_url = (base_url or "").rstrip("/")

    @property
    @abstractmethod
    def provider_name(self) -> str:
        """
        Get the provider name (e.g., 'openai', 'anthropic').

        Returns:
            Provider identifier string
        """
        pass

    @property
    @abstractmethod
    def api_endpoint(self) -> str:
        """
        Get the full API endpoint URL.

        Returns:
            Complete URL for API requests
        """
        pass

    @property
    @abstractmethod
    def tool_format(self) -> ToolCallingFormat:
        """
        Get the tool calling format used by this adapter.

        Returns:
            ToolCallingFormat enum value
        """
        pass

    @abstractmethod
    def format_request(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Build a provider-specific request payload.

        Args:
            messages: Conversation messages (role, content)
            tools: Tool definitions (optional)
            **kwargs: Additional parameters (model, temperature, max_tokens, etc.)

        Returns:
            Formatted request payload dictionary
        """
        pass

    @abstractmethod
    def parse_response(self, raw_response: Dict[str, Any]) -> AdapterResponse:
        """
        Normalize a raw API response.

        Args:
            raw_response: Raw JSON response from the API

        Returns:
            AdapterResponse with normalized fields
        """
        pass

    @abstractmethod
    def format_tool_definitions(
        self, tools: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Convert tool definitions to the provider-specific schema.

        OpenAI uses the 'parameters' key in the function schema;
        Anthropic uses 'input_schema'.

        Args:
            tools: Tool definitions in generic format

        Returns:
            Tool definitions in provider-specific format
        """
        pass

    @abstractmethod
    def format_tool_result(
        self, tool_id: str, result: Any, is_error: bool = False
    ) -> Dict[str, Any]:
        """
        Package a tool execution result for conversation continuation.

        Args:
            tool_id: ID of the tool call this is responding to
            result: Result from tool execution
            is_error: Whether the result is an error

        Returns:
            Formatted message dictionary for the conversation
        """
        pass

    def get_headers(self, api_token: Optional[str] = None) -> Dict[str, str]:
        """
        Build default HTTP headers for API requests.

        Args:
            api_token: API authentication token

        Returns:
            Dictionary of HTTP headers (Bearer auth when a token is given)
        """
        headers: Dict[str, str] = {"Content-Type": "application/json"}
        if api_token:
            headers["Authorization"] = f"Bearer {api_token}"
        return headers

    def validate_messages(self, messages: List[Dict[str, Any]]) -> bool:
        """
        Check that every message carries the required fields.

        Assistant messages may legitimately omit 'content' (e.g. pure
        tool-call turns); all other roles must include it.

        Args:
            messages: List of message dictionaries

        Returns:
            True if valid, raises ValueError if not
        """
        for index, message in enumerate(messages):
            if "role" not in message:
                raise ValueError(f"Message {index} missing 'role' field")
            if message["role"] != "assistant" and "content" not in message:
                raise ValueError(f"Message {index} missing 'content' field")
        return True
@@ -0,0 +1,326 @@
1
+ """
2
+ OpenAI API Adapter.
3
+
4
+ Handles the OpenAI-compatible API format:
5
+ - Endpoint: /v1/chat/completions
6
+ - Tool definitions use "parameters" key
7
+ - Responses have "tool_calls" array
8
+ - Tool results use role="tool" with tool_call_id
9
+ """
10
+
11
+ import json
12
+ import logging
13
+ from typing import Any, Dict, List, Optional
14
+
15
+ from .base import (
16
+ BaseAPIAdapter,
17
+ AdapterResponse,
18
+ ToolCallResult,
19
+ ToolCallingFormat,
20
+ )
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
class OpenAIAdapter(BaseAPIAdapter):
    """
    Adapter for OpenAI-compatible APIs.

    Compatible with:
    - OpenAI API (api.openai.com)
    - Local LLM servers (LM Studio, Ollama, vLLM, etc.)
    - Any OpenAI-compatible endpoint
    """

    def __init__(self, base_url: str = "http://localhost:1234"):
        """
        Initialize OpenAI adapter.

        Args:
            base_url: Base URL for the API (default: localhost:1234 for local LLMs)
        """
        super().__init__(base_url)

    @property
    def provider_name(self) -> str:
        """Provider identifier string."""
        return "openai"

    @property
    def api_endpoint(self) -> str:
        """Complete chat-completions URL built from the configured base URL."""
        return f"{self._base_url}/v1/chat/completions"

    @property
    def tool_format(self) -> ToolCallingFormat:
        """Tool calling format spoken by this adapter."""
        return ToolCallingFormat.OPENAI

    def format_request(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Format request for OpenAI API.

        Args:
            messages: Conversation messages
            tools: Tool definitions (optional)
            **kwargs: model, temperature, max_tokens, stream, tool_choice

        Returns:
            OpenAI-formatted request payload

        Raises:
            ValueError: If a message is missing required fields.
        """
        self.validate_messages(messages)

        payload: Dict[str, Any] = {
            "model": kwargs.get("model", "gpt-4"),
            "messages": self._format_messages(messages),
        }

        # Optional parameters — only included when explicitly provided.
        if "temperature" in kwargs:
            payload["temperature"] = kwargs["temperature"]

        # Falsy max_tokens (0/None) means "omit and let the server decide".
        if "max_tokens" in kwargs and kwargs["max_tokens"]:
            payload["max_tokens"] = kwargs["max_tokens"]

        if kwargs.get("stream", False):
            payload["stream"] = True

        # Tool configuration: tools imply a tool_choice (default "auto").
        if tools:
            payload["tools"] = self.format_tool_definitions(tools)
            payload["tool_choice"] = kwargs.get("tool_choice", "auto")

        return payload

    def _format_messages(
        self, messages: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Format messages for OpenAI API.

        Handles:
        - System, user, assistant messages
        - Tool call messages (assistant turns carrying "tool_calls")
        - Tool result messages (role="tool" with tool_call_id)

        Messages with unrecognized roles are silently dropped.

        Args:
            messages: Raw conversation messages

        Returns:
            OpenAI-formatted messages
        """
        formatted = []

        for msg in messages:
            role = msg.get("role", "user")
            content = msg.get("content", "")

            if role == "system":
                formatted.append({"role": "system", "content": content})
            elif role == "user":
                formatted.append({"role": "user", "content": content})
            elif role == "assistant":
                assistant_msg: Dict[str, Any] = {
                    "role": "assistant",
                    "content": content,
                }
                # Preserve tool_calls so the server can match tool results.
                if "tool_calls" in msg:
                    assistant_msg["tool_calls"] = msg["tool_calls"]
                formatted.append(assistant_msg)
            elif role == "tool":
                formatted.append({
                    "role": "tool",
                    "tool_call_id": msg.get("tool_call_id", ""),
                    # Tool results must be strings for the OpenAI API.
                    "content": content if isinstance(content, str) else json.dumps(content),
                })

        return formatted

    def parse_response(self, raw_response: Dict[str, Any]) -> AdapterResponse:
        """
        Parse OpenAI API response.

        Args:
            raw_response: Raw JSON from OpenAI API

        Returns:
            Unified AdapterResponse (stop_reason="error"/"format_error" on failures)
        """
        # Handle error responses
        if "error" in raw_response:
            error_msg = raw_response["error"].get("message", "Unknown error")
            logger.error(f"OpenAI API error: {error_msg}")
            return AdapterResponse(
                content=f"API Error: {error_msg}",
                stop_reason="error",
                raw_response=raw_response,
            )

        # Check if this looks like an Anthropic response (wrong adapter)
        if "content" in raw_response and isinstance(raw_response.get("content"), list):
            # Anthropic returns content as array of blocks, OpenAI returns choices
            if "choices" not in raw_response:
                logger.error("FORMAT MISMATCH: Got Anthropic response but using OpenAI adapter")
                return AdapterResponse(
                    content="CONFIG ERROR: Your profile has tool_format='openai' but the server "
                    "returned an Anthropic-style response.\n\n"
                    "FIX: Run /profile, select this profile, press 'e' to edit, "
                    "change Tool Format to 'anthropic', then Ctrl+S to save.",
                    stop_reason="format_error",
                    raw_response=raw_response,
                )

        # Extract choice
        choices = raw_response.get("choices", [])
        if not choices:
            logger.warning("Empty choices in response")
            return AdapterResponse(
                content="",
                stop_reason="unknown",
                raw_response=raw_response,
            )

        choice = choices[0]
        message = choice.get("message", {})

        # content may be null when the model only emits tool calls.
        content = message.get("content", "") or ""

        # Extract tool calls.
        # BUG FIX: some OpenAI-compatible servers send "tool_calls": null;
        # the previous `if "tool_calls" in message` + direct iteration raised
        # TypeError on None. `or []` covers both the missing and null cases.
        tool_calls: List[ToolCallResult] = []
        for tc in message.get("tool_calls") or []:
            try:
                arguments = tc.get("function", {}).get("arguments", "{}")
                # Arguments arrive JSON-encoded from real servers; some
                # local stacks already decode them to a dict.
                if isinstance(arguments, str):
                    arguments = json.loads(arguments)

                tool_calls.append(
                    ToolCallResult(
                        tool_id=tc.get("id", ""),
                        tool_name=tc.get("function", {}).get("name", ""),
                        arguments=arguments,
                    )
                )
            except json.JSONDecodeError as e:
                # Skip malformed calls rather than failing the whole response.
                logger.warning(f"Failed to parse tool arguments: {e}")
                continue

        # Extract usage
        usage = raw_response.get("usage", {})

        # Debug: Log token usage to verify API format
        if usage:
            logger.info(f"Token usage from API: {usage}")
        else:
            logger.warning("No usage data in API response")

        # Map finish_reason to the provider-neutral stop_reason vocabulary;
        # unknown values pass through unchanged.
        finish_reason = choice.get("finish_reason", "unknown")
        stop_reason_map = {
            "stop": "end_turn",
            "tool_calls": "tool_use",
            "length": "max_tokens",
            "content_filter": "content_filter",
        }
        stop_reason = stop_reason_map.get(finish_reason, finish_reason)

        return AdapterResponse(
            content=content,
            tool_calls=tool_calls,
            usage={
                "prompt_tokens": usage.get("prompt_tokens", 0),
                "completion_tokens": usage.get("completion_tokens", 0),
                "total_tokens": usage.get("total_tokens", 0),
            },
            stop_reason=stop_reason,
            raw_response=raw_response,
            model=raw_response.get("model", ""),
        )

    def format_tool_definitions(
        self, tools: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Convert tool definitions to OpenAI format.

        OpenAI format:
            {
                "type": "function",
                "function": {
                    "name": "...",
                    "description": "...",
                    "parameters": {...}  # JSON Schema
                }
            }

        Args:
            tools: Generic tool definitions

        Returns:
            OpenAI-formatted tool definitions
        """
        formatted = []

        for tool in tools:
            # Accept both generic ("parameters") and Anthropic-style
            # ("input_schema") definitions.
            parameters = tool.get("parameters") or tool.get("input_schema", {})

            formatted.append({
                "type": "function",
                "function": {
                    "name": tool.get("name", ""),
                    "description": tool.get("description", ""),
                    "parameters": parameters,
                },
            })

        return formatted

    def format_tool_result(
        self, tool_id: str, result: Any, is_error: bool = False
    ) -> Dict[str, Any]:
        """
        Format tool result for OpenAI API.

        OpenAI uses role="tool" with tool_call_id.

        Args:
            tool_id: ID of the tool call
            result: Tool execution result
            is_error: Whether result is an error

        Returns:
            OpenAI-formatted tool result message
        """
        content = result if isinstance(result, str) else json.dumps(result)

        if is_error:
            content = f"Error: {content}"

        return {
            "role": "tool",
            "tool_call_id": tool_id,
            "content": content,
        }

    def get_headers(self, api_token: Optional[str] = None) -> Dict[str, str]:
        """
        Get headers for OpenAI API requests.

        Identical to the base implementation; kept as an explicit override
        so provider-specific headers can be added here later.

        Args:
            api_token: OpenAI API key

        Returns:
            HTTP headers dictionary (Bearer auth when a key is given)
        """
        headers = {
            "Content-Type": "application/json",
        }
        if api_token:
            headers["Authorization"] = f"Bearer {api_token}"
        return headers