cua-agent 0.3.2__py3-none-any.whl → 0.4.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of cua-agent might be problematic.

Files changed (111)
  1. agent/__init__.py +15 -51
  2. agent/__main__.py +21 -0
  3. agent/adapters/__init__.py +9 -0
  4. agent/adapters/huggingfacelocal_adapter.py +229 -0
  5. agent/agent.py +577 -0
  6. agent/callbacks/__init__.py +17 -0
  7. agent/callbacks/base.py +153 -0
  8. agent/callbacks/budget_manager.py +44 -0
  9. agent/callbacks/image_retention.py +139 -0
  10. agent/callbacks/logging.py +247 -0
  11. agent/callbacks/pii_anonymization.py +259 -0
  12. agent/callbacks/trajectory_saver.py +305 -0
  13. agent/cli.py +290 -0
  14. agent/computer_handler.py +107 -0
  15. agent/decorators.py +90 -0
  16. agent/loops/__init__.py +11 -0
  17. agent/loops/anthropic.py +728 -0
  18. agent/loops/omniparser.py +339 -0
  19. agent/loops/openai.py +95 -0
  20. agent/loops/uitars.py +688 -0
  21. agent/responses.py +207 -0
  22. agent/types.py +79 -0
  23. agent/ui/__init__.py +7 -1
  24. agent/ui/gradio/__init__.py +6 -19
  25. agent/ui/gradio/app.py +80 -1299
  26. agent/ui/gradio/ui_components.py +703 -0
  27. cua_agent-0.4.0b2.dist-info/METADATA +424 -0
  28. cua_agent-0.4.0b2.dist-info/RECORD +30 -0
  29. agent/core/__init__.py +0 -27
  30. agent/core/agent.py +0 -210
  31. agent/core/base.py +0 -217
  32. agent/core/callbacks.py +0 -200
  33. agent/core/experiment.py +0 -249
  34. agent/core/factory.py +0 -122
  35. agent/core/messages.py +0 -332
  36. agent/core/provider_config.py +0 -21
  37. agent/core/telemetry.py +0 -142
  38. agent/core/tools/__init__.py +0 -21
  39. agent/core/tools/base.py +0 -74
  40. agent/core/tools/bash.py +0 -52
  41. agent/core/tools/collection.py +0 -46
  42. agent/core/tools/computer.py +0 -113
  43. agent/core/tools/edit.py +0 -67
  44. agent/core/tools/manager.py +0 -56
  45. agent/core/tools.py +0 -32
  46. agent/core/types.py +0 -88
  47. agent/core/visualization.py +0 -197
  48. agent/providers/__init__.py +0 -4
  49. agent/providers/anthropic/__init__.py +0 -6
  50. agent/providers/anthropic/api/client.py +0 -360
  51. agent/providers/anthropic/api/logging.py +0 -150
  52. agent/providers/anthropic/api_handler.py +0 -140
  53. agent/providers/anthropic/callbacks/__init__.py +0 -5
  54. agent/providers/anthropic/callbacks/manager.py +0 -65
  55. agent/providers/anthropic/loop.py +0 -568
  56. agent/providers/anthropic/prompts.py +0 -23
  57. agent/providers/anthropic/response_handler.py +0 -226
  58. agent/providers/anthropic/tools/__init__.py +0 -33
  59. agent/providers/anthropic/tools/base.py +0 -88
  60. agent/providers/anthropic/tools/bash.py +0 -66
  61. agent/providers/anthropic/tools/collection.py +0 -34
  62. agent/providers/anthropic/tools/computer.py +0 -396
  63. agent/providers/anthropic/tools/edit.py +0 -326
  64. agent/providers/anthropic/tools/manager.py +0 -54
  65. agent/providers/anthropic/tools/run.py +0 -42
  66. agent/providers/anthropic/types.py +0 -16
  67. agent/providers/anthropic/utils.py +0 -381
  68. agent/providers/omni/__init__.py +0 -8
  69. agent/providers/omni/api_handler.py +0 -42
  70. agent/providers/omni/clients/anthropic.py +0 -103
  71. agent/providers/omni/clients/base.py +0 -35
  72. agent/providers/omni/clients/oaicompat.py +0 -195
  73. agent/providers/omni/clients/ollama.py +0 -122
  74. agent/providers/omni/clients/openai.py +0 -155
  75. agent/providers/omni/clients/utils.py +0 -25
  76. agent/providers/omni/image_utils.py +0 -34
  77. agent/providers/omni/loop.py +0 -990
  78. agent/providers/omni/parser.py +0 -307
  79. agent/providers/omni/prompts.py +0 -64
  80. agent/providers/omni/tools/__init__.py +0 -30
  81. agent/providers/omni/tools/base.py +0 -29
  82. agent/providers/omni/tools/bash.py +0 -74
  83. agent/providers/omni/tools/computer.py +0 -179
  84. agent/providers/omni/tools/manager.py +0 -61
  85. agent/providers/omni/utils.py +0 -236
  86. agent/providers/openai/__init__.py +0 -6
  87. agent/providers/openai/api_handler.py +0 -456
  88. agent/providers/openai/loop.py +0 -472
  89. agent/providers/openai/response_handler.py +0 -205
  90. agent/providers/openai/tools/__init__.py +0 -15
  91. agent/providers/openai/tools/base.py +0 -79
  92. agent/providers/openai/tools/computer.py +0 -326
  93. agent/providers/openai/tools/manager.py +0 -106
  94. agent/providers/openai/types.py +0 -36
  95. agent/providers/openai/utils.py +0 -98
  96. agent/providers/uitars/__init__.py +0 -1
  97. agent/providers/uitars/clients/base.py +0 -35
  98. agent/providers/uitars/clients/mlxvlm.py +0 -263
  99. agent/providers/uitars/clients/oaicompat.py +0 -214
  100. agent/providers/uitars/loop.py +0 -660
  101. agent/providers/uitars/prompts.py +0 -63
  102. agent/providers/uitars/tools/__init__.py +0 -1
  103. agent/providers/uitars/tools/computer.py +0 -283
  104. agent/providers/uitars/tools/manager.py +0 -60
  105. agent/providers/uitars/utils.py +0 -264
  106. agent/telemetry.py +0 -21
  107. agent/ui/__main__.py +0 -15
  108. cua_agent-0.3.2.dist-info/METADATA +0 -295
  109. cua_agent-0.3.2.dist-info/RECORD +0 -87
  110. {cua_agent-0.3.2.dist-info → cua_agent-0.4.0b2.dist-info}/WHEEL +0 -0
  111. {cua_agent-0.3.2.dist-info → cua_agent-0.4.0b2.dist-info}/entry_points.txt +0 -0
agent/providers/anthropic/utils.py
@@ -1,381 +0,0 @@
- """Utility functions for Anthropic message handling."""
-
- import logging
- import re
- from typing import Any, Dict, List, Optional, Tuple, cast
- from anthropic.types.beta import BetaMessage
- from ...core.types import AgentResponse
- from datetime import datetime
-
- # Configure module logger
- logger = logging.getLogger(__name__)
-
-
- def to_anthropic_format(
-     messages: List[Dict[str, Any]],
- ) -> Tuple[List[Dict[str, Any]], str]:
-     """Convert standard OpenAI format messages to Anthropic format.
-
-     Args:
-         messages: List of messages in OpenAI format
-
-     Returns:
-         Tuple containing (anthropic_messages, system_content)
-     """
-     result = []
-     system_content = ""
-
-     # Process messages in order to maintain conversation flow
-     previous_assistant_tool_use_ids = set()  # Track tool_use_ids in the previous assistant message
-
-     for i, msg in enumerate(messages):
-         role = msg.get("role", "")
-         content = msg.get("content", "")
-
-         if role == "system":
-             # Collect system messages for later use
-             system_content += content + "\n"
-             continue
-
-         if role == "assistant":
-             # Track tool_use_ids in this assistant message for the next user message
-             previous_assistant_tool_use_ids = set()
-             if isinstance(content, list):
-                 for item in content:
-                     if isinstance(item, dict) and item.get("type") == "tool_use" and "id" in item:
-                         previous_assistant_tool_use_ids.add(item["id"])
-
-         if role in ["user", "assistant"]:
-             anthropic_msg = {"role": role}
-
-             # Convert content based on type
-             if isinstance(content, str):
-                 # Simple text content
-                 anthropic_msg["content"] = [{"type": "text", "text": content}]
-             elif isinstance(content, list):
-                 # Convert complex content
-                 anthropic_content = []
-                 for item in content:
-                     item_type = item.get("type", "")
-
-                     if item_type == "text":
-                         anthropic_content.append({"type": "text", "text": item.get("text", "")})
-                     elif item_type == "image_url":
-                         # Convert OpenAI image format to Anthropic
-                         image_url = item.get("image_url", {}).get("url", "")
-                         if image_url.startswith("data:"):
-                             # Extract base64 data and media type
-                             match = re.match(r"data:(.+);base64,(.+)", image_url)
-                             if match:
-                                 media_type, data = match.groups()
-                                 anthropic_content.append(
-                                     {
-                                         "type": "image",
-                                         "source": {
-                                             "type": "base64",
-                                             "media_type": media_type,
-                                             "data": data,
-                                         },
-                                     }
-                                 )
-                         else:
-                             # Regular URL
-                             anthropic_content.append(
-                                 {
-                                     "type": "image",
-                                     "source": {
-                                         "type": "url",
-                                         "url": image_url,
-                                     },
-                                 }
-                             )
-                     elif item_type == "tool_use":
-                         # Always include tool_use blocks
-                         anthropic_content.append(item)
-                     elif item_type == "tool_result":
-                         # Check if this is a user message AND if the tool_use_id exists in the previous assistant message
-                         tool_use_id = item.get("tool_use_id")
-
-                         # Only include tool_result if it references a tool_use from the immediately preceding assistant message
-                         if (
-                             role == "user"
-                             and tool_use_id
-                             and tool_use_id in previous_assistant_tool_use_ids
-                         ):
-                             anthropic_content.append(item)
-                         else:
-                             content_text = "Tool Result: "
-                             if "content" in item:
-                                 if isinstance(item["content"], list):
-                                     for content_item in item["content"]:
-                                         if (
-                                             isinstance(content_item, dict)
-                                             and content_item.get("type") == "text"
-                                         ):
-                                             content_text += content_item.get("text", "")
-                                 elif isinstance(item["content"], str):
-                                     content_text += item["content"]
-                             anthropic_content.append({"type": "text", "text": content_text})
-
-                 anthropic_msg["content"] = anthropic_content
-
-             result.append(anthropic_msg)
-
-     return result, system_content
-
-
- def from_anthropic_format(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
-     """Convert Anthropic format messages to standard OpenAI format.
-
-     Args:
-         messages: List of messages in Anthropic format
-
-     Returns:
-         List of messages in OpenAI format
-     """
-     result = []
-
-     for msg in messages:
-         role = msg.get("role", "")
-         content = msg.get("content", [])
-
-         if role in ["user", "assistant"]:
-             openai_msg = {"role": role}
-
-             # Simple case: single text block
-             if len(content) == 1 and content[0].get("type") == "text":
-                 openai_msg["content"] = content[0].get("text", "")
-             else:
-                 # Complex case: multiple blocks or non-text
-                 openai_content = []
-                 for item in content:
-                     item_type = item.get("type", "")
-
-                     if item_type == "text":
-                         openai_content.append({"type": "text", "text": item.get("text", "")})
-                     elif item_type == "image":
-                         # Convert Anthropic image to OpenAI format
-                         source = item.get("source", {})
-                         if source.get("type") == "base64":
-                             media_type = source.get("media_type", "image/png")
-                             data = source.get("data", "")
-                             openai_content.append(
-                                 {
-                                     "type": "image_url",
-                                     "image_url": {"url": f"data:{media_type};base64,{data}"},
-                                 }
-                             )
-                         else:
-                             # URL
-                             openai_content.append(
-                                 {
-                                     "type": "image_url",
-                                     "image_url": {"url": source.get("url", "")},
-                                 }
-                             )
-                     elif item_type in ["tool_use", "tool_result"]:
-                         # Pass through tool-related content
-                         openai_content.append(item)
-
-                 openai_msg["content"] = openai_content
-
-             result.append(openai_msg)
-
-     return result
-
-
- async def to_agent_response_format(
-     response: BetaMessage,
-     messages: List[Dict[str, Any]],
-     parsed_screen: Optional[dict] = None,
-     parser: Optional[Any] = None,
-     model: Optional[str] = None,
- ) -> AgentResponse:
-     """Convert an Anthropic response to the standard agent response format.
-
-     Args:
-         response: The Anthropic API response (BetaMessage)
-         messages: List of messages in standard format
-         parsed_screen: Optional pre-parsed screen information
-         parser: Optional parser instance for coordinate calculation
-         model: Optional model name
-
-     Returns:
-         A response formatted according to the standard agent response format
-     """
-     # Create unique IDs for this response
-     response_id = f"resp_{datetime.now().strftime('%Y%m%d%H%M%S')}_{id(response)}"
-     reasoning_id = f"rs_{response_id}"
-     action_id = f"cu_{response_id}"
-     call_id = f"call_{response_id}"
-
-     # Extract content and reasoning from Anthropic response
-     content = []
-     reasoning_text = None
-     action_details = None
-
-     for block in response.content:
-         if block.type == "text":
-             # Use the first text block as reasoning
-             if reasoning_text is None:
-                 reasoning_text = block.text
-             content.append({"type": "text", "text": block.text})
-         elif block.type == "tool_use" and block.name == "computer":
-             try:
-                 input_dict = cast(Dict[str, Any], block.input)
-                 action = input_dict.get("action", "").lower()
-
-                 # Extract coordinates from coordinate list if provided
-                 coordinates = input_dict.get("coordinate", [100, 100])
-                 x, y = coordinates if len(coordinates) == 2 else (100, 100)
-
-                 if action == "screenshot":
-                     action_details = {
-                         "type": "screenshot",
-                     }
-                 elif action in ["click", "left_click", "right_click", "double_click"]:
-                     action_details = {
-                         "type": "click",
-                         "button": "left" if action in ["click", "left_click"] else "right",
-                         "double": action == "double_click",
-                         "x": x,
-                         "y": y,
-                     }
-                 elif action == "type":
-                     action_details = {
-                         "type": "type",
-                         "text": input_dict.get("text", ""),
-                     }
-                 elif action == "key":
-                     action_details = {
-                         "type": "hotkey",
-                         "keys": [input_dict.get("text", "")],
-                     }
-                 elif action == "scroll":
-                     scroll_amount = input_dict.get("scroll_amount", 1)
-                     scroll_direction = input_dict.get("scroll_direction", "down")
-                     delta_y = scroll_amount if scroll_direction == "down" else -scroll_amount
-                     action_details = {
-                         "type": "scroll",
-                         "x": x,
-                         "y": y,
-                         "delta_x": 0,
-                         "delta_y": delta_y,
-                     }
-                 elif action == "move":
-                     action_details = {
-                         "type": "move",
-                         "x": x,
-                         "y": y,
-                     }
-             except Exception as e:
-                 logger.error(f"Error extracting action details: {str(e)}")
-
-     # Create output items with reasoning
-     output_items = []
-     if reasoning_text:
-         output_items.append(
-             {
-                 "type": "reasoning",
-                 "id": reasoning_id,
-                 "summary": [
-                     {
-                         "type": "summary_text",
-                         "text": reasoning_text,
-                     }
-                 ],
-             }
-         )
-
-     # Add computer_call item with extracted or default action
-     computer_call = {
-         "type": "computer_call",
-         "id": action_id,
-         "call_id": call_id,
-         "action": action_details or {"type": "none", "description": "No action specified"},
-         "pending_safety_checks": [],
-         "status": "completed",
-     }
-     output_items.append(computer_call)
-
-     # Create the standard response format
-     standard_response = {
-         "id": response_id,
-         "object": "response",
-         "created_at": int(datetime.now().timestamp()),
-         "status": "completed",
-         "error": None,
-         "incomplete_details": None,
-         "instructions": None,
-         "max_output_tokens": None,
-         "model": model or "anthropic-default",
-         "output": output_items,
-         "parallel_tool_calls": True,
-         "previous_response_id": None,
-         "reasoning": {"effort": "medium", "generate_summary": "concise"},
-         "store": True,
-         "temperature": 1.0,
-         "text": {"format": {"type": "text"}},
-         "tool_choice": "auto",
-         "tools": [
-             {
-                 "type": "computer_use_preview",
-                 "display_height": 768,
-                 "display_width": 1024,
-                 "environment": "mac",
-             }
-         ],
-         "top_p": 1.0,
-         "truncation": "auto",
-         "usage": {
-             "input_tokens": 0,
-             "input_tokens_details": {"cached_tokens": 0},
-             "output_tokens": 0,
-             "output_tokens_details": {"reasoning_tokens": 0},
-             "total_tokens": 0,
-         },
-         "user": None,
-         "metadata": {},
-         "response": {
-             "choices": [
-                 {
-                     "message": {
-                         "role": "assistant",
-                         "content": content,
-                         "tool_calls": [],
-                     },
-                     "finish_reason": response.stop_reason or "stop",
-                 }
-             ]
-         },
-     }
-
-     if response.usage:
-         standard_response["usage"] = {
-             "input_tokens": response.usage.input_tokens,
-             "input_tokens_details": {
-                 "cached_tokens": response.usage.cache_creation_input_tokens,
-             },
-             "output_tokens": response.usage.output_tokens,
-             "output_tokens_details": {
-                 "reasoning_tokens": 0
-             },
-             "total_tokens": response.usage.input_tokens + response.usage.output_tokens,
-             **dict(response.usage),
-         }
-
-     # Add tool calls if present
-     tool_calls = []
-     for block in response.content:
-         if hasattr(block, "type") and block.type == "tool_use":
-             tool_calls.append(
-                 {
-                     "id": f"call_{block.id}",
-                     "type": "function",
-                     "function": {"name": block.name, "arguments": block.input},
-                 }
-             )
-     if tool_calls:
-         standard_response["response"]["choices"][0]["message"]["tool_calls"] = tool_calls
-
-     return cast(AgentResponse, standard_response)
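
For context, the core job of the removed `to_anthropic_format` helper was translating OpenAI-style content blocks into Anthropic blocks. A minimal, self-contained sketch of the image-block half of that conversion, using the same data-URL regex as above (the function name below is illustrative, not part of cua-agent):

```python
import re
from typing import Any, Dict


def image_url_to_anthropic(item: Dict[str, Any]) -> Dict[str, Any]:
    """Convert an OpenAI-style image_url block to an Anthropic image block.

    Mirrors the logic of the removed to_anthropic_format helper; the name is
    illustrative and not part of cua-agent.
    """
    url = item.get("image_url", {}).get("url", "")
    if url.startswith("data:"):
        # Split a data URL into its media type and base64 payload
        match = re.match(r"data:(.+);base64,(.+)", url)
        if match:
            media_type, data = match.groups()
            return {
                "type": "image",
                "source": {"type": "base64", "media_type": media_type, "data": data},
            }
    # Plain URLs pass through as a URL source
    return {"type": "image", "source": {"type": "url", "url": url}}


# Example: a base64 data URL becomes an Anthropic base64 image source
block = image_url_to_anthropic(
    {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0KGgo="}}
)
print(block["source"]["media_type"])  # image/png
```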
agent/providers/omni/__init__.py
@@ -1,8 +0,0 @@
- """Omni provider implementation."""
-
- from ...core.types import LLMProvider
- from .image_utils import (
-     decode_base64_image,
- )
-
- __all__ = ["LLMProvider", "decode_base64_image"]
agent/providers/omni/api_handler.py
@@ -1,42 +0,0 @@
- """API handling for Omni provider."""
-
- import logging
- from typing import Any, Dict, List
-
- from .prompts import SYSTEM_PROMPT
-
- logger = logging.getLogger(__name__)
-
-
- class OmniAPIHandler:
-     """Handler for Omni API calls."""
-
-     def __init__(self, loop):
-         """Initialize the API handler.
-
-         Args:
-             loop: Parent loop instance
-         """
-         self.loop = loop
-
-     async def make_api_call(
-         self, messages: List[Dict[str, Any]], system_prompt: str = SYSTEM_PROMPT
-     ) -> Any:
-         """Make an API call to the appropriate provider.
-
-         Args:
-             messages: List of messages in standard OpenAI format
-             system_prompt: System prompt to use
-
-         Returns:
-             API response
-         """
-         if not self.loop._make_api_call:
-             raise RuntimeError("Loop does not have _make_api_call method")
-
-         try:
-             # Use the loop's _make_api_call method with standard messages
-             return await self.loop._make_api_call(messages=messages, system_prompt=system_prompt)
-         except Exception as e:
-             logger.error(f"Error making API call: {str(e)}")
-             raise
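
The removed `OmniAPIHandler` was a thin delegation layer: it forwarded messages and a system prompt to its parent loop's `_make_api_call`. A minimal sketch of that pattern with a stub loop (all names and the default prompt below are illustrative, not cua-agent's API):

```python
import asyncio
from typing import Any, Dict, List


class StubLoop:
    """Stand-in for a provider loop exposing _make_api_call (illustrative only)."""

    async def _make_api_call(self, messages: List[Dict[str, Any]], system_prompt: str) -> Any:
        # A real loop would call its provider client here; this just echoes the inputs.
        return {"system": system_prompt, "last_user": messages[-1]["content"]}


class Handler:
    """Delegates API calls to the owning loop, as the removed OmniAPIHandler did."""

    def __init__(self, loop) -> None:
        self.loop = loop

    async def make_api_call(self, messages, system_prompt: str = "You are a helpful agent."):
        if not getattr(self.loop, "_make_api_call", None):
            raise RuntimeError("Loop does not have _make_api_call method")
        return await self.loop._make_api_call(messages=messages, system_prompt=system_prompt)


async def main() -> None:
    handler = Handler(StubLoop())
    print(await handler.make_api_call([{"role": "user", "content": "hello"}]))


asyncio.run(main())
```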
agent/providers/omni/clients/anthropic.py
@@ -1,103 +0,0 @@
- """Anthropic API client implementation."""
-
- import logging
- from typing import Any, Dict, List, Optional, Tuple, cast
- import asyncio
- from httpx import ConnectError, ReadTimeout
-
- from anthropic import AsyncAnthropic, Anthropic
- from anthropic.types import MessageParam
- from .base import BaseOmniClient
-
- logger = logging.getLogger(__name__)
-
-
- class AnthropicClient(BaseOmniClient):
-     """Client for making calls to Anthropic API."""
-
-     def __init__(self, api_key: str, model: str, max_retries: int = 3, retry_delay: float = 1.0):
-         """Initialize the Anthropic client.
-
-         Args:
-             api_key: Anthropic API key
-             model: Anthropic model name (e.g. "claude-3-opus-20240229")
-             max_retries: Maximum number of retries for API calls
-             retry_delay: Base delay between retries in seconds
-         """
-         if not model:
-             raise ValueError("Model name must be provided")
-
-         self.client = AsyncAnthropic(api_key=api_key)
-         self.model: str = model  # Add explicit type annotation
-         self.max_retries = max_retries
-         self.retry_delay = retry_delay
-
-     def _convert_message_format(self, messages: List[Dict[str, Any]]) -> List[MessageParam]:
-         """Convert messages from standard format to Anthropic format.
-
-         Args:
-             messages: Messages in standard format
-
-         Returns:
-             Messages in Anthropic format
-         """
-         anthropic_messages = []
-
-         for message in messages:
-             # Skip messages with empty content
-             if not message.get("content"):
-                 continue
-
-             if message["role"] == "user":
-                 anthropic_messages.append({"role": "user", "content": message["content"]})
-             elif message["role"] == "assistant":
-                 anthropic_messages.append({"role": "assistant", "content": message["content"]})
-
-         # Cast the list to the correct type expected by Anthropic
-         return cast(List[MessageParam], anthropic_messages)
-
-     async def run_interleaved(
-         self, messages: List[Dict[str, Any]], system: str, max_tokens: int
-     ) -> Any:
-         """Run model with interleaved conversation format.
-
-         Args:
-             messages: List of messages to process
-             system: System prompt
-             max_tokens: Maximum tokens to generate
-
-         Returns:
-             Model response
-         """
-         last_error = None
-
-         for attempt in range(self.max_retries):
-             try:
-                 # Convert messages to Anthropic format
-                 anthropic_messages = self._convert_message_format(messages)
-
-                 response = await self.client.messages.create(
-                     model=self.model,
-                     max_tokens=max_tokens,
-                     temperature=0,
-                     system=system,
-                     messages=anthropic_messages,
-                 )
-
-                 return response
-
-             except (ConnectError, ReadTimeout) as e:
-                 last_error = e
-                 logger.warning(
-                     f"Connection error on attempt {attempt + 1}/{self.max_retries}: {str(e)}"
-                 )
-                 if attempt < self.max_retries - 1:
-                     await asyncio.sleep(self.retry_delay * (attempt + 1))  # Exponential backoff
-                     continue
-
-             except Exception as e:
-                 logger.error(f"Unexpected error in Anthropic API call: {str(e)}")
-                 raise RuntimeError(f"Anthropic API call failed: {str(e)}")
-
-         # If we get here, all retries failed
-         raise RuntimeError(f"Connection error after {self.max_retries} retries: {str(last_error)}")
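
The removed client's main behavior was retrying transient connection errors before giving up. A generic sketch of that retry loop, decoupled from the Anthropic SDK (the helper name and signature are illustrative); note that the delay schedule shown above, `retry_delay * (attempt + 1)`, grows linearly per attempt:

```python
import asyncio
import logging
from typing import Awaitable, Callable, Optional, TypeVar

from httpx import ConnectError, ReadTimeout

logger = logging.getLogger(__name__)
T = TypeVar("T")


async def call_with_retries(
    make_call: Callable[[], Awaitable[T]],
    max_retries: int = 3,
    retry_delay: float = 1.0,
) -> T:
    """Retry an async call on transient connection errors.

    Mirrors the removed AnthropicClient.run_interleaved loop: sleep
    retry_delay * (attempt + 1) between attempts, then raise.
    """
    last_error: Optional[Exception] = None
    for attempt in range(max_retries):
        try:
            return await make_call()
        except (ConnectError, ReadTimeout) as e:
            last_error = e
            logger.warning("Connection error on attempt %d/%d: %s", attempt + 1, max_retries, e)
            if attempt < max_retries - 1:
                await asyncio.sleep(retry_delay * (attempt + 1))
    raise RuntimeError(f"Connection error after {max_retries} retries: {last_error}")
```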
agent/providers/omni/clients/base.py
@@ -1,35 +0,0 @@
- """Base client implementation for Omni providers."""
-
- import logging
- from typing import Dict, List, Optional, Any, Tuple
-
- logger = logging.getLogger(__name__)
-
-
- class BaseOmniClient:
-     """Base class for provider-specific clients."""
-
-     def __init__(self, api_key: Optional[str] = None, model: Optional[str] = None):
-         """Initialize base client.
-
-         Args:
-             api_key: Optional API key
-             model: Optional model name
-         """
-         self.api_key = api_key
-         self.model = model
-
-     async def run_interleaved(
-         self, messages: List[Dict[str, Any]], system: str, max_tokens: Optional[int] = None
-     ) -> Dict[str, Any]:
-         """Run interleaved chat completion.
-
-         Args:
-             messages: List of message dicts
-             system: System prompt
-             max_tokens: Optional max tokens override
-
-         Returns:
-             Response dict
-         """
-         raise NotImplementedError
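
Concrete provider clients (such as the `AnthropicClient` above) subclassed this base and implemented `run_interleaved`. A toy sketch of such a subclass, assuming the `BaseOmniClient` definition above is in scope (the echo behavior is illustrative, not a real provider):

```python
import asyncio
from typing import Any, Dict, List, Optional


# Assumes the BaseOmniClient class shown above is importable or defined in scope.
class EchoOmniClient(BaseOmniClient):
    """Toy subclass showing the run_interleaved contract (not a real provider)."""

    async def run_interleaved(
        self, messages: List[Dict[str, Any]], system: str, max_tokens: Optional[int] = None
    ) -> Dict[str, Any]:
        # A real client would call its provider's API here (cf. AnthropicClient above).
        last = messages[-1].get("content", "") if messages else ""
        return {"role": "assistant", "content": f"echo: {last}"}


async def demo() -> None:
    client = EchoOmniClient(model="echo-1")
    reply = await client.run_interleaved([{"role": "user", "content": "hi"}], system="be brief")
    print(reply["content"])  # echo: hi


asyncio.run(demo())
```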