daita-agents 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. daita/__init__.py +216 -0
  2. daita/agents/__init__.py +33 -0
  3. daita/agents/base.py +743 -0
  4. daita/agents/substrate.py +1141 -0
  5. daita/cli/__init__.py +145 -0
  6. daita/cli/__main__.py +7 -0
  7. daita/cli/ascii_art.py +44 -0
  8. daita/cli/core/__init__.py +0 -0
  9. daita/cli/core/create.py +254 -0
  10. daita/cli/core/deploy.py +473 -0
  11. daita/cli/core/deployments.py +309 -0
  12. daita/cli/core/import_detector.py +219 -0
  13. daita/cli/core/init.py +481 -0
  14. daita/cli/core/logs.py +239 -0
  15. daita/cli/core/managed_deploy.py +709 -0
  16. daita/cli/core/run.py +648 -0
  17. daita/cli/core/status.py +421 -0
  18. daita/cli/core/test.py +239 -0
  19. daita/cli/core/webhooks.py +172 -0
  20. daita/cli/main.py +588 -0
  21. daita/cli/utils.py +541 -0
  22. daita/config/__init__.py +62 -0
  23. daita/config/base.py +159 -0
  24. daita/config/settings.py +184 -0
  25. daita/core/__init__.py +262 -0
  26. daita/core/decision_tracing.py +701 -0
  27. daita/core/exceptions.py +480 -0
  28. daita/core/focus.py +251 -0
  29. daita/core/interfaces.py +76 -0
  30. daita/core/plugin_tracing.py +550 -0
  31. daita/core/relay.py +779 -0
  32. daita/core/reliability.py +381 -0
  33. daita/core/scaling.py +459 -0
  34. daita/core/tools.py +554 -0
  35. daita/core/tracing.py +770 -0
  36. daita/core/workflow.py +1144 -0
  37. daita/display/__init__.py +1 -0
  38. daita/display/console.py +160 -0
  39. daita/execution/__init__.py +58 -0
  40. daita/execution/client.py +856 -0
  41. daita/execution/exceptions.py +92 -0
  42. daita/execution/models.py +317 -0
  43. daita/llm/__init__.py +60 -0
  44. daita/llm/anthropic.py +291 -0
  45. daita/llm/base.py +530 -0
  46. daita/llm/factory.py +101 -0
  47. daita/llm/gemini.py +355 -0
  48. daita/llm/grok.py +219 -0
  49. daita/llm/mock.py +172 -0
  50. daita/llm/openai.py +220 -0
  51. daita/plugins/__init__.py +141 -0
  52. daita/plugins/base.py +37 -0
  53. daita/plugins/base_db.py +167 -0
  54. daita/plugins/elasticsearch.py +849 -0
  55. daita/plugins/mcp.py +481 -0
  56. daita/plugins/mongodb.py +520 -0
  57. daita/plugins/mysql.py +362 -0
  58. daita/plugins/postgresql.py +342 -0
  59. daita/plugins/redis_messaging.py +500 -0
  60. daita/plugins/rest.py +537 -0
  61. daita/plugins/s3.py +770 -0
  62. daita/plugins/slack.py +729 -0
  63. daita/utils/__init__.py +18 -0
  64. daita_agents-0.2.0.dist-info/METADATA +409 -0
  65. daita_agents-0.2.0.dist-info/RECORD +69 -0
  66. daita_agents-0.2.0.dist-info/WHEEL +5 -0
  67. daita_agents-0.2.0.dist-info/entry_points.txt +2 -0
  68. daita_agents-0.2.0.dist-info/licenses/LICENSE +56 -0
  69. daita_agents-0.2.0.dist-info/top_level.txt +1 -0
daita/llm/anthropic.py ADDED
@@ -0,0 +1,291 @@
1
+ """
2
+ Anthropic LLM provider implementation with integrated tracing.
3
+ """
4
+ import os
5
+ import logging
6
+ from typing import Dict, Any, Optional
7
+
8
+ from ..core.exceptions import LLMError
9
+ from .base import BaseLLMProvider
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
class AnthropicProvider(BaseLLMProvider):
    """Anthropic LLM provider implementation with automatic call tracing."""

    def __init__(
        self,
        model: str = "claude-3-sonnet-20240229",
        api_key: Optional[str] = None,
        **kwargs
    ):
        """
        Initialize Anthropic provider.

        Args:
            model: Anthropic model name
            api_key: Anthropic API key; falls back to the ANTHROPIC_API_KEY
                environment variable when not supplied
            **kwargs: Additional Anthropic-specific parameters (e.g. timeout)
        """
        # Resolve the key here so that _validate_api_key() later checks the
        # effective value, not just the constructor argument.
        api_key = api_key or os.getenv("ANTHROPIC_API_KEY")

        super().__init__(model=model, api_key=api_key, **kwargs)

        # Anthropic-specific default parameters
        self.default_params.update({
            'timeout': kwargs.get('timeout', 60)
        })

        # Client is created lazily (see the `client` property) so that merely
        # importing this module never requires the anthropic package.
        self._client = None

    @property
    def client(self):
        """Lazy-load the Anthropic async client.

        Raises:
            LLMError: If the anthropic package is not installed.
        """
        if self._client is None:
            try:
                import anthropic
            except ImportError as e:
                # Chain the cause so the original ImportError is preserved.
                raise LLMError(
                    "Anthropic package not installed. Install with: pip install anthropic"
                ) from e
            self._validate_api_key()
            self._client = anthropic.AsyncAnthropic(api_key=self.api_key)
            logger.debug("Anthropic client initialized")
        return self._client

    async def _generate_impl(self, prompt: str, **kwargs) -> str:
        """
        Provider-specific implementation of text generation for Anthropic.

        This method contains the actual Anthropic API call logic and is
        automatically wrapped with tracing by the base class generate() method.

        Args:
            prompt: Input prompt
            **kwargs: Optional parameters (max_tokens, temperature, timeout, ...)

        Returns:
            Generated text response

        Raises:
            LLMError: If the API call fails.
        """
        try:
            # Merge call-site kwargs over the provider defaults
            params = self._merge_params(kwargs)

            response = await self.client.messages.create(
                model=self.model,
                # The Anthropic Messages API requires max_tokens; fall back to
                # 4096 (same default as the tool-calling path) instead of
                # passing None when no value is configured.
                max_tokens=params.get('max_tokens', 4096),
                temperature=params.get('temperature'),
                messages=[
                    {"role": "user", "content": prompt}
                ],
                timeout=params.get('timeout')
            )

            # Store usage for base class token extraction
            self._last_usage = response.usage

            return response.content[0].text

        except LLMError:
            # Already a provider error (e.g. missing package from the client
            # property) - don't re-wrap it with a misleading message.
            raise
        except Exception as e:
            logger.error(f"Anthropic generation failed: {str(e)}")
            raise LLMError(f"Anthropic generation failed: {str(e)}") from e

    async def generate_with_system(self, prompt: str, system_message: str, **kwargs) -> str:
        """
        Generate text with a system message using Anthropic's system parameter.

        Note: This method bypasses automatic tracing since it's not part of the
        base interface. If you want tracing for system messages, call the base
        generate() method with a formatted prompt instead.

        Args:
            prompt: User prompt
            system_message: System message to set context
            **kwargs: Optional parameters

        Returns:
            Generated text

        Raises:
            LLMError: If the API call fails.
        """
        try:
            # Merge call-site kwargs over the provider defaults
            params = self._merge_params(kwargs)

            # Make API call with the dedicated system parameter
            response = await self.client.messages.create(
                model=self.model,
                # max_tokens is required by the API; default to 4096 for
                # consistency with the other call paths.
                max_tokens=params.get('max_tokens', 4096),
                temperature=params.get('temperature'),
                system=system_message,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                timeout=params.get('timeout')
            )

            # Store usage for potential token extraction
            self._last_usage = response.usage

            return response.content[0].text

        except LLMError:
            # Don't re-wrap errors already reported as LLMError
            raise
        except Exception as e:
            logger.error(f"Anthropic generation with system message failed: {str(e)}")
            raise LLMError(f"Anthropic generation failed: {str(e)}") from e

    def _get_last_token_usage(self) -> Dict[str, int]:
        """
        Override base class method to handle Anthropic's token format.

        Anthropic uses input_tokens and output_tokens format, different from
        OpenAI's prompt_tokens / completion_tokens.

        Returns:
            Dict with 'total_tokens', 'prompt_tokens', 'completion_tokens'.
        """
        if self._last_usage:
            # Anthropic format: input_tokens + output_tokens
            input_tokens = getattr(self._last_usage, 'input_tokens', 0)
            output_tokens = getattr(self._last_usage, 'output_tokens', 0)

            return {
                'total_tokens': input_tokens + output_tokens,
                'prompt_tokens': input_tokens,       # input_tokens -> prompt_tokens
                'completion_tokens': output_tokens,  # output_tokens -> completion_tokens
            }

        # No usage recorded yet - fall back to base class estimation
        return super()._get_last_token_usage()

    def _convert_tools_to_format(self, tools: list['AgentTool']) -> list[Dict[str, Any]]:
        """
        Convert AgentTool list to Anthropic tool format.

        Anthropic uses a different tool format than OpenAI; each AgentTool
        knows how to render itself via to_anthropic_tool().
        """
        return [tool.to_anthropic_tool() for tool in tools]

    async def _generate_with_tools_single(
        self,
        messages: list[Dict[str, Any]],
        tools: list[Dict[str, Any]],
        **kwargs
    ) -> Dict[str, Any]:
        """
        Anthropic-specific tool calling implementation.

        Args:
            messages: Conversation history in OpenAI format
            tools: Tool specifications in Anthropic format
            **kwargs: Optional parameters

        Returns:
            {
                "tool_calls": [...], # If LLM wants to call tools
                "content": "...",    # If LLM has final answer
            }

        Raises:
            LLMError: If the API call fails.
        """
        try:
            # Merge call-site kwargs over the provider defaults
            params = self._merge_params(kwargs)

            # Anthropic expects its own message shape for tool results
            anthropic_messages = self._convert_messages_to_anthropic(messages)

            response = await self.client.messages.create(
                model=self.model,
                messages=anthropic_messages,
                tools=tools,
                max_tokens=params.get('max_tokens', 4096),
                temperature=params.get('temperature'),
                timeout=params.get('timeout')
            )

            # Store usage for token tracking
            if hasattr(response, 'usage'):
                self._last_usage = response.usage

            # A response may interleave text and tool_use content blocks;
            # any tool_use block means the model wants to call tools.
            tool_use_blocks = [
                block for block in response.content
                if hasattr(block, 'type') and block.type == "tool_use"
            ]

            if tool_use_blocks:
                # LLM wants to call tools - normalize to the common format
                return {
                    "tool_calls": [
                        {
                            "id": block.id,
                            "name": block.name,
                            "arguments": block.input
                        }
                        for block in tool_use_blocks
                    ]
                }

            # LLM has a final answer - join all text blocks
            text_blocks = [
                block.text for block in response.content
                if hasattr(block, 'type') and block.type == "text"
            ]
            return {
                "content": "".join(text_blocks)
            }

        except LLMError:
            # Don't re-wrap errors already reported as LLMError
            raise
        except Exception as e:
            logger.error(f"Anthropic tool calling failed: {str(e)}")
            raise LLMError(f"Anthropic tool calling failed: {str(e)}") from e

    def _convert_messages_to_anthropic(
        self,
        messages: list[Dict[str, Any]]
    ) -> list[Dict[str, Any]]:
        """
        Convert OpenAI-style messages to Anthropic format.

        Anthropic uses a different message format, especially for tool
        results: tool output travels as a user message containing a
        tool_result content block, and assistant tool calls become
        tool_use content blocks.
        """
        anthropic_messages = []

        for msg in messages:
            if msg["role"] == "tool":
                # Tool result -> user message with a tool_result block
                anthropic_messages.append({
                    "role": "user",
                    "content": [
                        {
                            "type": "tool_result",
                            "tool_use_id": msg["tool_call_id"],
                            "content": msg["content"]
                        }
                    ]
                })
            elif msg["role"] == "assistant" and msg.get("tool_calls"):
                # Assistant tool calls (flat format) -> tool_use blocks
                content_blocks = []
                for tc in msg["tool_calls"]:
                    content_blocks.append({
                        "type": "tool_use",
                        "id": tc["id"],
                        "name": tc["name"],
                        "input": tc["arguments"]
                    })
                anthropic_messages.append({
                    "role": "assistant",
                    "content": content_blocks
                })
            else:
                # Regular user/assistant/system message - pass through as-is
                anthropic_messages.append(msg)

        return anthropic_messages

    @property
    def info(self) -> Dict[str, Any]:
        """Get information about the Anthropic provider."""
        base_info = super().info
        base_info.update({
            'provider_name': 'Anthropic',
            'api_compatible': 'Anthropic'
        })
        return base_info