daita-agents 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. daita/__init__.py +216 -0
  2. daita/agents/__init__.py +33 -0
  3. daita/agents/base.py +743 -0
  4. daita/agents/substrate.py +1141 -0
  5. daita/cli/__init__.py +145 -0
  6. daita/cli/__main__.py +7 -0
  7. daita/cli/ascii_art.py +44 -0
  8. daita/cli/core/__init__.py +0 -0
  9. daita/cli/core/create.py +254 -0
  10. daita/cli/core/deploy.py +473 -0
  11. daita/cli/core/deployments.py +309 -0
  12. daita/cli/core/import_detector.py +219 -0
  13. daita/cli/core/init.py +481 -0
  14. daita/cli/core/logs.py +239 -0
  15. daita/cli/core/managed_deploy.py +709 -0
  16. daita/cli/core/run.py +648 -0
  17. daita/cli/core/status.py +421 -0
  18. daita/cli/core/test.py +239 -0
  19. daita/cli/core/webhooks.py +172 -0
  20. daita/cli/main.py +588 -0
  21. daita/cli/utils.py +541 -0
  22. daita/config/__init__.py +62 -0
  23. daita/config/base.py +159 -0
  24. daita/config/settings.py +184 -0
  25. daita/core/__init__.py +262 -0
  26. daita/core/decision_tracing.py +701 -0
  27. daita/core/exceptions.py +480 -0
  28. daita/core/focus.py +251 -0
  29. daita/core/interfaces.py +76 -0
  30. daita/core/plugin_tracing.py +550 -0
  31. daita/core/relay.py +779 -0
  32. daita/core/reliability.py +381 -0
  33. daita/core/scaling.py +459 -0
  34. daita/core/tools.py +554 -0
  35. daita/core/tracing.py +770 -0
  36. daita/core/workflow.py +1144 -0
  37. daita/display/__init__.py +1 -0
  38. daita/display/console.py +160 -0
  39. daita/execution/__init__.py +58 -0
  40. daita/execution/client.py +856 -0
  41. daita/execution/exceptions.py +92 -0
  42. daita/execution/models.py +317 -0
  43. daita/llm/__init__.py +60 -0
  44. daita/llm/anthropic.py +291 -0
  45. daita/llm/base.py +530 -0
  46. daita/llm/factory.py +101 -0
  47. daita/llm/gemini.py +355 -0
  48. daita/llm/grok.py +219 -0
  49. daita/llm/mock.py +172 -0
  50. daita/llm/openai.py +220 -0
  51. daita/plugins/__init__.py +141 -0
  52. daita/plugins/base.py +37 -0
  53. daita/plugins/base_db.py +167 -0
  54. daita/plugins/elasticsearch.py +849 -0
  55. daita/plugins/mcp.py +481 -0
  56. daita/plugins/mongodb.py +520 -0
  57. daita/plugins/mysql.py +362 -0
  58. daita/plugins/postgresql.py +342 -0
  59. daita/plugins/redis_messaging.py +500 -0
  60. daita/plugins/rest.py +537 -0
  61. daita/plugins/s3.py +770 -0
  62. daita/plugins/slack.py +729 -0
  63. daita/utils/__init__.py +18 -0
  64. daita_agents-0.2.0.dist-info/METADATA +409 -0
  65. daita_agents-0.2.0.dist-info/RECORD +69 -0
  66. daita_agents-0.2.0.dist-info/WHEEL +5 -0
  67. daita_agents-0.2.0.dist-info/entry_points.txt +2 -0
  68. daita_agents-0.2.0.dist-info/licenses/LICENSE +56 -0
  69. daita_agents-0.2.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,92 @@
1
+ """
2
+ Execution-specific exceptions for the autonomous execution system.
3
+
4
+ These exceptions provide specific error handling for common issues that
5
+ can occur during programmatic agent execution.
6
+ """
7
+
8
+ from typing import Optional
9
+
10
+
11
class ExecutionError(Exception):
    """Base exception for execution-related errors.

    Carries an optional execution identifier and HTTP status code so
    callers can correlate a failure with a specific server-side execution.
    """

    def __init__(
        self,
        message: str,
        execution_id: Optional[str] = None,
        status_code: Optional[int] = None
    ):
        # Store the pieces for programmatic access, then delegate the
        # message to the base Exception so str()/args behave normally.
        self.message = message
        self.execution_id = execution_id
        self.status_code = status_code
        super().__init__(message)

    def __str__(self) -> str:
        if not self.execution_id:
            return f"ExecutionError: {self.message}"
        return f"ExecutionError (ID: {self.execution_id}): {self.message}"
29
+
30
+
31
class AuthenticationError(ExecutionError):
    """Raised when API key authentication fails."""

    def __init__(self, message: str = "Invalid API key or insufficient permissions"):
        # Authentication failures always map to HTTP 401.
        super().__init__(message, status_code=401)
36
+
37
+
38
class NotFoundError(ExecutionError):
    """Raised when a requested agent, workflow, or execution is not found."""

    def __init__(self, message: str, resource_type: str = "resource"):
        # Record what kind of resource was missing for clearer messages.
        self.resource_type = resource_type
        super().__init__(message, status_code=404)

    def __str__(self) -> str:
        return "NotFoundError ({}): {}".format(self.resource_type, self.message)
47
+
48
+
49
class ValidationError(ExecutionError):
    """Raised when request validation fails."""

    def __init__(self, message: str, field: Optional[str] = None):
        # Optionally records which request field failed validation.
        self.field = field
        super().__init__(message, status_code=400)

    def __str__(self) -> str:
        label = f"ValidationError ({self.field})" if self.field else "ValidationError"
        return f"{label}: {self.message}"
60
+
61
+
62
class RateLimitError(ExecutionError):
    """Raised when API rate limits are exceeded.

    Args:
        message: Human-readable description of the failure.
        retry_after: Seconds the caller should wait before retrying,
            when the server supplies a Retry-After hint.
    """

    def __init__(self, message: str = "Rate limit exceeded", retry_after: Optional[int] = None):
        self.retry_after = retry_after
        super().__init__(message, status_code=429)

    def __str__(self) -> str:
        # Compare against None (not truthiness) so a legitimate
        # "retry after 0 seconds" hint is still reported.
        if self.retry_after is not None:
            return f"RateLimitError: {self.message} (retry after {self.retry_after}s)"
        return f"RateLimitError: {self.message}"
73
+
74
+
75
class TimeoutError(ExecutionError):
    """Raised when an execution times out.

    NOTE: this name shadows the builtin ``TimeoutError`` inside this
    module; callers that also need the builtin should import this class
    under an alias.

    Args:
        message: Human-readable description of the failure.
        timeout_seconds: The timeout limit that was exceeded, if known.
    """

    def __init__(self, message: str = "Execution timeout", timeout_seconds: Optional[int] = None):
        self.timeout_seconds = timeout_seconds
        super().__init__(message, status_code=408)

    def __str__(self) -> str:
        # Explicit None check so a (degenerate) 0-second timeout is
        # still included in the message rather than silently dropped.
        if self.timeout_seconds is not None:
            return f"TimeoutError: {self.message} (timeout: {self.timeout_seconds}s)"
        return f"TimeoutError: {self.message}"
86
+
87
+
88
class ServerError(ExecutionError):
    """Raised when the server encounters an internal error."""

    def __init__(self, message: str = "Internal server error"):
        # Server-side failures always map to HTTP 500.
        super().__init__(message, status_code=500)
@@ -0,0 +1,317 @@
1
+ """
2
+ Data models for the autonomous execution system.
3
+
4
+ These models provide structured representations of execution results,
5
+ scheduled tasks, and webhook triggers.
6
+ """
7
+
8
+ from dataclasses import dataclass, field
9
+ from datetime import datetime, timezone
10
+ from typing import Dict, Any, Optional, Union
11
+ import json
12
+
13
+
14
@dataclass
class ExecutionResult:
    """
    Represents the result of an autonomous agent or workflow execution.

    This class provides a structured way to access execution results
    and metadata from programmatic agent executions.
    """

    execution_id: str
    status: str  # queued, running, completed, failed, cancelled
    target_type: str  # agent or workflow
    target_name: str

    # Result data
    result: Optional[Dict[str, Any]] = None
    error: Optional[str] = None

    # Timing information
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    duration_ms: Optional[int] = None

    # Resource usage
    memory_used_mb: Optional[float] = None
    cost_estimate: Optional[float] = None

    # Monitoring and debugging
    trace_id: Optional[str] = None
    dashboard_url: Optional[str] = None

    # Execution metadata
    execution_source: str = "autonomous_sdk"
    source_metadata: Dict[str, Any] = field(default_factory=dict)

    @staticmethod
    def _parse_dt(value: Optional[str]) -> Optional[datetime]:
        """Parse an ISO-8601 timestamp, tolerating a trailing 'Z' suffix."""
        if not value:
            return None
        return datetime.fromisoformat(value.replace('Z', '+00:00'))

    @property
    def is_complete(self) -> bool:
        """Check if execution is complete (success or failure)."""
        return self.status in ('completed', 'success', 'failed', 'cancelled')

    @property
    def is_success(self) -> bool:
        """Check if execution completed successfully."""
        return self.status in ('completed', 'success')

    @property
    def is_running(self) -> bool:
        """Check if execution is queued or currently running."""
        return self.status in ('queued', 'running')

    @property
    def duration_seconds(self) -> Optional[float]:
        """Duration in seconds, or None when the server sent no duration.

        Explicit None check so a legitimate 0 ms duration yields 0.0
        rather than None.
        """
        return self.duration_ms / 1000 if self.duration_ms is not None else None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ExecutionResult":
        """Create an ExecutionResult from API response data.

        Raises:
            KeyError: if a required field (execution_id, status,
                target_type, target_name) is missing from *data*.
        """
        # The API may return the result payload either as a JSON string
        # or as an already-decoded dict; normalize to a dict.
        result = data.get('result')
        if isinstance(result, str):
            try:
                result = json.loads(result)
            except (json.JSONDecodeError, TypeError):
                # Non-JSON text: preserve it under a well-known key.
                result = {"raw_output": result} if result else None

        return cls(
            execution_id=data['execution_id'],
            status=data['status'],
            target_type=data['target_type'],
            target_name=data['target_name'],
            result=result,
            error=data.get('error'),
            created_at=cls._parse_dt(data.get('created_at')),
            started_at=cls._parse_dt(data.get('started_at')),
            completed_at=cls._parse_dt(data.get('completed_at')),
            duration_ms=data.get('duration_ms'),
            memory_used_mb=data.get('memory_used_mb'),
            cost_estimate=data.get('cost_estimate'),
            trace_id=data.get('trace_id'),
            dashboard_url=data.get('dashboard_url'),
            execution_source=data.get('execution_source', 'autonomous_sdk'),
            source_metadata=data.get('source_metadata', {})
        )

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a JSON-serializable dictionary representation."""
        return {
            'execution_id': self.execution_id,
            'status': self.status,
            'target_type': self.target_type,
            'target_name': self.target_name,
            'result': self.result,
            'error': self.error,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'started_at': self.started_at.isoformat() if self.started_at else None,
            'completed_at': self.completed_at.isoformat() if self.completed_at else None,
            'duration_ms': self.duration_ms,
            'memory_used_mb': self.memory_used_mb,
            'cost_estimate': self.cost_estimate,
            'trace_id': self.trace_id,
            'dashboard_url': self.dashboard_url,
            'execution_source': self.execution_source,
            'source_metadata': self.source_metadata
        }

    def __repr__(self) -> str:
        return f"ExecutionResult(id={self.execution_id[:8]}..., status={self.status}, target={self.target_name})"
138
+
139
+
140
@dataclass
class ScheduledTask:
    """
    Represents a scheduled agent or workflow execution.

    Holds the configuration of a task registered through the YAML-based
    scheduling system, plus its timing and EventBridge metadata.
    """

    # Required fields (no defaults)
    task_id: str
    organization_id: int
    schedule: str  # cron expression

    # Optional fields (with defaults)
    deployment_id: Optional[str] = None
    agent_name: Optional[str] = None
    workflow_name: Optional[str] = None
    data: Dict[str, Any] = field(default_factory=dict)
    timezone: str = "UTC"
    enabled: bool = True

    # Timing information
    next_run: Optional[datetime] = None
    last_run: Optional[datetime] = None
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    # AWS EventBridge information
    eventbridge_rule_arn: Optional[str] = None

    @property
    def target_name(self) -> str:
        """Name of the scheduled target (agent or workflow)."""
        if self.agent_name:
            return self.agent_name
        if self.workflow_name:
            return self.workflow_name
        return "unknown"

    @property
    def target_type(self) -> str:
        """Target type: "agent" when agent_name is set, otherwise "workflow"."""
        if self.agent_name:
            return "agent"
        return "workflow"

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask":
        """Create a ScheduledTask from API response data."""

        def to_datetime(key: str) -> Optional[datetime]:
            # API timestamps are ISO-8601, sometimes with a 'Z' suffix.
            raw = data.get(key)
            if not raw:
                return None
            return datetime.fromisoformat(raw.replace('Z', '+00:00'))

        # schedule_config may arrive JSON-encoded; decode it when needed.
        config = data.get('schedule_config', {})
        if isinstance(config, str):
            config = json.loads(config)

        return cls(
            task_id=data['id'],
            organization_id=data['organization_id'],
            deployment_id=data.get('deployment_id'),
            agent_name=data.get('agent_name'),
            workflow_name=data.get('workflow_name'),
            schedule=config.get('cron', data.get('schedule', '')),
            data=config.get('data', {}),
            timezone=config.get('timezone', 'UTC'),
            enabled=data.get('enabled', True),
            next_run=to_datetime('next_run'),
            last_run=to_datetime('last_run'),
            created_at=to_datetime('created_at'),
            updated_at=to_datetime('updated_at'),
            eventbridge_rule_arn=data.get('eventbridge_rule_arn')
        )

    def __repr__(self) -> str:
        return f"ScheduledTask(id={self.task_id[:8]}..., target={self.target_name}, schedule={self.schedule})"
226
+
227
+
228
@dataclass
class WebhookTrigger:
    """
    Represents a webhook trigger for agent or workflow execution.

    Describes a webhook configuration that can trigger agent executions
    based on external events, including its target and trigger statistics.
    """

    webhook_id: str
    webhook_url: str
    organization_id: int
    deployment_id: Optional[str] = None

    # Target information
    agent_name: Optional[str] = None
    workflow_name: Optional[str] = None

    # Webhook configuration
    data_template: Dict[str, Any] = field(default_factory=dict)
    enabled: bool = True

    # Timing information
    created_at: Optional[datetime] = None

    # Statistics
    trigger_count: int = 0
    last_triggered: Optional[datetime] = None

    @property
    def target_name(self) -> str:
        """Name of the triggered target (agent or workflow)."""
        if self.agent_name:
            return self.agent_name
        if self.workflow_name:
            return self.workflow_name
        return "unknown"

    @property
    def target_type(self) -> str:
        """Target type: "agent" when agent_name is set, otherwise "workflow"."""
        if self.agent_name:
            return "agent"
        return "workflow"

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WebhookTrigger":
        """Create a WebhookTrigger from API response data."""

        def to_datetime(key: str) -> Optional[datetime]:
            # API timestamps are ISO-8601, sometimes with a 'Z' suffix.
            raw = data.get(key)
            if not raw:
                return None
            return datetime.fromisoformat(raw.replace('Z', '+00:00'))

        # data_template may arrive JSON-encoded; decode it when needed.
        template = data.get('data_template', {})
        if isinstance(template, str):
            template = json.loads(template)

        return cls(
            webhook_id=data['webhook_id'],
            webhook_url=data['webhook_url'],
            organization_id=data['organization_id'],
            deployment_id=data.get('deployment_id'),
            agent_name=data.get('agent_name'),
            workflow_name=data.get('workflow_name'),
            data_template=template,
            enabled=data.get('enabled', True),
            created_at=to_datetime('created_at'),
            trigger_count=data.get('trigger_count', 0),
            last_triggered=to_datetime('last_triggered')
        )

    def __repr__(self) -> str:
        return f"WebhookTrigger(id={self.webhook_id[:8]}..., target={self.target_name}, enabled={self.enabled})"
301
+
302
+
303
+ # Utility functions for working with models
304
+
305
def parse_execution_response(response_data: Dict[str, Any]) -> ExecutionResult:
    """Parse a raw API response dict into an ExecutionResult."""
    # Thin convenience wrapper over the classmethod constructor.
    return ExecutionResult.from_dict(response_data)
308
+
309
+
310
def parse_schedule_list(response_data: list) -> list[ScheduledTask]:
    """Parse an API response list into ScheduledTask objects."""
    return list(map(ScheduledTask.from_dict, response_data))
313
+
314
+
315
def parse_webhook_list(response_data: list) -> list[WebhookTrigger]:
    """Parse an API response list into WebhookTrigger objects."""
    return [WebhookTrigger.from_dict(entry) for entry in response_data]
daita/llm/__init__.py ADDED
@@ -0,0 +1,60 @@
1
+ """
2
+ LLM provider integrations for Daita Agents.
3
+
4
+ This module provides a unified interface for different LLM providers:
5
+ - OpenAI (GPT-4, GPT-3.5-turbo, etc.)
6
+ - Anthropic (Claude models)
7
+ - Google Gemini
8
+ - xAI Grok
9
+ - Mock provider for testing
10
+
11
+ The factory pattern allows easy switching between providers while maintaining
12
+ a consistent interface for agents.
13
+
14
+ Usage:
15
+ ```python
16
+ from daita.llm import create_llm_provider
17
+
18
+ # Create OpenAI provider
19
+ llm = create_llm_provider("openai", "gpt-4", api_key="sk-...")
20
+ response = await llm.generate("Hello, world!")
21
+
22
+ # Create Anthropic provider
23
+ llm = create_llm_provider("anthropic", "claude-3-sonnet-20240229")
24
+ response = await llm.generate("Analyze this data...")
25
+ ```
26
+ """
27
+
28
+ # Factory and registry functions
29
+ from .factory import (
30
+ create_llm_provider,
31
+ register_llm_provider,
32
+ list_available_providers
33
+ )
34
+
35
+ # Base class for custom providers
36
+ from .base import BaseLLMProvider
37
+
38
+ # Concrete provider implementations
39
+ from .openai import OpenAIProvider
40
+ from .anthropic import AnthropicProvider
41
+ from .grok import GrokProvider
42
+ from .gemini import GeminiProvider
43
+ from .mock import MockLLMProvider
44
+
45
+ __all__ = [
46
+ # Factory functions
47
+ "create_llm_provider",
48
+ "register_llm_provider",
49
+ "list_available_providers",
50
+
51
+ # Base class
52
+ "BaseLLMProvider",
53
+
54
+ # Provider implementations
55
+ "OpenAIProvider",
56
+ "AnthropicProvider",
57
+ "GrokProvider",
58
+ "GeminiProvider",
59
+ "MockLLMProvider",
60
+ ]