daita-agents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of daita-agents might be problematic. Click here for more details.
- daita/__init__.py +208 -0
- daita/agents/__init__.py +33 -0
- daita/agents/base.py +722 -0
- daita/agents/substrate.py +895 -0
- daita/cli/__init__.py +145 -0
- daita/cli/__main__.py +7 -0
- daita/cli/ascii_art.py +44 -0
- daita/cli/core/__init__.py +0 -0
- daita/cli/core/create.py +254 -0
- daita/cli/core/deploy.py +473 -0
- daita/cli/core/deployments.py +309 -0
- daita/cli/core/import_detector.py +219 -0
- daita/cli/core/init.py +382 -0
- daita/cli/core/logs.py +239 -0
- daita/cli/core/managed_deploy.py +709 -0
- daita/cli/core/run.py +648 -0
- daita/cli/core/status.py +421 -0
- daita/cli/core/test.py +239 -0
- daita/cli/core/webhooks.py +172 -0
- daita/cli/main.py +588 -0
- daita/cli/utils.py +541 -0
- daita/config/__init__.py +62 -0
- daita/config/base.py +159 -0
- daita/config/settings.py +184 -0
- daita/core/__init__.py +262 -0
- daita/core/decision_tracing.py +701 -0
- daita/core/exceptions.py +480 -0
- daita/core/focus.py +251 -0
- daita/core/interfaces.py +76 -0
- daita/core/plugin_tracing.py +550 -0
- daita/core/relay.py +695 -0
- daita/core/reliability.py +381 -0
- daita/core/scaling.py +444 -0
- daita/core/tools.py +402 -0
- daita/core/tracing.py +770 -0
- daita/core/workflow.py +1084 -0
- daita/display/__init__.py +1 -0
- daita/display/console.py +160 -0
- daita/execution/__init__.py +58 -0
- daita/execution/client.py +856 -0
- daita/execution/exceptions.py +92 -0
- daita/execution/models.py +317 -0
- daita/llm/__init__.py +60 -0
- daita/llm/anthropic.py +166 -0
- daita/llm/base.py +373 -0
- daita/llm/factory.py +101 -0
- daita/llm/gemini.py +152 -0
- daita/llm/grok.py +114 -0
- daita/llm/mock.py +135 -0
- daita/llm/openai.py +109 -0
- daita/plugins/__init__.py +141 -0
- daita/plugins/base.py +37 -0
- daita/plugins/base_db.py +167 -0
- daita/plugins/elasticsearch.py +844 -0
- daita/plugins/mcp.py +481 -0
- daita/plugins/mongodb.py +510 -0
- daita/plugins/mysql.py +351 -0
- daita/plugins/postgresql.py +331 -0
- daita/plugins/redis_messaging.py +500 -0
- daita/plugins/rest.py +529 -0
- daita/plugins/s3.py +761 -0
- daita/plugins/slack.py +729 -0
- daita/utils/__init__.py +18 -0
- daita_agents-0.1.0.dist-info/METADATA +350 -0
- daita_agents-0.1.0.dist-info/RECORD +69 -0
- daita_agents-0.1.0.dist-info/WHEEL +5 -0
- daita_agents-0.1.0.dist-info/entry_points.txt +2 -0
- daita_agents-0.1.0.dist-info/licenses/LICENSE +56 -0
- daita_agents-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Execution-specific exceptions for the autonomous execution system.
|
|
3
|
+
|
|
4
|
+
These exceptions provide specific error handling for common issues that
|
|
5
|
+
can occur during programmatic agent execution.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ExecutionError(Exception):
    """Base class for errors raised by the autonomous execution system.

    Carries optional context — the execution id and an HTTP-style status
    code — alongside the human-readable message.
    """

    def __init__(
        self,
        message: str,
        execution_id: Optional[str] = None,
        status_code: Optional[int] = None
    ):
        super().__init__(message)
        self.message = message
        self.execution_id = execution_id
        self.status_code = status_code

    def __str__(self) -> str:
        # Include the execution id in the rendering when one is known.
        prefix = (
            f"ExecutionError (ID: {self.execution_id})"
            if self.execution_id
            else "ExecutionError"
        )
        return f"{prefix}: {self.message}"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class AuthenticationError(ExecutionError):
    """Raised when API key authentication fails.

    Always reported with HTTP status 401 (Unauthorized).
    """

    def __init__(self, message: str = "Invalid API key or insufficient permissions"):
        super().__init__(message=message, status_code=401)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class NotFoundError(ExecutionError):
    """Raised when a requested agent, workflow, or execution does not exist.

    Always reported with HTTP status 404 (Not Found); ``resource_type``
    names the kind of entity that was looked up (e.g. "agent", "workflow").
    """

    def __init__(self, message: str, resource_type: str = "resource"):
        super().__init__(message, status_code=404)
        self.resource_type = resource_type

    def __str__(self) -> str:
        return f"NotFoundError ({self.resource_type}): {self.message}"
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class ValidationError(ExecutionError):
    """Raised when request validation fails (HTTP 400 Bad Request).

    ``field`` optionally identifies the specific request field that
    failed validation.
    """

    def __init__(self, message: str, field: Optional[str] = None):
        super().__init__(message, status_code=400)
        self.field = field

    def __str__(self) -> str:
        label = f"ValidationError ({self.field})" if self.field else "ValidationError"
        return f"{label}: {self.message}"
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class RateLimitError(ExecutionError):
    """Raised when API rate limits are exceeded (HTTP 429).

    Args:
        message: Human-readable description of the failure.
        retry_after: Seconds the caller should wait before retrying, when
            the server provided a Retry-After hint.
    """

    def __init__(self, message: str = "Rate limit exceeded", retry_after: Optional[int] = None):
        self.retry_after = retry_after
        super().__init__(message, status_code=429)

    def __str__(self) -> str:
        # Compare against None so a 0-second retry hint is still reported
        # (the previous truthiness test silently dropped retry_after == 0).
        if self.retry_after is not None:
            return f"RateLimitError: {self.message} (retry after {self.retry_after}s)"
        return f"RateLimitError: {self.message}"
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class TimeoutError(ExecutionError):
    """Raised when execution times out (HTTP 408).

    NOTE(review): this name shadows the builtin ``TimeoutError`` inside this
    module; it is kept unchanged for backward compatibility with callers that
    import it as ``daita.execution.exceptions.TimeoutError``.

    Args:
        message: Human-readable description of the failure.
        timeout_seconds: The configured timeout that was exceeded, if known.
    """

    def __init__(self, message: str = "Execution timeout", timeout_seconds: Optional[int] = None):
        self.timeout_seconds = timeout_seconds
        super().__init__(message, status_code=408)

    def __str__(self) -> str:
        # Compare against None so a 0-second timeout is still reported
        # (the previous truthiness test silently dropped timeout_seconds == 0).
        if self.timeout_seconds is not None:
            return f"TimeoutError: {self.message} (timeout: {self.timeout_seconds}s)"
        return f"TimeoutError: {self.message}"
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class ServerError(ExecutionError):
    """Raised when the server encounters an internal error (HTTP 500)."""

    def __init__(self, message: str = "Internal server error"):
        super().__init__(message=message, status_code=500)
|
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Data models for the autonomous execution system.
|
|
3
|
+
|
|
4
|
+
These models provide structured representations of execution results,
|
|
5
|
+
scheduled tasks, and webhook triggers.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from datetime import datetime, timezone
|
|
10
|
+
from typing import Dict, Any, Optional, Union
|
|
11
|
+
import json
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class ExecutionResult:
    """
    Represents the result of an autonomous agent or workflow execution.

    This class provides a structured way to access execution results
    and metadata from programmatic agent executions. Instances are
    normally built from API responses via :meth:`from_dict`.
    """

    execution_id: str
    status: str  # queued, running, completed, failed, cancelled
    target_type: str  # agent or workflow
    target_name: str

    # Result data
    result: Optional[Dict[str, Any]] = None
    error: Optional[str] = None

    # Timing information
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    duration_ms: Optional[int] = None

    # Resource usage
    memory_used_mb: Optional[float] = None
    cost_estimate: Optional[float] = None

    # Monitoring and debugging
    trace_id: Optional[str] = None
    dashboard_url: Optional[str] = None

    # Execution metadata
    execution_source: str = "autonomous_sdk"
    source_metadata: Dict[str, Any] = field(default_factory=dict)

    @staticmethod
    def _parse_datetime(value: Optional[str]) -> Optional[datetime]:
        """Parse an ISO-8601 timestamp, tolerating a trailing 'Z' suffix.

        Returns None for missing/empty values. Extracted to remove the
        triplicated parsing logic previously inlined in from_dict().
        """
        if not value:
            return None
        return datetime.fromisoformat(value.replace('Z', '+00:00'))

    @property
    def is_complete(self) -> bool:
        """Check if execution is complete (success or failure)."""
        return self.status in ['completed', 'success', 'failed', 'cancelled']

    @property
    def is_success(self) -> bool:
        """Check if execution completed successfully."""
        return self.status in ['completed', 'success']

    @property
    def is_running(self) -> bool:
        """Check if execution is currently queued or running."""
        return self.status in ['queued', 'running']

    @property
    def duration_seconds(self) -> Optional[float]:
        """Get duration in seconds, or None when no duration was recorded."""
        # Explicit None check: a 0 ms duration is a valid value and must
        # map to 0.0, not None (the previous truthiness test dropped it).
        return self.duration_ms / 1000 if self.duration_ms is not None else None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ExecutionResult":
        """Create ExecutionResult from API response data.

        Raises:
            KeyError: if a required field (execution_id, status,
                target_type, target_name) is missing from *data*.
        """
        # Parse result field - handle both string and dict formats
        result = data.get('result')
        if isinstance(result, str):
            try:
                result = json.loads(result)
            except (json.JSONDecodeError, TypeError):
                # If JSON parsing fails, keep the raw content under a
                # well-known key (or None for an empty string).
                result = {"raw_output": result} if result else None

        return cls(
            execution_id=data['execution_id'],
            status=data['status'],
            target_type=data['target_type'],
            target_name=data['target_name'],
            result=result,
            error=data.get('error'),
            created_at=cls._parse_datetime(data.get('created_at')),
            started_at=cls._parse_datetime(data.get('started_at')),
            completed_at=cls._parse_datetime(data.get('completed_at')),
            duration_ms=data.get('duration_ms'),
            memory_used_mb=data.get('memory_used_mb'),
            cost_estimate=data.get('cost_estimate'),
            trace_id=data.get('trace_id'),
            dashboard_url=data.get('dashboard_url'),
            execution_source=data.get('execution_source', 'autonomous_sdk'),
            source_metadata=data.get('source_metadata', {})
        )

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary representation (datetimes as ISO strings)."""
        return {
            'execution_id': self.execution_id,
            'status': self.status,
            'target_type': self.target_type,
            'target_name': self.target_name,
            'result': self.result,
            'error': self.error,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'started_at': self.started_at.isoformat() if self.started_at else None,
            'completed_at': self.completed_at.isoformat() if self.completed_at else None,
            'duration_ms': self.duration_ms,
            'memory_used_mb': self.memory_used_mb,
            'cost_estimate': self.cost_estimate,
            'trace_id': self.trace_id,
            'dashboard_url': self.dashboard_url,
            'execution_source': self.execution_source,
            'source_metadata': self.source_metadata
        }

    def __repr__(self) -> str:
        return f"ExecutionResult(id={self.execution_id[:8]}..., status={self.status}, target={self.target_name})"
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
@dataclass
class ScheduledTask:
    """
    Represents a scheduled agent or workflow execution.

    This class provides information about scheduled tasks configured
    through the YAML-based scheduling system. Instances are normally
    built from API responses via :meth:`from_dict`.
    """

    # Required fields (no defaults)
    task_id: str
    organization_id: int
    schedule: str  # cron expression

    # Optional fields (with defaults)
    deployment_id: Optional[str] = None
    agent_name: Optional[str] = None
    workflow_name: Optional[str] = None
    data: Dict[str, Any] = field(default_factory=dict)
    timezone: str = "UTC"  # IANA timezone name the cron expression is evaluated in
    enabled: bool = True

    # Timing information
    next_run: Optional[datetime] = None
    last_run: Optional[datetime] = None
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    # AWS EventBridge information
    eventbridge_rule_arn: Optional[str] = None

    @staticmethod
    def _parse_datetime(value: Optional[str]) -> Optional[datetime]:
        """Parse an ISO-8601 timestamp, tolerating a trailing 'Z' suffix.

        Returns None for missing/empty values. Extracted to remove the
        quadruplicated parsing logic previously inlined in from_dict().
        """
        if not value:
            return None
        return datetime.fromisoformat(value.replace('Z', '+00:00'))

    @property
    def target_name(self) -> str:
        """Get the target name (agent or workflow)."""
        return self.agent_name or self.workflow_name or "unknown"

    @property
    def target_type(self) -> str:
        """Get the target type (agent or workflow)."""
        return "agent" if self.agent_name else "workflow"

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ScheduledTask":
        """Create ScheduledTask from API response data.

        Raises:
            KeyError: if 'id' or 'organization_id' is missing from *data*.
            json.JSONDecodeError: if a string 'schedule_config' is not valid JSON.
        """
        # Handle schedule_config delivered either as a JSON string or a dict.
        schedule_config = data.get('schedule_config', {})
        if isinstance(schedule_config, str):
            schedule_config = json.loads(schedule_config)

        return cls(
            task_id=data['id'],
            organization_id=data['organization_id'],
            deployment_id=data.get('deployment_id'),
            agent_name=data.get('agent_name'),
            workflow_name=data.get('workflow_name'),
            # Prefer the nested schedule_config, falling back to a flat field.
            schedule=schedule_config.get('cron', data.get('schedule', '')),
            data=schedule_config.get('data', {}),
            timezone=schedule_config.get('timezone', 'UTC'),
            enabled=data.get('enabled', True),
            next_run=cls._parse_datetime(data.get('next_run')),
            last_run=cls._parse_datetime(data.get('last_run')),
            created_at=cls._parse_datetime(data.get('created_at')),
            updated_at=cls._parse_datetime(data.get('updated_at')),
            eventbridge_rule_arn=data.get('eventbridge_rule_arn')
        )

    def __repr__(self) -> str:
        return f"ScheduledTask(id={self.task_id[:8]}..., target={self.target_name}, schedule={self.schedule})"
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
@dataclass
class WebhookTrigger:
    """
    Represents a webhook trigger for agent or workflow execution.

    Describes a webhook configuration that can fire agent or workflow
    executions in response to external events, plus basic usage stats.
    """

    webhook_id: str
    webhook_url: str
    organization_id: int
    deployment_id: Optional[str] = None

    # Target information
    agent_name: Optional[str] = None
    workflow_name: Optional[str] = None

    # Webhook configuration
    data_template: Dict[str, Any] = field(default_factory=dict)
    enabled: bool = True

    # Timing information
    created_at: Optional[datetime] = None

    # Statistics
    trigger_count: int = 0
    last_triggered: Optional[datetime] = None

    @property
    def target_name(self) -> str:
        """Get the target name (agent or workflow)."""
        if self.agent_name:
            return self.agent_name
        if self.workflow_name:
            return self.workflow_name
        return "unknown"

    @property
    def target_type(self) -> str:
        """Get the target type (agent or workflow)."""
        return "workflow" if not self.agent_name else "agent"

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WebhookTrigger":
        """Create WebhookTrigger from API response data."""

        def _ts(raw: Optional[str]) -> Optional[datetime]:
            # Normalize a trailing 'Z' to an explicit UTC offset for fromisoformat.
            return datetime.fromisoformat(raw.replace('Z', '+00:00')) if raw else None

        # Accept data_template delivered either as a JSON string or a dict.
        template = data.get('data_template', {})
        if isinstance(template, str):
            template = json.loads(template)

        return cls(
            webhook_id=data['webhook_id'],
            webhook_url=data['webhook_url'],
            organization_id=data['organization_id'],
            deployment_id=data.get('deployment_id'),
            agent_name=data.get('agent_name'),
            workflow_name=data.get('workflow_name'),
            data_template=template,
            enabled=data.get('enabled', True),
            created_at=_ts(data.get('created_at')),
            trigger_count=data.get('trigger_count', 0),
            last_triggered=_ts(data.get('last_triggered')),
        )

    def __repr__(self) -> str:
        return (
            f"WebhookTrigger(id={self.webhook_id[:8]}..., "
            f"target={self.target_name}, enabled={self.enabled})"
        )
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
# Utility functions for working with models
|
|
304
|
+
|
|
305
|
+
def parse_execution_response(response_data: Dict[str, Any]) -> ExecutionResult:
    """Parse a raw API response dict into an :class:`ExecutionResult`.

    Thin convenience wrapper around ``ExecutionResult.from_dict``; any
    exception raised there (e.g. KeyError for missing required fields)
    propagates unchanged.
    """
    return ExecutionResult.from_dict(response_data)
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
def parse_schedule_list(response_data: list) -> list[ScheduledTask]:
    """Parse a list of raw API response dicts into :class:`ScheduledTask` objects.

    Each item is handed to ``ScheduledTask.from_dict``; an empty input
    list yields an empty list.
    """
    return [ScheduledTask.from_dict(item) for item in response_data]
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def parse_webhook_list(response_data: list) -> list[WebhookTrigger]:
    """Parse a list of raw API response dicts into :class:`WebhookTrigger` objects.

    Each item is handed to ``WebhookTrigger.from_dict``; an empty input
    list yields an empty list.
    """
    return [WebhookTrigger.from_dict(item) for item in response_data]
|
daita/llm/__init__.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLM provider integrations for Daita Agents.
|
|
3
|
+
|
|
4
|
+
This module provides a unified interface for different LLM providers:
|
|
5
|
+
- OpenAI (GPT-4, GPT-3.5-turbo, etc.)
|
|
6
|
+
- Anthropic (Claude models)
|
|
7
|
+
- Google Gemini
|
|
8
|
+
- xAI Grok
|
|
9
|
+
- Mock provider for testing
|
|
10
|
+
|
|
11
|
+
The factory pattern allows easy switching between providers while maintaining
|
|
12
|
+
a consistent interface for agents.
|
|
13
|
+
|
|
14
|
+
Usage:
|
|
15
|
+
```python
|
|
16
|
+
from daita.llm import create_llm_provider
|
|
17
|
+
|
|
18
|
+
# Create OpenAI provider
|
|
19
|
+
llm = create_llm_provider("openai", "gpt-4", api_key="sk-...")
|
|
20
|
+
response = await llm.generate("Hello, world!")
|
|
21
|
+
|
|
22
|
+
# Create Anthropic provider
|
|
23
|
+
llm = create_llm_provider("anthropic", "claude-3-sonnet-20240229")
|
|
24
|
+
response = await llm.generate("Analyze this data...")
|
|
25
|
+
```
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
# Factory and registry functions
|
|
29
|
+
from .factory import (
|
|
30
|
+
create_llm_provider,
|
|
31
|
+
register_llm_provider,
|
|
32
|
+
list_available_providers
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
# Base class for custom providers
|
|
36
|
+
from .base import BaseLLMProvider
|
|
37
|
+
|
|
38
|
+
# Concrete provider implementations
|
|
39
|
+
from .openai import OpenAIProvider
|
|
40
|
+
from .anthropic import AnthropicProvider
|
|
41
|
+
from .grok import GrokProvider
|
|
42
|
+
from .gemini import GeminiProvider
|
|
43
|
+
from .mock import MockLLMProvider
|
|
44
|
+
|
|
45
|
+
__all__ = [
|
|
46
|
+
# Factory functions
|
|
47
|
+
"create_llm_provider",
|
|
48
|
+
"register_llm_provider",
|
|
49
|
+
"list_available_providers",
|
|
50
|
+
|
|
51
|
+
# Base class
|
|
52
|
+
"BaseLLMProvider",
|
|
53
|
+
|
|
54
|
+
# Provider implementations
|
|
55
|
+
"OpenAIProvider",
|
|
56
|
+
"AnthropicProvider",
|
|
57
|
+
"GrokProvider",
|
|
58
|
+
"GeminiProvider",
|
|
59
|
+
"MockLLMProvider",
|
|
60
|
+
]
|
daita/llm/anthropic.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Anthropic LLM provider implementation with integrated tracing.
|
|
3
|
+
"""
|
|
4
|
+
import os
|
|
5
|
+
import logging
|
|
6
|
+
from typing import Dict, Any, Optional
|
|
7
|
+
|
|
8
|
+
from ..core.exceptions import LLMError
|
|
9
|
+
from .base import BaseLLMProvider
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
class AnthropicProvider(BaseLLMProvider):
    """Anthropic LLM provider implementation with automatic call tracing."""

    def __init__(
        self,
        model: str = "claude-3-sonnet-20240229",
        api_key: Optional[str] = None,
        **kwargs
    ):
        """
        Initialize Anthropic provider.

        Args:
            model: Anthropic model name
            api_key: Anthropic API key; falls back to the ANTHROPIC_API_KEY
                environment variable when not supplied
            **kwargs: Additional Anthropic-specific parameters
        """
        resolved_key = api_key or os.getenv("ANTHROPIC_API_KEY")
        super().__init__(model=model, api_key=resolved_key, **kwargs)

        # Anthropic-specific default parameters.
        self.default_params.update({
            'timeout': kwargs.get('timeout', 60)
        })

        # Client is created lazily on first use (see the `client` property).
        self._client = None

    @property
    def client(self):
        """Lazily construct and cache the async Anthropic client."""
        if self._client is not None:
            return self._client
        try:
            import anthropic
        except ImportError:
            raise LLMError(
                "Anthropic package not installed. Install with: pip install anthropic"
            )
        self._validate_api_key()
        self._client = anthropic.AsyncAnthropic(api_key=self.api_key)
        logger.debug("Anthropic client initialized")
        return self._client

    async def _generate_impl(self, prompt: str, **kwargs) -> str:
        """
        Provider-specific implementation of text generation for Anthropic.

        Contains the actual Anthropic API call; the base class generate()
        wraps it with tracing automatically.

        Args:
            prompt: Input prompt
            **kwargs: Optional parameters

        Returns:
            Generated text response

        Raises:
            LLMError: if the underlying API call fails for any reason.
        """
        try:
            call_params = self._merge_params(kwargs)

            response = await self.client.messages.create(
                model=self.model,
                max_tokens=call_params.get('max_tokens'),
                temperature=call_params.get('temperature'),
                messages=[
                    {"role": "user", "content": prompt}
                ],
                timeout=call_params.get('timeout')
            )

            # Keep the usage object so the base class can extract token counts.
            self._last_usage = response.usage

            return response.content[0].text

        except Exception as e:
            logger.error(f"Anthropic generation failed: {str(e)}")
            raise LLMError(f"Anthropic generation failed: {str(e)}")

    async def generate_with_system(self, prompt: str, system_message: str, **kwargs) -> str:
        """
        Generate text with a system message using Anthropic's system parameter.

        Note: This method bypasses automatic tracing since it's not part of the
        base interface. If you want tracing for system messages, call the base
        generate() method with a formatted prompt instead.

        Args:
            prompt: User prompt
            system_message: System message to set context
            **kwargs: Optional parameters

        Returns:
            Generated text

        Raises:
            LLMError: if the underlying API call fails for any reason.
        """
        try:
            call_params = self._merge_params(kwargs)

            response = await self.client.messages.create(
                model=self.model,
                max_tokens=call_params.get('max_tokens'),
                temperature=call_params.get('temperature'),
                system=system_message,
                messages=[
                    {"role": "user", "content": prompt}
                ],
                timeout=call_params.get('timeout')
            )

            # Keep the usage object for potential token extraction.
            self._last_usage = response.usage

            return response.content[0].text

        except Exception as e:
            logger.error(f"Anthropic generation with system message failed: {str(e)}")
            raise LLMError(f"Anthropic generation failed: {str(e)}")

    def _get_last_token_usage(self) -> Dict[str, int]:
        """
        Translate Anthropic's token accounting into the common format.

        Anthropic reports input_tokens/output_tokens rather than the
        OpenAI-style prompt/completion names the base class expects.
        """
        usage = self._last_usage
        if not usage:
            # No usage recorded yet — fall back to base class estimation.
            return super()._get_last_token_usage()

        prompt_tokens = getattr(usage, 'input_tokens', 0)
        completion_tokens = getattr(usage, 'output_tokens', 0)
        return {
            'total_tokens': prompt_tokens + completion_tokens,
            'prompt_tokens': prompt_tokens,        # input_tokens -> prompt_tokens
            'completion_tokens': completion_tokens  # output_tokens -> completion_tokens
        }

    @property
    def info(self) -> Dict[str, Any]:
        """Get information about the Anthropic provider."""
        details = super().info
        details.update({
            'provider_name': 'Anthropic',
            'api_compatible': 'Anthropic'
        })
        return details
|