daita-agents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of daita-agents might be problematic. Click here for more details.
- daita/__init__.py +208 -0
- daita/agents/__init__.py +33 -0
- daita/agents/base.py +722 -0
- daita/agents/substrate.py +895 -0
- daita/cli/__init__.py +145 -0
- daita/cli/__main__.py +7 -0
- daita/cli/ascii_art.py +44 -0
- daita/cli/core/__init__.py +0 -0
- daita/cli/core/create.py +254 -0
- daita/cli/core/deploy.py +473 -0
- daita/cli/core/deployments.py +309 -0
- daita/cli/core/import_detector.py +219 -0
- daita/cli/core/init.py +382 -0
- daita/cli/core/logs.py +239 -0
- daita/cli/core/managed_deploy.py +709 -0
- daita/cli/core/run.py +648 -0
- daita/cli/core/status.py +421 -0
- daita/cli/core/test.py +239 -0
- daita/cli/core/webhooks.py +172 -0
- daita/cli/main.py +588 -0
- daita/cli/utils.py +541 -0
- daita/config/__init__.py +62 -0
- daita/config/base.py +159 -0
- daita/config/settings.py +184 -0
- daita/core/__init__.py +262 -0
- daita/core/decision_tracing.py +701 -0
- daita/core/exceptions.py +480 -0
- daita/core/focus.py +251 -0
- daita/core/interfaces.py +76 -0
- daita/core/plugin_tracing.py +550 -0
- daita/core/relay.py +695 -0
- daita/core/reliability.py +381 -0
- daita/core/scaling.py +444 -0
- daita/core/tools.py +402 -0
- daita/core/tracing.py +770 -0
- daita/core/workflow.py +1084 -0
- daita/display/__init__.py +1 -0
- daita/display/console.py +160 -0
- daita/execution/__init__.py +58 -0
- daita/execution/client.py +856 -0
- daita/execution/exceptions.py +92 -0
- daita/execution/models.py +317 -0
- daita/llm/__init__.py +60 -0
- daita/llm/anthropic.py +166 -0
- daita/llm/base.py +373 -0
- daita/llm/factory.py +101 -0
- daita/llm/gemini.py +152 -0
- daita/llm/grok.py +114 -0
- daita/llm/mock.py +135 -0
- daita/llm/openai.py +109 -0
- daita/plugins/__init__.py +141 -0
- daita/plugins/base.py +37 -0
- daita/plugins/base_db.py +167 -0
- daita/plugins/elasticsearch.py +844 -0
- daita/plugins/mcp.py +481 -0
- daita/plugins/mongodb.py +510 -0
- daita/plugins/mysql.py +351 -0
- daita/plugins/postgresql.py +331 -0
- daita/plugins/redis_messaging.py +500 -0
- daita/plugins/rest.py +529 -0
- daita/plugins/s3.py +761 -0
- daita/plugins/slack.py +729 -0
- daita/utils/__init__.py +18 -0
- daita_agents-0.1.0.dist-info/METADATA +350 -0
- daita_agents-0.1.0.dist-info/RECORD +69 -0
- daita_agents-0.1.0.dist-info/WHEEL +5 -0
- daita_agents-0.1.0.dist-info/entry_points.txt +2 -0
- daita_agents-0.1.0.dist-info/licenses/LICENSE +56 -0
- daita_agents-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,856 @@
|
|
|
1
|
+
"""
|
|
2
|
+
DaitaClient - Primary interface for autonomous agent execution.
|
|
3
|
+
|
|
4
|
+
This client provides a clean, developer-friendly API for programmatically
|
|
5
|
+
executing deployed agents and workflows. It handles authentication, retry
|
|
6
|
+
logic, and error handling automatically.
|
|
7
|
+
|
|
8
|
+
Sync-First Design:
|
|
9
|
+
All primary methods are synchronous by default. Async methods available
|
|
10
|
+
with _async suffix for advanced users who need async/await patterns.
|
|
11
|
+
|
|
12
|
+
Example Usage:
|
|
13
|
+
from daita import DaitaClient
|
|
14
|
+
|
|
15
|
+
# Initialize with user's existing API key
|
|
16
|
+
client = DaitaClient(api_key="your_api_key")
|
|
17
|
+
|
|
18
|
+
# Execute agent and wait for completion
|
|
19
|
+
result = client.execute_agent("my_agent", data={"input": "data"}, wait=True)
|
|
20
|
+
|
|
21
|
+
# Check execution status
|
|
22
|
+
status = client.get_execution(result.execution_id)
|
|
23
|
+
|
|
24
|
+
# Wait for completion if needed
|
|
25
|
+
final_result = client.wait_for_execution(result.execution_id)
|
|
26
|
+
|
|
27
|
+
# List recent executions
|
|
28
|
+
executions = client.list_executions(limit=10)
|
|
29
|
+
|
|
30
|
+
# Get latest result for specific agent
|
|
31
|
+
latest = client.get_latest_execution(agent_name="my_agent")
|
|
32
|
+
|
|
33
|
+
Async Usage (Advanced):
|
|
34
|
+
async with DaitaClient(api_key="your_api_key") as client:
|
|
35
|
+
result = await client.execute_agent_async("my_agent")
|
|
36
|
+
status = await client.get_execution_async(result.execution_id)
|
|
37
|
+
"""
|
|
38
|
+
|
|
39
|
+
import asyncio
|
|
40
|
+
import json
|
|
41
|
+
import os
|
|
42
|
+
import time
|
|
43
|
+
from typing import Dict, Any, Optional, List, Union
|
|
44
|
+
from datetime import datetime, timezone
|
|
45
|
+
|
|
46
|
+
import aiohttp
|
|
47
|
+
|
|
48
|
+
from .models import ExecutionResult, ScheduledTask, WebhookTrigger
|
|
49
|
+
from .exceptions import (
|
|
50
|
+
ExecutionError,
|
|
51
|
+
AuthenticationError,
|
|
52
|
+
NotFoundError,
|
|
53
|
+
ValidationError,
|
|
54
|
+
RateLimitError,
|
|
55
|
+
TimeoutError,
|
|
56
|
+
ServerError
|
|
57
|
+
)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class DaitaClient:
|
|
61
|
+
"""
|
|
62
|
+
Primary client for autonomous agent and workflow execution.
|
|
63
|
+
|
|
64
|
+
This client provides programmatic access to deployed agents and workflows,
|
|
65
|
+
enabling users to build autonomous systems that can trigger executions
|
|
66
|
+
from external code.
|
|
67
|
+
|
|
68
|
+
Features:
|
|
69
|
+
- Clean, intuitive API with sync methods as primary interface
|
|
70
|
+
- Async methods available with _async suffix for advanced users
|
|
71
|
+
- Automatic retry logic with exponential backoff
|
|
72
|
+
- Comprehensive error handling and tracing
|
|
73
|
+
- Built on existing DAITA infrastructure
|
|
74
|
+
|
|
75
|
+
Primary Methods (Sync):
|
|
76
|
+
- execute_agent() / execute_workflow() - Run agents/workflows
|
|
77
|
+
- get_execution() - Get current status
|
|
78
|
+
- wait_for_execution() - Wait for completion
|
|
79
|
+
- list_executions() - List recent executions
|
|
80
|
+
- get_latest_execution() - Get most recent execution
|
|
81
|
+
- cancel_execution() - Cancel running execution
|
|
82
|
+
|
|
83
|
+
Advanced Methods (Async):
|
|
84
|
+
- All methods available with _async suffix for async usage
|
|
85
|
+
"""
|
|
86
|
+
|
|
87
|
+
def __init__(
    self,
    api_key: str,
    api_base: Optional[str] = None,
    timeout: int = 300,
    max_retries: int = 3,
    retry_delay: float = 1.0
):
    """
    Create a DaitaClient.

    Args:
        api_key: User's DAITA API key (the same key used by the CLI).
        api_base: API base URL; when omitted, falls back to the
            DAITA_API_ENDPOINT environment variable, then production.
        timeout: Per-request timeout in seconds.
        max_retries: Maximum retry attempts for transient failures.
        retry_delay: Base delay (seconds) for exponential backoff.
    """
    self.api_key = api_key
    # Resolution order: explicit argument > env override > production default.
    self.api_base = api_base or os.getenv('DAITA_API_ENDPOINT', 'https://api.daita-tech.io')
    self.timeout = timeout
    self.max_retries = max_retries
    self.retry_delay = retry_delay

    # Lazily-created aiohttp session, shared for connection pooling.
    self._session: Optional[aiohttp.ClientSession] = None
|
|
113
|
+
|
|
114
|
+
async def __aenter__(self):
    """Enter async context: make sure the HTTP session exists."""
    await self._ensure_session()
    return self
|
|
118
|
+
|
|
119
|
+
async def __aexit__(self, exc_type, exc_val, exc_tb):
    """Exit async context: release the HTTP session if one was opened."""
    if self._session is not None:
        await self._session.close()
        self._session = None
|
|
124
|
+
|
|
125
|
+
async def _ensure_session(self):
    """Create the shared aiohttp session on first use (or after close)."""
    if self._session and not self._session.closed:
        return
    # Auth + content headers are fixed for the session's lifetime.
    default_headers = {
        'Authorization': f'Bearer {self.api_key}',
        'Content-Type': 'application/json',
        'User-Agent': 'Daita-Autonomous-Client/1.0.0'
    }
    self._session = aiohttp.ClientSession(
        headers=default_headers,
        timeout=aiohttp.ClientTimeout(total=self.timeout)
    )
|
|
136
|
+
|
|
137
|
+
# Core execution methods
|
|
138
|
+
|
|
139
|
+
async def run(
    self,
    agent_name: Optional[str] = None,
    workflow_name: Optional[str] = None,
    data: Optional[Dict[str, Any]] = None,
    task: str = "process",
    context: Optional[Dict[str, Any]] = None,
    environment: str = "production",
    wait_for_completion: bool = False,
    poll_interval: float = 2.0
) -> ExecutionResult:
    """
    Execute an agent or workflow programmatically.

    Args:
        agent_name: Name of the agent to execute (mutually exclusive with workflow_name)
        workflow_name: Name of the workflow to execute (mutually exclusive with agent_name)
        data: Input data for the execution
        task: Task to execute (for agents only)
        context: Additional execution context
        environment: Environment to execute in (default: production)
        wait_for_completion: If True, wait for execution to complete
        poll_interval: How often to poll for completion (seconds)

    Returns:
        ExecutionResult containing execution details and results

    Raises:
        ValidationError: If both or neither agent_name/workflow_name are provided
        AuthenticationError: If API key is invalid
        NotFoundError: If agent/workflow is not found
        ExecutionError: If execution fails
    """
    # Exactly one of agent_name / workflow_name must be supplied.
    if not agent_name and not workflow_name:
        raise ValidationError("Either agent_name or workflow_name must be specified")
    if agent_name and workflow_name:
        raise ValidationError("Cannot specify both agent_name and workflow_name")

    # Build the execution request payload.
    request_data = {
        "data": data or {},
        "task": task,
        "context": context or {},
        "environment": environment,
        "execution_source": "autonomous_sdk",
        "source_metadata": {
            "client_version": "1.0.0",
            "timestamp": datetime.now(timezone.utc).isoformat()
        }
    }

    if agent_name:
        request_data["agent_name"] = agent_name
    else:
        request_data["workflow_name"] = workflow_name

    # Submit the execution to the backend.
    response_data = await self._make_request("POST", "/autonomous/execute", request_data)
    result = ExecutionResult.from_dict(response_data)

    # Optionally block until the execution reaches a terminal state.
    if wait_for_completion:
        result = await self._wait_for_completion(result.execution_id, poll_interval)

    return result
|
|
205
|
+
|
|
206
|
+
async def status(self, execution_id: str) -> ExecutionResult:
    """
    Fetch the current status and results of an execution.

    Args:
        execution_id: ID of the execution to check

    Returns:
        ExecutionResult with current status and results

    Raises:
        NotFoundError: If execution is not found
        ExecutionError: If status check fails
    """
    payload = await self._make_request("GET", f"/autonomous/executions/{execution_id}")
    return ExecutionResult.from_dict(payload)
|
|
222
|
+
|
|
223
|
+
async def cancel(self, execution_id: str) -> bool:
    """
    Cancel a running execution.

    Args:
        execution_id: ID of the execution to cancel

    Returns:
        True if cancellation was successful

    Raises:
        NotFoundError: If execution is not found
        ExecutionError: If cancellation fails
    """
    try:
        await self._make_request("DELETE", f"/autonomous/executions/{execution_id}")
        return True
    except (NotFoundError, AuthenticationError, ValidationError):
        # Propagate the documented error types instead of masking them
        # as a generic ExecutionError (the docstring promises NotFoundError).
        raise
    except ExecutionError:
        raise
    except Exception as e:
        raise ExecutionError(f"Failed to cancel execution: {e}", execution_id=execution_id)
|
|
242
|
+
|
|
243
|
+
async def list_executions(
    self,
    limit: int = 50,
    offset: int = 0,
    status: Optional[str] = None,
    target_type: Optional[str] = None,
    environment: Optional[str] = None
) -> List[ExecutionResult]:
    """
    List recent autonomous executions.

    NOTE(review): this async definition is shadowed by the sync
    ``def list_executions`` declared later in this class — the later
    definition wins at class-creation time, so this one is unreachable
    via normal attribute access. Consider renaming or removing it.

    Args:
        limit: Maximum number of executions to return
        offset: Number of executions to skip
        status: Filter by status (queued, running, completed, failed, cancelled)
        target_type: Filter by target type (agent, workflow)
        environment: Filter by environment (production, staging)

    Returns:
        List of ExecutionResult objects

    Raises:
        ExecutionError: If listing fails
    """
    query = {'limit': limit, 'offset': offset}
    for key, value in (('status', status),
                       ('target_type', target_type),
                       ('environment', environment)):
        if value:
            query[key] = value

    payload = await self._make_request("GET", "/autonomous/executions", params=query)

    # The API may return either a bare list or a paginated envelope.
    if isinstance(payload, list):
        items = payload
    else:
        items = payload.get('executions', payload.get('items', []))

    return [ExecutionResult.from_dict(item) for item in items]
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
# Internal helper methods
|
|
288
|
+
|
|
289
|
+
async def _wait_for_completion(
    self,
    execution_id: str,
    poll_interval: float = 2.0,
    max_wait_time: int = 3600  # 1 hour max
) -> ExecutionResult:
    """Poll an execution's status until it reaches a terminal state."""
    deadline = time.time() + max_wait_time

    while time.time() < deadline:
        snapshot = await self.status(execution_id)
        if snapshot.is_complete:
            return snapshot
        await asyncio.sleep(poll_interval)

    raise TimeoutError(
        f"Execution {execution_id} did not complete within {max_wait_time} seconds",
        timeout_seconds=max_wait_time
    )
|
|
310
|
+
|
|
311
|
+
async def _make_request(
    self,
    method: str,
    endpoint: str,
    data: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """Make an HTTP request with retry logic and error handling.

    Retries on 429 (respecting the retry-after header when parseable),
    5xx responses, network errors, and timeouts, with exponential
    backoff. 4xx client errors are raised immediately without retrying.

    Raises:
        AuthenticationError: On HTTP 401.
        NotFoundError: On HTTP 404.
        ValidationError: On HTTP 400.
        RateLimitError: When retries are exhausted due to HTTP 429.
        ServerError: On persistent 5xx responses.
        TimeoutError: On persistent request timeouts.
        ExecutionError: On other HTTP errors or persistent network errors.
    """
    await self._ensure_session()

    url = f"{self.api_base}/api/v1{endpoint}"
    rate_limited = False  # tracks whether the final failure was a 429

    for attempt in range(self.max_retries):
        try:
            async with self._session.request(
                method=method,
                url=url,
                json=data if method in ['POST', 'PUT', 'PATCH'] else None,
                params=params,
            ) as response:

                if response.status == 200:
                    return await response.json()

                elif response.status == 401:
                    raise AuthenticationError("Invalid API key or insufficient permissions")

                elif response.status == 404:
                    error_data = await self._safe_json(response)
                    detail = error_data.get('detail', 'Resource not found')
                    raise NotFoundError(detail)

                elif response.status == 400:
                    error_data = await self._safe_json(response)
                    detail = error_data.get('detail', 'Bad request')
                    raise ValidationError(detail)

                elif response.status == 429:
                    # Rate limited - honor retry-after when present/parseable,
                    # otherwise fall back to exponential backoff.
                    rate_limited = True
                    retry_after = response.headers.get('retry-after')
                    try:
                        delay = float(retry_after) if retry_after else self.retry_delay * (2 ** attempt)
                    except (TypeError, ValueError):
                        delay = self.retry_delay * (2 ** attempt)
                    await asyncio.sleep(delay)
                    continue

                elif response.status >= 500:
                    error_data = await self._safe_json(response)
                    detail = error_data.get('detail', f'Server error (status: {response.status})')

                    # Retry on server errors until attempts run out.
                    if attempt < self.max_retries - 1:
                        await asyncio.sleep(self.retry_delay * (2 ** attempt))
                        continue
                    else:
                        raise ServerError(detail)

                else:
                    # Other client errors - don't retry.
                    error_data = await self._safe_json(response)
                    detail = error_data.get('detail', f'HTTP {response.status}')
                    raise ExecutionError(detail, status_code=response.status)

        except aiohttp.ClientError as e:
            if attempt == self.max_retries - 1:
                raise ExecutionError(f"Network error: {e}")
            await asyncio.sleep(self.retry_delay * (2 ** attempt))

        except asyncio.TimeoutError:
            if attempt == self.max_retries - 1:
                raise TimeoutError("Request timeout")
            await asyncio.sleep(self.retry_delay * (2 ** attempt))

        except (AuthenticationError, NotFoundError, ValidationError):
            # Don't retry authentication, not found, or validation errors.
            raise

    # Only the 429 path can exhaust the loop without raising; surface the
    # dedicated RateLimitError (previously imported but never raised).
    if rate_limited:
        raise RateLimitError("Rate limit exceeded after maximum retries")
    raise ExecutionError("Max retries exceeded")
|
|
390
|
+
|
|
391
|
+
async def _safe_json(self, response: aiohttp.ClientResponse) -> Dict[str, Any]:
    """Parse the JSON body of *response*, yielding {} on any parse failure."""
    try:
        payload = await response.json()
    except Exception:
        payload = {}
    return payload
|
|
397
|
+
|
|
398
|
+
# ===========================================
|
|
399
|
+
# PRIMARY SYNC INTERFACE (Main Methods)
|
|
400
|
+
# ===========================================
|
|
401
|
+
|
|
402
|
+
def execute_agent(self, agent_name: str, data: Dict[str, Any] = None,
                  task: str = "process", context: Dict[str, Any] = None,
                  environment: str = "production", wait: bool = False) -> ExecutionResult:
    """
    Execute an agent synchronously.

    Args:
        agent_name: Name of the agent to execute
        data: Input data for the agent (default: {})
        task: Task for the agent to perform (default: "process")
        context: Additional execution context (default: {})
        environment: Environment to execute in (default: "production")
        wait: If True, wait for completion before returning (default: False)

    Returns:
        ExecutionResult containing execution details and agent output

    Raises:
        ValidationError: If agent_name is invalid
        ExecutionError: If execution fails
        TimeoutError: If wait=True and execution times out

    Example:
        result = client.execute_agent("data_processor",
                                      data={"csv_file": "data.csv"},
                                      wait=True)
        if result.is_success:
            print(f"Agent output: {result.result}")
    """
    async def _invoke() -> ExecutionResult:
        try:
            return await self.execute_agent_async(
                agent_name=agent_name,
                data=data,
                task=task,
                context=context,
                environment=environment,
                wait=wait
            )
        finally:
            # asyncio.run() closes its event loop on return; an aiohttp
            # session created inside it becomes unusable on the next sync
            # call ("Event loop is closed"), so release it before exiting.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
447
|
+
|
|
448
|
+
def execute_workflow(self, workflow_name: str, data: Dict[str, Any] = None,
                     context: Dict[str, Any] = None, environment: str = "production",
                     wait: bool = False) -> ExecutionResult:
    """
    Execute a workflow synchronously.

    Args:
        workflow_name: Name of the workflow to execute
        data: Input data for the workflow (default: {})
        context: Additional execution context (default: {})
        environment: Environment to execute in (default: "production")
        wait: If True, wait for completion before returning (default: False)

    Returns:
        ExecutionResult containing execution details and workflow output

    Example:
        result = client.execute_workflow("data_pipeline", data={"source": "s3"})
    """
    async def _invoke() -> ExecutionResult:
        try:
            return await self.execute_workflow_async(
                workflow_name=workflow_name,
                data=data,
                context=context,
                environment=environment,
                wait=wait
            )
        finally:
            # Release the session before asyncio.run() closes its loop,
            # otherwise the next sync call reuses a loop-dead session.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
474
|
+
|
|
475
|
+
def execute_and_wait(self, agent_name: str = None, workflow_name: str = None,
                     data: Dict[str, Any] = None, timeout: int = 300, **kwargs) -> ExecutionResult:
    """
    Execute and wait for completion synchronously.

    Args:
        agent_name: Name of the agent to execute (mutually exclusive with workflow_name)
        workflow_name: Name of the workflow to execute (mutually exclusive with agent_name)
        data: Input data for execution
        timeout: Maximum time to wait in seconds (default: 300)
        **kwargs: Additional arguments passed to execution

    Returns:
        ExecutionResult with completed execution results

    Example:
        result = client.execute_and_wait("my_agent", data={"input": "test"})
    """
    async def _invoke() -> ExecutionResult:
        try:
            return await self.execute_and_wait_async(
                agent_name=agent_name,
                workflow_name=workflow_name,
                data=data,
                timeout=timeout,
                **kwargs
            )
        finally:
            # Release the session before asyncio.run() closes its loop,
            # otherwise the next sync call reuses a loop-dead session.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
500
|
+
|
|
501
|
+
def get_execution(self, execution_id: str) -> ExecutionResult:
    """
    Get current execution status and results.

    Args:
        execution_id: ID of the execution to check

    Returns:
        ExecutionResult with current status and results

    Example:
        result = client.get_execution("exec_123")
        print(f"Status: {result.status}")
    """
    async def _invoke() -> ExecutionResult:
        try:
            return await self.get_execution_async(execution_id)
        finally:
            # Release the session before asyncio.run() closes its loop,
            # otherwise the next sync call reuses a loop-dead session.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
516
|
+
|
|
517
|
+
def wait_for_execution(self, execution_id: str, timeout: int = 300,
                       poll_interval: float = 2.0) -> ExecutionResult:
    """
    Wait for execution to complete.

    Args:
        execution_id: ID of the execution to wait for
        timeout: Maximum time to wait in seconds (default: 300)
        poll_interval: How often to poll for completion in seconds (default: 2.0)

    Returns:
        ExecutionResult with completed execution results

    Example:
        result = client.wait_for_execution("exec_123", timeout=60)
    """
    async def _invoke() -> ExecutionResult:
        try:
            return await self.wait_for_execution_async(
                execution_id=execution_id,
                timeout=timeout,
                poll_interval=poll_interval
            )
        finally:
            # Release the session before asyncio.run() closes its loop,
            # otherwise the next sync call reuses a loop-dead session.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
538
|
+
|
|
539
|
+
def cancel_execution(self, execution_id: str) -> bool:
    """
    Cancel a running execution.

    Args:
        execution_id: ID of the execution to cancel

    Returns:
        True if cancellation was successful

    Example:
        success = client.cancel_execution("exec_123")
    """
    async def _invoke() -> bool:
        try:
            return await self.cancel_execution_async(execution_id)
        finally:
            # Release the session before asyncio.run() closes its loop,
            # otherwise the next sync call reuses a loop-dead session.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
553
|
+
|
|
554
|
+
def list_executions(self, limit: int = 50, offset: int = 0,
                    status: str = None, target_type: str = None,
                    environment: str = None) -> List[ExecutionResult]:
    """
    List recent executions with filtering.

    Args:
        limit: Maximum number of executions to return (default: 50)
        offset: Number of executions to skip (default: 0)
        status: Filter by status (queued, running, completed, failed, cancelled)
        target_type: Filter by target type (agent, workflow)
        environment: Filter by environment (production, staging)

    Returns:
        List of ExecutionResult objects

    Example:
        executions = client.list_executions(limit=10, status="completed")
    """
    # Delegate to list_executions_async instead of duplicating the
    # request/parsing logic inline (the two copies had drifted risk).
    async def _invoke() -> List[ExecutionResult]:
        try:
            return await self.list_executions_async(
                limit=limit,
                offset=offset,
                status=status,
                target_type=target_type,
                environment=environment
            )
        finally:
            # Release the session before asyncio.run() closes its loop,
            # otherwise the next sync call reuses a loop-dead session.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
594
|
+
|
|
595
|
+
def get_latest_execution(self, agent_name: str = None, workflow_name: str = None,
                         environment: str = "production") -> Optional[ExecutionResult]:
    """
    Get the most recent execution for an agent or workflow.

    Args:
        agent_name: Name of the agent (mutually exclusive with workflow_name)
        workflow_name: Name of the workflow (mutually exclusive with agent_name)
        environment: Environment to filter by (default: "production")

    Returns:
        ExecutionResult with the latest execution, or None if no executions found

    Raises:
        ValidationError: If both or neither agent_name/workflow_name are provided

    Example:
        latest = client.get_latest_execution(agent_name="my_agent")
        if latest:
            print(f"Latest result: {latest.result}")
    """
    # Delegate to get_latest_execution_async instead of duplicating its
    # validation/listing/filtering logic inline.
    async def _invoke() -> Optional[ExecutionResult]:
        try:
            return await self.get_latest_execution_async(
                agent_name=agent_name,
                workflow_name=workflow_name,
                environment=environment
            )
        finally:
            # Release the session before asyncio.run() closes its loop,
            # otherwise the next sync call reuses a loop-dead session.
            if self._session and not self._session.closed:
                await self._session.close()
            self._session = None

    return asyncio.run(_invoke())
|
|
645
|
+
|
|
646
|
+
# ===========================================
|
|
647
|
+
# ASYNC VERSIONS (For Advanced Users)
|
|
648
|
+
# ===========================================
|
|
649
|
+
|
|
650
|
+
async def execute_agent_async(self, agent_name: str, data: Dict[str, Any] = None,
                              task: str = "process", context: Dict[str, Any] = None,
                              environment: str = "production", wait: bool = False) -> ExecutionResult:
    """Async variant of execute_agent(); thin delegate to run()."""
    return await self.run(
        agent_name=agent_name,
        data=data,
        task=task,
        context=context,
        environment=environment,
        wait_for_completion=wait
    )
|
|
662
|
+
|
|
663
|
+
async def execute_workflow_async(self, workflow_name: str, data: Dict[str, Any] = None,
                                 context: Dict[str, Any] = None, environment: str = "production",
                                 wait: bool = False) -> ExecutionResult:
    """Async variant of execute_workflow(); thin delegate to run()."""
    return await self.run(
        workflow_name=workflow_name,
        data=data,
        context=context,
        environment=environment,
        wait_for_completion=wait
    )
|
|
674
|
+
|
|
675
|
+
async def execute_and_wait_async(self, agent_name: str = None, workflow_name: str = None,
                                 data: Dict[str, Any] = None, timeout: int = 300, **kwargs) -> ExecutionResult:
    """Execute and wait for completion asynchronously.

    Honors the ``timeout`` argument (previously it was accepted but
    silently dropped, so waits fell back to the internal 3600 s cap).

    Args:
        agent_name: Agent to execute (mutually exclusive with workflow_name).
        workflow_name: Workflow to execute (mutually exclusive with agent_name).
        data: Input data for the execution.
        timeout: Maximum time to wait for completion, in seconds.
        **kwargs: Additional arguments forwarded to run()
            (``poll_interval`` is honored for the wait loop).

    Returns:
        ExecutionResult with completed execution results.

    Raises:
        TimeoutError: If the execution does not complete within ``timeout``.
    """
    poll_interval = kwargs.pop('poll_interval', 2.0)
    # Submit without run()'s internal wait so we control the deadline here.
    result = await self.run(
        agent_name=agent_name,
        workflow_name=workflow_name,
        data=data,
        wait_for_completion=False,
        **kwargs
    )
    return await self._wait_for_completion(result.execution_id, poll_interval, timeout)
|
|
685
|
+
|
|
686
|
+
async def get_execution_async(self, execution_id: str) -> ExecutionResult:
    """Async variant of get_execution(); delegates to status()."""
    return await self.status(execution_id)
|
|
689
|
+
|
|
690
|
+
async def wait_for_execution_async(self, execution_id: str, timeout: int = 300,
                                   poll_interval: float = 2.0) -> ExecutionResult:
    """Async variant of wait_for_execution(); polls until terminal or timeout."""
    return await self._wait_for_completion(execution_id, poll_interval, timeout)
|
|
694
|
+
|
|
695
|
+
async def cancel_execution_async(self, execution_id: str) -> bool:
    """Async variant of cancel_execution(); delegates to cancel()."""
    return await self.cancel(execution_id)
|
|
698
|
+
|
|
699
|
+
async def list_executions_async(self, limit: int = 50, offset: int = 0,
                                status: str = None, target_type: str = None,
                                environment: str = None) -> List[ExecutionResult]:
    """List recent executions asynchronously, with optional filters.

    Args:
        limit: Maximum number of executions to return.
        offset: Number of executions to skip.
        status: Filter by status (queued, running, completed, failed, cancelled).
        target_type: Filter by target type (agent, workflow).
        environment: Filter by environment (production, staging).

    Returns:
        List of ExecutionResult objects.
    """
    query = {'limit': limit, 'offset': offset}
    for key, value in (('status', status),
                       ('target_type', target_type),
                       ('environment', environment)):
        if value:
            query[key] = value

    payload = await self._make_request("GET", "/autonomous/executions", params=query)

    # The API may return either a bare list or a paginated envelope.
    if isinstance(payload, list):
        items = payload
    else:
        items = payload.get('executions', payload.get('items', []))

    return [ExecutionResult.from_dict(item) for item in items]
|
|
721
|
+
|
|
722
|
+
async def get_latest_execution_async(self, agent_name: Optional[str] = None, workflow_name: Optional[str] = None,
                                     environment: str = "production") -> Optional[ExecutionResult]:
    """Get the most recent execution for one agent or one workflow (async).

    Args:
        agent_name: Agent to look up (mutually exclusive with workflow_name).
        workflow_name: Workflow to look up (mutually exclusive with agent_name).
        environment: Deployment environment to search (default "production").

    Returns:
        The first matching ExecutionResult from the recent-executions list,
        or None when no recent execution matches the target name.

    Raises:
        ValidationError: If neither or both of agent_name/workflow_name are given.
    """
    # Exactly one of agent_name / workflow_name must be supplied.
    if not agent_name and not workflow_name:
        raise ValidationError("Either agent_name or workflow_name must be specified")
    if agent_name and workflow_name:
        raise ValidationError("Cannot specify both agent_name and workflow_name")

    # The list endpoint filters by type/environment but not by name, so
    # fetch a small window of recent executions and filter client-side.
    target_type = "agent" if agent_name else "workflow"
    executions = await self.list_executions_async(
        limit=10,  # a few extra so name filtering below still finds a hit
        target_type=target_type,
        environment=environment
    )

    target_name = agent_name or workflow_name
    # Returns the first match; presumably the API lists newest first —
    # TODO(review): confirm the ordering guarantee of /autonomous/executions.
    return next((execution for execution in executions
                 if execution.target_name == target_name), None)
# Cleanup
def close(self):
    """Close the HTTP session (for sync usage).

    Safe to call from both sync and async contexts: ``asyncio.run()``
    raises RuntimeError when an event loop is already running, so in
    that case the close is scheduled on the running loop instead.
    """
    if self._session and not self._session.closed:
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            # No loop running: drive the close coroutine ourselves.
            asyncio.run(self._session.close())
        else:
            # A loop is running (e.g. close() called from async code or a
            # finalizer inside a loop); schedule the close on it.
            loop.create_task(self._session.close())
def __del__(self):
    """Best-effort resource cleanup when the client is garbage collected."""
    try:
        self.close()
    except Exception:
        # A finalizer must never raise (e.g. during interpreter shutdown),
        # so any cleanup failure is deliberately swallowed here.
        pass
# ===========================================
# CONVENIENCE FUNCTIONS (One-off executions)
# ===========================================
async def execute_agent(
    api_key: str,
    agent_name: str,
    data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> ExecutionResult:
    """
    Execute an agent with a one-off client (async).

    A temporary DaitaClient is created for a single call and closed
    again by the async context manager.

    Args:
        api_key: DAITA API key
        agent_name: Name of the agent to execute
        data: Input data for the execution (optional)
        **kwargs: Additional arguments passed to execute_agent_async()

    Returns:
        ExecutionResult containing execution details
    """
    async with DaitaClient(api_key) as client:
        return await client.execute_agent_async(agent_name, data, **kwargs)
async def execute_workflow(
    api_key: str,
    workflow_name: str,
    data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> ExecutionResult:
    """
    Execute a workflow with a one-off client (async).

    A temporary DaitaClient is created for a single call and closed
    again by the async context manager.

    Args:
        api_key: DAITA API key
        workflow_name: Name of the workflow to execute
        data: Input data for the execution (optional)
        **kwargs: Additional arguments passed to execute_workflow_async()

    Returns:
        ExecutionResult containing execution details
    """
    async with DaitaClient(api_key) as client:
        return await client.execute_workflow_async(workflow_name, data, **kwargs)
# Synchronous convenience functions (primary interface)
def execute_agent_standalone(
    api_key: str,
    agent_name: str,
    data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> ExecutionResult:
    """
    Execute an agent with a one-off client (synchronous wrapper).

    Must be called from code that is NOT already inside a running event
    loop: asyncio.run() raises RuntimeError in that case — use
    execute_agent() directly from async code.

    Args:
        api_key: DAITA API key
        agent_name: Name of the agent to execute
        data: Input data for the execution (optional)
        **kwargs: Additional arguments

    Returns:
        ExecutionResult containing execution details

    Example:
        result = execute_agent_standalone("sk-123", "my_agent", data={"input": "test"})
    """
    return asyncio.run(execute_agent(api_key, agent_name, data, **kwargs))
def execute_workflow_standalone(
    api_key: str,
    workflow_name: str,
    data: Optional[Dict[str, Any]] = None,
    **kwargs
) -> ExecutionResult:
    """
    Execute a workflow with a one-off client (synchronous wrapper).

    Must be called from code that is NOT already inside a running event
    loop: asyncio.run() raises RuntimeError in that case — use
    execute_workflow() directly from async code.

    Args:
        api_key: DAITA API key
        workflow_name: Name of the workflow to execute
        data: Input data for the execution (optional)
        **kwargs: Additional arguments

    Returns:
        ExecutionResult containing execution details

    Example:
        result = execute_workflow_standalone("sk-123", "data_pipeline", data={"source": "s3"})
    """
    return asyncio.run(execute_workflow(api_key, workflow_name, data, **kwargs))