edda_framework-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edda/__init__.py +56 -0
- edda/activity.py +505 -0
- edda/app.py +996 -0
- edda/compensation.py +326 -0
- edda/context.py +489 -0
- edda/events.py +505 -0
- edda/exceptions.py +64 -0
- edda/hooks.py +284 -0
- edda/locking.py +322 -0
- edda/outbox/__init__.py +15 -0
- edda/outbox/relayer.py +274 -0
- edda/outbox/transactional.py +112 -0
- edda/pydantic_utils.py +316 -0
- edda/replay.py +799 -0
- edda/retry.py +207 -0
- edda/serialization/__init__.py +9 -0
- edda/serialization/base.py +83 -0
- edda/serialization/json.py +102 -0
- edda/storage/__init__.py +9 -0
- edda/storage/models.py +194 -0
- edda/storage/protocol.py +737 -0
- edda/storage/sqlalchemy_storage.py +1809 -0
- edda/viewer_ui/__init__.py +20 -0
- edda/viewer_ui/app.py +1399 -0
- edda/viewer_ui/components.py +1105 -0
- edda/viewer_ui/data_service.py +880 -0
- edda/visualizer/__init__.py +11 -0
- edda/visualizer/ast_analyzer.py +383 -0
- edda/visualizer/mermaid_generator.py +355 -0
- edda/workflow.py +218 -0
- edda_framework-0.1.0.dist-info/METADATA +748 -0
- edda_framework-0.1.0.dist-info/RECORD +35 -0
- edda_framework-0.1.0.dist-info/WHEEL +4 -0
- edda_framework-0.1.0.dist-info/entry_points.txt +2 -0
- edda_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,880 @@
"""
Data service for retrieving workflow instance data from storage.
"""

import inspect
import json
import logging
from typing import Any

from edda.pydantic_utils import is_pydantic_model
from edda.storage.protocol import StorageProtocol
from edda.workflow import get_all_workflows

logger = logging.getLogger(__name__)


class WorkflowDataService:
    """Service for retrieving workflow instance data using StorageProtocol."""

    def __init__(self, storage: StorageProtocol):
        """
        Initialize the data service.

        Args:
            storage: Storage instance implementing StorageProtocol
        """
        self.storage = storage

    async def get_all_instances(self, limit: int = 50) -> list[dict[str, Any]]:
        """
        Get all workflow instances.

        Args:
            limit: Maximum number of instances to return

        Returns:
            List of workflow instance dictionaries
        """
        instances = await self.storage.list_instances(limit=limit)
        return instances
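    # Usage sketch (illustrative; the storage object is any StorageProtocol
    # implementation, e.g. the SQLAlchemy backend in edda.storage.sqlalchemy_storage,
    # and the result keys shown are assumptions about the instance dicts):
    #
    #     service = WorkflowDataService(storage)
    #     instances = await service.get_all_instances(limit=10)
    #     for inst in instances:
    #         print(inst.get("instance_id"), inst.get("status"))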

    async def get_workflow_compensations(self, instance_id: str) -> dict[str, dict[str, Any]]:
        """
        Get registered compensations for a workflow instance.

        Args:
            instance_id: Workflow instance ID

        Returns:
            Dictionary mapping activity_id to compensation info:
            {activity_id: {"activity_name": str, "args": dict}}
        """
        compensations_list = await self.storage.get_compensations(instance_id)

        # Create a mapping of activity_id -> compensation info for quick lookup
        compensations_map: dict[str, dict[str, Any]] = {}
        for comp in compensations_list:
            activity_id = comp.get("activity_id")
            if activity_id is not None:
                compensations_map[activity_id] = {
                    "activity_name": comp.get("activity_name"),
                    "args": comp.get("args", {}),
                }

        return compensations_map
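    # Example shape of the returned mapping (all values are hypothetical):
    #
    #     {
    #         "book_flight_1": {
    #             "activity_name": "cancel_flight_booking",
    #             "args": {"booking_id": "f-42"},
    #         },
    #     }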

    async def get_instance_detail(self, instance_id: str) -> dict[str, Any]:
        """
        Get detailed information about a workflow instance.

        Args:
            instance_id: Workflow instance ID

        Returns:
            Dictionary containing instance, history, and compensation data
        """
        # Get basic instance info using StorageProtocol
        instance = await self.storage.get_instance(instance_id)

        if not instance:
            return {"instance": None, "history": []}

        # Get execution history using StorageProtocol
        history_rows = await self.storage.get_history(instance_id)

        # Transform workflow_history format to viewer format
        history = []
        workflow_name = instance.get("workflow_name", "unknown")

        for row in history_rows:
            # event_data is already parsed as a dict by StorageProtocol
            event_data = row.get("event_data", {})

            # Determine status from event_type
            event_type = row.get("event_type", "")
            if event_type == "ActivityCompleted":
                status = "completed"
            elif event_type == "ActivityFailed":
                status = "failed"
            elif event_type == "CompensationExecuted":
                status = "compensated"
            elif event_type == "CompensationFailed":
                status = "compensation_failed"
            elif event_type == "EventReceived":
                status = "event_received"
            else:
                status = "running"

            # Prefix the activity name for compensation events
            activity_id = row.get("activity_id")
            activity_name = event_data.get("activity_name", activity_id or "unknown")
            if event_type in ("CompensationExecuted", "CompensationFailed"):
                activity_name = f"Compensate: {activity_name}"

            history.append(
                {
                    "activity_id": activity_id,
                    "workflow_name": workflow_name,
                    "activity_name": activity_name,
                    "status": status,
                    "input_data": json.dumps(event_data.get("input", event_data.get("kwargs", {}))),
                    "output_data": json.dumps(event_data.get("result")),
                    "executed_at": row.get("created_at"),
                    "error": event_data.get("error_message"),
                    "error_type": event_data.get("error_type"),
                    "stack_trace": event_data.get("stack_trace"),
                }
            )

        # Get compensation information
        compensations = await self.get_workflow_compensations(instance_id)

        return {
            "instance": instance,
            "history": history,
            "compensations": compensations,
        }
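    # Sketch of one viewer-format history entry built above (all values are
    # hypothetical; input_data/output_data are JSON-encoded strings):
    #
    #     {
    #         "activity_id": "act-1",
    #         "workflow_name": "order_workflow",
    #         "activity_name": "reserve_inventory",
    #         "status": "completed",
    #         "input_data": '{"sku": "A-1"}',
    #         "output_data": '{"reserved": true}',
    #         "executed_at": "2024-01-01T00:00:00Z",
    #         "error": None,
    #         "error_type": None,
    #         "stack_trace": None,
    #     }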

    async def get_activity_detail(
        self, instance_id: str, activity_id: str
    ) -> dict[str, Any] | None:
        """
        Get detailed information about a specific activity execution.

        Args:
            instance_id: Workflow instance ID
            activity_id: Activity ID

        Returns:
            Activity detail dictionary, or None if not found
        """
        # Get the full history and find the specific activity
        history = await self.storage.get_history(instance_id)

        activity_row = None
        for row in history:
            if row.get("activity_id") == activity_id:
                activity_row = row
                break

        if not activity_row:
            return None

        # event_data is already parsed as a dict by StorageProtocol
        event_data = activity_row.get("event_data", {})

        # Determine status from event_type
        event_type = activity_row.get("event_type", "")
        if event_type == "ActivityCompleted":
            status = "completed"
        elif event_type == "ActivityFailed":
            status = "failed"
        elif event_type == "CompensationExecuted":
            status = "compensated"
        elif event_type == "CompensationFailed":
            status = "compensation_failed"
        else:
            status = "running"

        # Parse input and output.
        # For compensations the input is stored in 'kwargs'; for activities it is in 'input'.
        if event_type in ("CompensationExecuted", "CompensationFailed"):
            input_data = event_data.get("kwargs", {})
            output_data = None  # Compensations don't record output
        else:
            input_data = event_data.get("input", {})
            output_data = event_data.get("result")

        return {
            "activity_id": activity_id,
            "activity_name": event_data.get("activity_name", activity_id or "unknown"),
            "status": status,
            "input": input_data,
            "output": output_data,
            "executed_at": activity_row.get("created_at"),
            "error": event_data.get("error_message"),
            "error_type": event_data.get("error_type"),
            "stack_trace": event_data.get("stack_trace"),
        }
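    # Illustrative contrast of the two input locations (event payloads are
    # hypothetical): a compensation event with
    # event_data = {"kwargs": {"booking_id": "f-42"}} yields
    # input={"booking_id": "f-42"} and output=None, while a regular activity
    # event reads the "input" and "result" keys instead.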

    def get_workflow_source(self, workflow_name: str) -> str | None:
        """
        Get the source code for a workflow by name.

        Args:
            workflow_name: Name of the workflow

        Returns:
            Source code as a string, or None if not found or an error occurred
        """
        try:
            # Look up the workflow in the registry
            all_workflows = get_all_workflows()
            if workflow_name not in all_workflows:
                return None

            workflow = all_workflows[workflow_name]

            # Get the source code from the workflow function
            return inspect.getsource(workflow.func)

        except (OSError, TypeError) as e:
            # OSError: source not available (e.g., interactive shell)
            # TypeError: not a module, class, method, function, etc.
            logger.warning(f"Could not get source for {workflow_name}: {e}")
            return None
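    # Sketch (workflow name is hypothetical):
    #
    #     src = service.get_workflow_source("order_workflow")
    #     if src is not None:
    #         print(src.splitlines()[0])  # first line of the decorated function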

    async def get_activity_executions(
        self, instance_id: str, activity_name: str
    ) -> list[dict[str, Any]]:
        """
        Get all executions of a specific activity (for activities executed multiple times).

        Args:
            instance_id: Workflow instance ID
            activity_name: Activity name

        Returns:
            List of execution details, ordered by execution time
        """
        # Get the full history
        history = await self.storage.get_history(instance_id)

        executions = []
        for row in history:
            event_data = row.get("event_data", {})
            if event_data.get("activity_name") == activity_name:
                # Determine status from event_type
                event_type = row.get("event_type", "")
                if event_type == "ActivityCompleted":
                    status = "completed"
                elif event_type == "ActivityFailed":
                    status = "failed"
                else:
                    status = "running"

                executions.append(
                    {
                        "activity_id": row.get("activity_id"),
                        "activity_name": activity_name,
                        "status": status,
                        "input": event_data.get("input", {}),
                        "output": event_data.get("result"),
                        "executed_at": row.get("created_at"),
                        "error": event_data.get("error_message"),
                        "error_type": event_data.get("error_type"),
                        "stack_trace": event_data.get("stack_trace"),
                    }
                )

        # Already sorted by execution time (created_at) via get_history()
        return executions
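    # Sketch: counting retries of one activity (names are hypothetical):
    #
    #     runs = await service.get_activity_executions("wf-123", "charge_card")
    #     failed = [r for r in runs if r["status"] == "failed"]
    #     print(f"{len(runs)} attempts, {len(failed)} failed")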

    async def cancel_workflow(self, instance_id: str, edda_app_url: str) -> tuple[bool, str]:
        """
        Cancel a workflow via the EddaApp API.

        Args:
            instance_id: Workflow instance ID
            edda_app_url: EddaApp API base URL (e.g., "http://localhost:8001")

        Returns:
            Tuple of (success: bool, message: str)
        """
        import httpx

        try:
            logger.info(f"[Cancel] Attempting to cancel workflow: {instance_id}")
            logger.info(f"[Cancel] API URL: {edda_app_url}/cancel/{instance_id}")

            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{edda_app_url}/cancel/{instance_id}",
                    timeout=10.0,
                )

                logger.info(f"[Cancel] Response status: {response.status_code}")
                logger.info(f"[Cancel] Response body: {response.text}")

                if 200 <= response.status_code < 300:
                    return True, "Workflow cancelled successfully"
                elif response.status_code == 400:
                    error_msg = response.json().get("error", "Unknown error")
                    return False, f"Cannot cancel: {error_msg}"
                elif response.status_code == 404:
                    return False, "Workflow not found"
                else:
                    return False, f"Server error: HTTP {response.status_code}"

        except httpx.ConnectError as e:
            error_msg = f"Cannot connect to EddaApp at {edda_app_url}. Is it running?"
            logger.error(f"[Cancel] Connection error: {e}")
            return False, error_msg

        except httpx.TimeoutException as e:
            error_msg = "Request timed out. The server may be busy."
            logger.error(f"[Cancel] Timeout error: {e}")
            return False, error_msg

        except Exception as e:
            error_msg = f"Unexpected error: {type(e).__name__}: {e}"
            logger.error(f"[Cancel] Unexpected error: {e}")
            return False, error_msg
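    # Usage sketch (the URL is an assumption; use your EddaApp's address):
    #
    #     ok, msg = await service.cancel_workflow("wf-123", "http://localhost:8001")
    #     if not ok:
    #         logger.error("Cancel failed: %s", msg)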

    def get_all_workflows(self) -> dict[str, Any]:
        """
        Get all registered workflows.

        Returns:
            Dictionary mapping workflow name to workflow instance
        """
        return get_all_workflows()

    def get_workflow_parameters(self, workflow_name: str) -> dict[str, Any]:
        """
        Extract parameter information from a workflow's function signature.

        Args:
            workflow_name: Name of the workflow

        Returns:
            Dictionary mapping parameter name to parameter info:
            {
                "param_name": {
                    "name": str,
                    "type": str,  # "int", "str", "float", "bool", "enum", "list", "dict", "json"
                    "required": bool,
                    "default": Any | None,
                    # Enum specific
                    "enum_class": Type[Enum] | None,
                    "enum_values": list[tuple[str, Any]],
                    # list specific
                    "item_type": str | None,  # "str", "int", "enum", "dict", etc.
                    "item_enum_class": Type[Enum] | None,
                    # dict specific
                    "key_type": str | None,
                    "value_type": str | None,
                }
            }
        """
        from typing import get_args, get_origin

        all_workflows = self.get_all_workflows()
        if workflow_name not in all_workflows:
            return {}

        workflow = all_workflows[workflow_name]
        sig = inspect.signature(workflow.func)

        params_info = {}
        for param_name, param in sig.parameters.items():
            # Skip the WorkflowContext parameter (usually the first parameter)
            if getattr(param.annotation, "__name__", None) == "WorkflowContext":
                continue

            if param.annotation == inspect.Parameter.empty:
                # No annotation; fall back to JSON
                params_info[param_name] = {
                    "name": param_name,
                    "type": "json",
                    "required": param.default == inspect.Parameter.empty,
                    "default": param.default if param.default != inspect.Parameter.empty else None,
                }
                continue

            # Get the type annotation
            annotation = param.annotation

            # Handle typing.Optional[T] and Union[T, None] → extract T
            annotation_type_str = str(type(annotation))
            origin = get_origin(annotation)
            origin_str = str(origin) if origin else ""

            # Python 3.10+ unions (X | Y) use types.UnionType (no __origin__ attribute);
            # typing.Union uses __origin__ = typing.Union
            is_union = (
                "UnionType" in annotation_type_str or "Union" in origin_str or origin is type(None)
            )

            if is_union:
                # Optional[T] is Union[T, None]
                args = get_args(annotation)
                if args:
                    # Take the first non-None type
                    annotation = next((arg for arg in args if arg is not type(None)), annotation)
                    origin = get_origin(annotation)

            # Extract type information
            param_info = self._extract_type_info(annotation, origin)
            param_info["name"] = param_name
            param_info["required"] = param.default == inspect.Parameter.empty
            param_info["default"] = (
                param.default if param.default != inspect.Parameter.empty else None
            )

            params_info[param_name] = param_info

        # If there's exactly one parameter and it's a Pydantic model,
        # expand its fields to provide individual form inputs instead of a JSON textarea
        if len(params_info) == 1:
            single_param_name, single_param_info = next(iter(params_info.items()))
            if single_param_info.get("type") == "pydantic":
                # Try to expand the Pydantic model's fields
                expanded_params = self._expand_pydantic_fields(
                    single_param_info["json_schema"],
                    single_param_info.get("required", True),
                    single_param_info.get("model_class"),
                )
                # If expansion succeeded (has fields), use the expanded params
                # and store the original model info for reconstruction
                if expanded_params:
                    # Mark all expanded params with the original model name for reconstruction
                    for field_info in expanded_params.values():
                        field_info["_pydantic_model_name"] = single_param_name
                        field_info["_pydantic_model_class"] = single_param_info.get("model_class")
                    return expanded_params

        return params_info
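    # Worked example (hypothetical workflow): for
    #
    #     async def order_workflow(ctx: WorkflowContext, user_id: int, priority: str = "normal"):
    #         ...
    #
    # get_workflow_parameters("order_workflow") would return roughly:
    #
    #     {
    #         "user_id": {"name": "user_id", "type": "int", "required": True, "default": None},
    #         "priority": {"name": "priority", "type": "str", "required": False, "default": "normal"},
    #     }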

    def _extract_type_info(self, annotation: Any, origin: Any = None) -> dict[str, Any]:
        """
        Extract detailed type information from an annotation.

        Args:
            annotation: Type annotation
            origin: Result of typing.get_origin(annotation)

        Returns:
            Dictionary with type information
        """
        from enum import Enum
        from typing import get_args, get_origin

        if origin is None:
            origin = get_origin(annotation)

        # Check if it's a Pydantic model
        if is_pydantic_model(annotation):
            try:
                # Generate a JSON Schema from the Pydantic model
                schema = annotation.model_json_schema()
                return {
                    "type": "pydantic",
                    "model_class": annotation,
                    "model_name": annotation.__name__,
                    "json_schema": schema,
                }
            except Exception as e:
                # Fall back to JSON if schema generation fails
                logger.warning(f"Failed to generate JSON Schema for {annotation}: {e}")
                return {"type": "json"}

        # Check if it's an Enum
        if inspect.isclass(annotation) and issubclass(annotation, Enum):
            return {
                "type": "enum",
                "enum_class": annotation,
                "enum_values": [(member.name, member.value) for member in annotation],
            }

        # Check if it's a list
        if origin is list:
            args = get_args(annotation)
            if args:
                item_type_info = self._get_simple_type_name(args[0])
                return {
                    "type": "list",
                    "item_type": item_type_info["type"],
                    "item_enum_class": item_type_info.get("enum_class"),
                    "item_enum_values": item_type_info.get("enum_values"),
                }
            else:
                # list without a type parameter; fall back to JSON
                return {"type": "json"}

        # Check if it's a dict
        if origin is dict:
            args = get_args(annotation)
            if args and len(args) >= 2:
                key_type_info = self._get_simple_type_name(args[0])
                value_type_info = self._get_simple_type_name(args[1])
                return {
                    "type": "dict",
                    "key_type": key_type_info["type"],
                    "value_type": value_type_info["type"],
                }
            else:
                # dict without type parameters; fall back to JSON
                return {"type": "json"}

        # Basic types
        type_str = getattr(annotation, "__name__", str(annotation))
        if type_str in ("int", "str", "float", "bool"):
            return {"type": type_str}

        # Fall back to JSON for unknown types
        return {"type": "json"}
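    # Illustrative mappings produced by _extract_type_info (Color is a
    # hypothetical Enum subclass):
    #
    #     int            -> {"type": "int"}
    #     Color          -> {"type": "enum", "enum_class": Color, "enum_values": [...]}
    #     list[Color]    -> {"type": "list", "item_type": "enum", ...}
    #     dict[str, int] -> {"type": "dict", "key_type": "str", "value_type": "int"}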

    def _get_simple_type_name(self, annotation: Any) -> dict[str, Any]:
        """
        Get a simple type name for list/dict item types.

        Args:
            annotation: Type annotation

        Returns:
            Dictionary with type info
        """
        from enum import Enum
        from typing import get_origin

        origin = get_origin(annotation)

        # Check if it's a Pydantic model
        if is_pydantic_model(annotation):
            return {
                "type": "pydantic",
                "model_name": annotation.__name__,
            }

        # Check if it's an Enum
        if inspect.isclass(annotation) and issubclass(annotation, Enum):
            return {
                "type": "enum",
                "enum_class": annotation,
                "enum_values": [(member.name, member.value) for member in annotation],
            }

        # Check if it's a dict (for list[dict])
        if origin is dict:
            return {"type": "dict"}

        # Basic types
        type_str = getattr(annotation, "__name__", str(annotation))
        if type_str in ("int", "str", "float", "bool"):
            return {"type": type_str}

        # Fall back to JSON
        return {"type": "json"}

    def _resolve_ref(self, schema: dict[str, Any], ref_path: str) -> dict[str, Any]:
        """
        Resolve a JSON Schema $ref reference.

        Pydantic v2 uses $defs for nested models:
            {"$ref": "#/$defs/ShippingAddress"}

        Args:
            schema: Full JSON Schema containing $defs
            ref_path: Reference path like "#/$defs/ShippingAddress"

        Returns:
            Resolved schema definition
        """
        if not ref_path.startswith("#/"):
            # Only local refs are supported
            logger.warning(f"Unsupported $ref: {ref_path} (only local refs supported)")
            return {}

        # Remove "#/" and split by "/"
        parts = ref_path[2:].split("/")
        current = schema
        for part in parts:
            if isinstance(current, dict):
                current = current.get(part, {})
            else:
                logger.warning(f"Cannot resolve $ref part '{part}' in {ref_path}")
                return {}

        return current if isinstance(current, dict) else {}
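    # Example: with schema = {"$defs": {"ShippingAddress": {"type": "object", ...}}},
    # _resolve_ref(schema, "#/$defs/ShippingAddress") walks the parts
    # ["$defs", "ShippingAddress"] and returns the nested definition dict.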

    def _expand_pydantic_fields(
        self,
        json_schema: dict[str, Any],
        _is_required: bool,
        model_class: Any = None,
        _parent_field_name: str | None = None,
        nested_level: int = 0,
        root_schema: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """
        Expand Pydantic model fields from a JSON Schema into individual parameters.

        Args:
            json_schema: Pydantic model's JSON Schema (can be a nested schema)
            _is_required: Whether the original parameter was required
            model_class: The Pydantic model class (for Enum extraction)
            _parent_field_name: Parent field name for nested models
            nested_level: Nesting depth (0 = root, 1 = nested, 2+ = fallback to JSON)
            root_schema: Top-level schema for $ref resolution (same as json_schema at root)

        Returns:
            Dictionary mapping field name to field info, or an empty dict if no flat fields
        """
        from enum import Enum

        # Set root_schema at the beginning if not provided (root level)
        if root_schema is None:
            root_schema = json_schema

        expanded = {}

        # Extract properties from the JSON Schema
        properties = json_schema.get("properties", {})
        required_fields = set(json_schema.get("required", []))

        for field_name, field_schema in properties.items():
            field_type = field_schema.get("type")

            # Determine the parameter type
            param_info: dict[str, Any] = {
                "name": field_name,
                "required": field_name in required_fields,
                "default": field_schema.get("default"),
            }

            # First, try to get the Enum from model_class annotations (most reliable)
            enum_class = None
            if model_class and hasattr(model_class, "__annotations__"):
                field_annotation = model_class.__annotations__.get(field_name)
                if (
                    field_annotation
                    and inspect.isclass(field_annotation)
                    and issubclass(field_annotation, Enum)
                ):
                    enum_class = field_annotation

            # Check for an Enum (JSON Schema: {"enum": [...]})
            if enum_class or "enum" in field_schema:
                if enum_class:
                    param_info["type"] = "enum"
                    param_info["enum_class"] = enum_class
                    param_info["enum_values"] = [
                        (member.name, member.value) for member in enum_class
                    ]
                elif "enum" in field_schema:
                    # Fallback: treat as str with choices
                    param_info["type"] = "str"
                    param_info["enum_values"] = field_schema["enum"]

            # Basic types
            elif field_type == "string":
                param_info["type"] = "str"
            elif field_type == "integer":
                param_info["type"] = "int"
            elif field_type == "number":
                param_info["type"] = "float"
            elif field_type == "boolean":
                param_info["type"] = "bool"

            # Nested models - expand if 1 level deep, otherwise JSON textarea
            elif (
                field_type == "object"
                or "anyOf" in field_schema
                or "allOf" in field_schema
                or "$ref" in field_schema
            ):
                # A nested Pydantic model (e.g., ShippingAddress) can be represented as
                # {"type": "object"}, {"$ref": "#/$defs/..."}, or {"allOf": [...]}

                # Resolve $ref if present (use root_schema for the $defs lookup)
                nested_schema = field_schema
                if "$ref" in field_schema:
                    resolved = self._resolve_ref(root_schema, field_schema["$ref"])
                    if resolved:
                        nested_schema = resolved
                    else:
                        # Failed to resolve - fall back to a JSON textarea
                        param_info["type"] = "json"
                        param_info["json_schema"] = field_schema
                        param_info["description"] = field_schema.get(
                            "description", f"JSON object for {field_name}"
                        )
                        expanded[field_name] = param_info
                        continue

                # Check the nesting level: 0 = root, 1 = nested (expand), 2+ = too deep (JSON textarea)
                if nested_level >= 1:
                    # Too deep (2+ levels) - fall back to a JSON textarea
                    param_info["type"] = "json"
                    param_info["json_schema"] = field_schema
                    param_info["description"] = field_schema.get(
                        "description", f"JSON object for {field_name} (nested)"
                    )
                else:
                    # Level 0 → 1: recursively expand the nested model's fields
                    nested_expanded = self._expand_pydantic_fields(
                        json_schema=nested_schema,
                        _is_required=field_name in required_fields,
                        model_class=None,  # No model_class for nested (can't extract Enum reliably)
                        _parent_field_name=field_name,
                        nested_level=nested_level + 1,
                        root_schema=root_schema,  # Pass the root schema for $ref resolution
                    )

                    if nested_expanded:
                        # Successfully expanded - add all nested fields with metadata
                        for nested_field_name, nested_field_info in nested_expanded.items():
                            # Mark with the parent field name and nesting level for reconstruction
                            nested_field_info["_parent_field"] = field_name
                            nested_field_info["_nested_level"] = nested_level + 1
                            # Add the nested field with a qualified name (parent.child)
                            qualified_name = f"{field_name}.{nested_field_name}"
                            expanded[qualified_name] = nested_field_info
                        # Skip adding the parent field itself (it has been expanded)
                        continue
                    else:
                        # No fields expanded - fall back to a JSON textarea
                        param_info["type"] = "json"
                        param_info["json_schema"] = field_schema
                        param_info["description"] = field_schema.get(
                            "description", f"JSON object for {field_name}"
                        )
            elif field_type == "array":
                items_schema = field_schema.get("items", {})

                # Resolve $ref if present
                if "$ref" in items_schema:
                    resolved_items_schema = self._resolve_ref(root_schema, items_schema["$ref"])
                    if resolved_items_schema:
                        items_schema = resolved_items_schema

                # Check whether the items are a Pydantic model (an object with properties)
                if items_schema.get("type") == "object" and "properties" in items_schema:
                    # list[PydanticModel] - expand as a dynamic list
                    param_info["type"] = "list_of_pydantic"
                    param_info["json_schema"] = field_schema
                    param_info["item_schema"] = items_schema

                    # Recursively expand the item model's fields
                    item_fields = self._expand_pydantic_fields(
                        json_schema=items_schema,
                        _is_required=True,  # Items in a list are required
                        model_class=None,
                        _parent_field_name=None,
                        nested_level=0,  # Reset the nesting level for list items
                        root_schema=root_schema,
                    )
                    param_info["item_fields"] = item_fields
                    param_info["description"] = field_schema.get(
                        "description", f"Dynamic list of {items_schema.get('title', 'items')}"
                    )
                else:
                    # list[str], list[int], list[dict], etc. - fall back to a JSON textarea
                    param_info["type"] = "json"
                    param_info["json_schema"] = field_schema
                    items_type = items_schema.get("type", "object")
                    param_info["description"] = field_schema.get(
                        "description", f"JSON array for {field_name} (items: {items_type})"
                    )

            # Unknown type - skip
            else:
                continue

            # Add the field to the expanded params
            expanded[field_name] = param_info

        return expanded
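    # Sketch: expanding a model with one nested field (hypothetical models):
    #
    #     class Address(BaseModel):
    #         city: str
    #
    #     class Order(BaseModel):
    #         sku: str
    #         shipping: Address
    #
    # Order's schema expands to the flat field "sku" plus the qualified field
    # "shipping.city"; each nested entry carries "_parent_field" and
    # "_nested_level" so form values can be reassembled into the model.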

    async def start_workflow(
        self, workflow_name: str, params: dict[str, Any], edda_app_url: str
    ) -> tuple[bool, str, str | None]:
        """
        Start a workflow by sending a CloudEvent to EddaApp.

        This method creates a CloudEvent and sends it to EddaApp,
        which will trigger the workflow execution.

        Args:
            workflow_name: Name of the workflow to start
            params: Parameters to pass to the workflow
            edda_app_url: EddaApp API base URL (e.g., "http://localhost:8001")

        Returns:
            Tuple of (success: bool, message: str, instance_id: str | None)
        """
        import uuid

        import httpx
        from cloudevents.conversion import to_structured
        from cloudevents.http import CloudEvent

        try:
            logger.info(f"[StartWorkflow] Attempting to start workflow: {workflow_name}")
            logger.info(f"[StartWorkflow] Sending CloudEvent to: {edda_app_url}")
            logger.info(f"[StartWorkflow] Params: {params}")

            # Verify that the workflow exists
            all_workflows = self.get_all_workflows()
            if workflow_name not in all_workflows:
                return False, f"Workflow '{workflow_name}' not found in registry", None

            # Create the CloudEvent
            attributes = {
                "type": workflow_name,  # The workflow name is the event type
                "source": "edda.viewer",
                "id": str(uuid.uuid4()),
            }
            event = CloudEvent(attributes, data=params)

            # Convert to HTTP format (structured content mode)
            headers, body = to_structured(event)

            logger.info(f"[StartWorkflow] CloudEvent ID: {attributes['id']}")
            logger.info(f"[StartWorkflow] CloudEvent type: {workflow_name}")

            # Send the CloudEvent to EddaApp
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    edda_app_url,
                    headers=headers,
                    content=body,
                    timeout=10.0,
                )

                logger.info(f"[StartWorkflow] Response status: {response.status_code}")
                logger.info(f"[StartWorkflow] Response body: {response.text}")

                if 200 <= response.status_code < 300:
                    # CloudEvent accepted (200 OK or 202 Accepted).
                    # Note: we can't get an instance_id from the response because EddaApp
                    # returns immediately after accepting the event; the workflow is
                    # executed asynchronously.
                    return True, f"Workflow '{workflow_name}' started successfully", None
                else:
                    return False, f"Server error: HTTP {response.status_code}", None

        except httpx.ConnectError as e:
            error_msg = f"Cannot connect to EddaApp at {edda_app_url}. Is it running?"
            logger.error(f"[StartWorkflow] Connection error: {e}")
            return False, error_msg, None

        except httpx.TimeoutException as e:
            error_msg = "Request timed out. The server may be busy."
            logger.error(f"[StartWorkflow] Timeout error: {e}")
            return False, error_msg, None

        except Exception as e:
            error_msg = f"Unexpected error: {type(e).__name__}: {e}"
            logger.exception(f"[StartWorkflow] Unexpected error: {e}")
            return False, error_msg, None
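    # Usage sketch (URL and params are assumptions):
    #
    #     ok, msg, _ = await service.start_workflow(
    #         "order_workflow", {"user_id": 1}, "http://localhost:8001"
    #     )
    #     if not ok:
    #         logger.error("Start failed: %s", msg)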