pysfi-0.1.12-py3-none-any.whl → pysfi-0.1.14-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pysfi-0.1.12.dist-info → pysfi-0.1.14.dist-info}/METADATA +1 -1
- pysfi-0.1.14.dist-info/RECORD +68 -0
- {pysfi-0.1.12.dist-info → pysfi-0.1.14.dist-info}/entry_points.txt +3 -0
- sfi/__init__.py +19 -2
- sfi/alarmclock/__init__.py +3 -0
- sfi/alarmclock/alarmclock.py +23 -40
- sfi/bumpversion/__init__.py +3 -1
- sfi/bumpversion/bumpversion.py +64 -15
- sfi/cleanbuild/__init__.py +3 -0
- sfi/cleanbuild/cleanbuild.py +5 -1
- sfi/cli.py +25 -4
- sfi/condasetup/__init__.py +1 -0
- sfi/condasetup/condasetup.py +91 -76
- sfi/docdiff/__init__.py +1 -0
- sfi/docdiff/docdiff.py +3 -2
- sfi/docscan/__init__.py +1 -1
- sfi/docscan/docscan.py +78 -23
- sfi/docscan/docscan_gui.py +152 -48
- sfi/filedate/filedate.py +12 -5
- sfi/img2pdf/img2pdf.py +453 -0
- sfi/llmclient/llmclient.py +31 -8
- sfi/llmquantize/llmquantize.py +76 -37
- sfi/llmserver/__init__.py +1 -0
- sfi/llmserver/llmserver.py +63 -13
- sfi/makepython/makepython.py +1145 -201
- sfi/pdfsplit/pdfsplit.py +45 -12
- sfi/pyarchive/__init__.py +1 -0
- sfi/pyarchive/pyarchive.py +908 -278
- sfi/pyembedinstall/pyembedinstall.py +88 -89
- sfi/pylibpack/pylibpack.py +561 -463
- sfi/pyloadergen/pyloadergen.py +372 -218
- sfi/pypack/pypack.py +510 -959
- sfi/pyprojectparse/pyprojectparse.py +337 -40
- sfi/pysourcepack/__init__.py +1 -0
- sfi/pysourcepack/pysourcepack.py +210 -131
- sfi/quizbase/quizbase_gui.py +2 -2
- sfi/taskkill/taskkill.py +168 -59
- sfi/which/which.py +11 -3
- pysfi-0.1.12.dist-info/RECORD +0 -62
- sfi/workflowengine/workflowengine.py +0 -444
- {pysfi-0.1.12.dist-info → pysfi-0.1.14.dist-info}/WHEEL +0 -0
- /sfi/{workflowengine → img2pdf}/__init__.py +0 -0
sfi/workflowengine/workflowengine.py
@@ -1,444 +0,0 @@
"""Workflow Engine - A flexible async task orchestration system.

This module provides a comprehensive workflow engine for managing
complex task dependencies with support for I/O tasks, CPU-intensive tasks,
serial tasks, and parallel task execution.
"""

from __future__ import annotations

import asyncio
import time
from abc import ABC, abstractmethod
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Sequence


class TaskStatus(Enum):
    """Task status enumeration"""

    PENDING = "pending"
    READY = "ready"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"


class TaskType(Enum):
    """Task type enumeration"""

    SERIAL = "serial"  # Serial task
    PARALLEL = "parallel"  # Parallel task
    ASYNC = "async"  # Async I/O task
    CPU = "cpu"  # CPU-intensive task


@dataclass
class TaskResult:
    """Task execution result"""

    task_id: str
    success: bool
    data: Any
    execution_time: float
    error: Exception | None = None


class Task(ABC):
    """Task abstract base class"""

    def __init__(self, task_id: str, task_type: TaskType, dependencies: list[str] | None = None, timeout: float = 30.0):
        self.task_id = task_id
        self.task_type = task_type
        self.dependencies = dependencies or []
        self.timeout = timeout
        self.status = TaskStatus.PENDING
        self.result: TaskResult | None = None
        self.start_time: float | None = None
        self.end_time: float | None = None

    def get_dependencies(self) -> list[str]:
        """Get list of dependent task IDs"""
        return self.dependencies.copy()

    def can_execute(self, completed_tasks: set[str]) -> bool:
        """Check if task can be executed (dependencies satisfied)"""
        return all(dep in completed_tasks for dep in self.dependencies)

    def update_status(self, status: TaskStatus):
        """Update task status"""
        self.status = status

    @abstractmethod
    async def execute(self, context: dict[str, TaskResult]) -> Any:
        """Execute task logic, must be implemented by subclasses"""
        pass

    def get_execution_time(self) -> float:
        """Get task execution time"""
        if self.start_time and self.end_time:
            return self.end_time - self.start_time
        return 0.0


class IOTask(Task):
    """I/O-intensive task"""

    def __init__(self, task_id: str, duration: float, dependencies: list[str] | None = None, timeout: float = 30.0):
        super().__init__(task_id, TaskType.ASYNC, dependencies, timeout)
        self.duration = duration

    async def execute(self, context: dict[str, TaskResult]) -> Any:
        """Simulate I/O operation"""
        print(f"[IO] Starting task {self.task_id}, estimated duration: {self.duration}s")
        return await asyncio.wait_for(self._execute_io(context), timeout=self.timeout)

    async def _execute_io(self, context: dict[str, TaskResult]) -> Any:
        """Internal I/O execution method"""
        await asyncio.sleep(self.duration)
        return f"IO task {self.task_id} completed, dependencies: {list(context.keys())}"


class CPUTask(Task):
    """CPU-intensive task"""

    def __init__(self, task_id: str, iterations: int, dependencies: list[str] | None = None, timeout: float = 30.0):
        super().__init__(task_id, TaskType.CPU, dependencies, timeout)
        self.iterations = iterations

    async def execute(self, context: dict[str, TaskResult]) -> Any:
        """CPU-intensive computation task"""
        print(f"[CPU] Starting task {self.task_id}, iterations: {self.iterations}")

        # Move CPU-intensive task to thread pool to avoid blocking event loop
        def cpu_intensive_work():
            result = 0
            for i in range(self.iterations):
                result += i * i
            return result

        # Use asyncio.wait_for with timeout to prevent infinite hangs
        # Use run_in_executor for Python 3.8 compatibility (asyncio.to_thread is Python 3.9+)
        loop = asyncio.get_event_loop()
        result = await asyncio.wait_for(loop.run_in_executor(None, cpu_intensive_work), timeout=self.timeout)
        return f"CPU task {self.task_id} completed, result: {result}"


class SerialTask(Task):
    """Serial task (stateful, must execute sequentially)"""

    def __init__(
        self, task_id: str, process_func: Callable, dependencies: list[str] | None = None, timeout: float = 30.0
    ):
        super().__init__(task_id, TaskType.SERIAL, dependencies, timeout)
        self.process_func = process_func
        self.state = {}

    async def execute(self, context: dict[str, TaskResult]) -> Any:
        """Execute serial task"""
        print(f"[Serial] Starting serial task {self.task_id}")

        # Collect results from dependent tasks
        inputs = {dep_id: context[dep_id].data for dep_id in self.dependencies}

        # Execute process function
        if asyncio.iscoroutinefunction(self.process_func):
            result = await self.process_func(inputs, self.state)
        else:
            result = self.process_func(inputs, self.state)

        # Update state
        self.state = {"last_result": result, "executed": True}

        return f"Serial task {self.task_id} completed, result: {result}"


class ParallelTask(Task):
    """Parallel task (can execute concurrently with other tasks)"""

    def __init__(
        self,
        task_id: str,
        subtasks: Sequence[Task],
        dependencies: list[str] | None = None,
        timeout: float = 30.0,
        max_concurrent: int = 3,
    ):
        super().__init__(task_id, TaskType.PARALLEL, dependencies, timeout)
        self.subtasks = subtasks
        self.max_concurrent = max_concurrent

    async def execute(self, context: dict[str, TaskResult]) -> Any:
        """Execute subtasks in parallel"""
        print(f"[Parallel] Starting parallel task {self.task_id}, contains {len(self.subtasks)} subtasks")

        # Create semaphore to control concurrency
        semaphore = asyncio.Semaphore(self.max_concurrent)

        async def execute_subtask(subtask: Task, sem: asyncio.Semaphore):
            async with sem:
                subtask.start_time = time.time()
                subtask.update_status(TaskStatus.RUNNING)

                try:
                    data = await asyncio.wait_for(subtask.execute(context), timeout=subtask.timeout)
                    subtask.result = TaskResult(
                        task_id=subtask.task_id,
                        success=True,
                        data=data,
                        execution_time=time.time() - subtask.start_time,
                    )
                    subtask.update_status(TaskStatus.COMPLETED)
                    return subtask.result
                except asyncio.TimeoutError as e:
                    error = TimeoutError(f"Task {subtask.task_id} execution timeout")
                    subtask.result = TaskResult(
                        task_id=subtask.task_id,
                        success=False,
                        data=None,
                        execution_time=time.time() - subtask.start_time,
                        error=error,
                    )
                    subtask.update_status(TaskStatus.FAILED)
                    raise error from e
                except Exception as e:
                    subtask.result = TaskResult(
                        task_id=subtask.task_id,
                        success=False,
                        data=None,
                        execution_time=time.time() - subtask.start_time,
                        error=e,
                    )
                    subtask.update_status(TaskStatus.FAILED)
                    raise e

        # Execute all subtasks in parallel
        tasks = [execute_subtask(subtask, semaphore) for subtask in self.subtasks]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Process results using zip for better readability
        successful_results = []
        failed_results = []

        for subtask, result in zip(self.subtasks, results):
            if isinstance(result, Exception):
                failed_results.append(f"Subtask {subtask.task_id} failed: {result}")
            elif isinstance(result, TaskResult):
                successful_results.append(result.data)

        if failed_results:
            return f"Parallel task {self.task_id} partially failed: {failed_results}"

        return f"Parallel task {self.task_id} completed, results: {successful_results}"


class WorkflowEngine:
    """Workflow engine - core orchestrator"""

    def __init__(self, max_concurrent: int = 4):
        self.tasks: dict[str, Task] = {}
        self.results: dict[str, TaskResult] = {}
        self.max_concurrent = max_concurrent
        self.execution_order: list[list[str]] = []

    def add_task(self, task: Task):
        """Add task to workflow"""
        self.tasks[task.task_id] = task

    def validate_dependencies(self) -> bool:
        """Validate task dependencies, ensure no circular dependencies"""
        # Build adjacency list
        graph = defaultdict(list)
        in_degree = dict.fromkeys(self.tasks, 0)

        for task_id, task in self.tasks.items():
            for dep in task.get_dependencies():
                if dep not in self.tasks:
                    raise ValueError(f"Task {task_id} depends on unknown task {dep}")
                graph[dep].append(task_id)
                in_degree[task_id] += 1

        # Detect circular dependencies
        visited = 0
        queue = deque([task_id for task_id, degree in in_degree.items() if degree == 0])

        while queue:
            current = queue.popleft()
            visited += 1

            for neighbor in graph[current]:
                in_degree[neighbor] -= 1
                if in_degree[neighbor] == 0:
                    queue.append(neighbor)

        if visited != len(self.tasks):
            raise ValueError("Circular dependency detected in workflow")

        return True

    def calculate_execution_order(self) -> list[list[str]]:
        """Calculate task execution order (topological sort + level grouping)"""
        if not self.tasks:
            return []

        # Build adjacency list
        graph = defaultdict(list)
        in_degree = dict.fromkeys(self.tasks, 0)

        for task_id, task in self.tasks.items():
            for dep in task.get_dependencies():
                graph[dep].append(task_id)
                in_degree[task_id] += 1

        # Level-based topological sort
        execution_order = []
        queue = deque([task_id for task_id, degree in in_degree.items() if degree == 0])

        while queue:
            level_size = len(queue)
            current_level = []

            for _ in range(level_size):
                task_id = queue.popleft()
                current_level.append(task_id)

                for neighbor in graph[task_id]:
                    in_degree[neighbor] -= 1
                    if in_degree[neighbor] == 0:
                        queue.append(neighbor)

            if current_level:
                execution_order.append(current_level)

        self.execution_order = execution_order
        return execution_order

    async def execute_workflow(self) -> dict[str, TaskResult]:
        """Execute entire workflow"""
        print("=" * 50)
        print("Starting workflow execution")
        print("=" * 50)

        # Validate dependencies
        self.validate_dependencies()

        # Calculate execution order
        execution_order = self.calculate_execution_order()
        print(f"Execution plan ({len(execution_order)} phases):")
        for i, level in enumerate(execution_order, 1):
            print(f" Phase {i}: {level}")

        # Execute by level
        completed_tasks: set[str] = set()

        for level_index, level in enumerate(execution_order, 1):
            print(f"\n{'=' * 20} Phase {level_index} ({len(level)} tasks) {'=' * 20}")

            # Filter executable tasks in this level
            ready_tasks = []
            for task_id in level:
                task = self.tasks[task_id]
                if task.can_execute(completed_tasks):
                    task.update_status(TaskStatus.READY)
                    ready_tasks.append(task)

            if not ready_tasks:
                continue

            # Create semaphore for this level to control concurrency
            semaphore = asyncio.Semaphore(self.max_concurrent)

            async def execute_single_task(task: Task, sem: asyncio.Semaphore):
                async with sem:
                    task.start_time = time.time()
                    task.update_status(TaskStatus.RUNNING)

                    try:
                        # Collect results from dependent tasks
                        dependency_results = {dep_id: self.results[dep_id] for dep_id in task.get_dependencies()}

                        # Execute task
                        data = await asyncio.wait_for(task.execute(dependency_results), timeout=task.timeout)

                        task.end_time = time.time()
                        task.result = TaskResult(
                            task_id=task.task_id, success=True, data=data, execution_time=task.get_execution_time()
                        )
                        task.update_status(TaskStatus.COMPLETED)

                        # Store result
                        self.results[task.task_id] = task.result
                        completed_tasks.add(task.task_id)

                        print(f"[OK] Task {task.task_id} completed, duration: {task.get_execution_time():.2f}s")

                        return task.result

                    except asyncio.TimeoutError as e:
                        task.end_time = time.time()
                        task.result = TaskResult(
                            task_id=task.task_id,
                            success=False,
                            data=None,
                            execution_time=task.get_execution_time(),
                            error=e,
                        )
                        task.update_status(TaskStatus.FAILED)

                        # Store result and mark as completed (even if failed)
                        self.results[task.task_id] = task.result
                        completed_tasks.add(task.task_id)

                        print(f"[FAIL] Task {task.task_id} timeout")
                        raise e from e
                    except Exception as e:
                        task.end_time = time.time()
                        task.result = TaskResult(
                            task_id=task.task_id,
                            success=False,
                            data=None,
                            execution_time=task.get_execution_time(),
                            error=e,
                        )
                        task.update_status(TaskStatus.FAILED)

                        # Store result and mark as completed (even if failed)
                        self.results[task.task_id] = task.result
                        completed_tasks.add(task.task_id)

                        print(f"[FAIL] Task {task.task_id} failed: {e}")
                        raise e

            # Execute all ready tasks in this level in parallel
            tasks_to_execute = [execute_single_task(task, semaphore) for task in ready_tasks]

            # Use return_exceptions=True to ensure all tasks complete even if some fail
            await asyncio.gather(*tasks_to_execute, return_exceptions=True)

        print(f"\n{'=' * 50}")
        print("Workflow execution completed")
        print(f"{'=' * 50}")

        return self.results

    def get_execution_summary(self) -> dict[str, Any]:
        """Get execution summary"""
        total_tasks = len(self.tasks)
        completed = sum(1 for task in self.tasks.values() if task.status == TaskStatus.COMPLETED)
        failed = sum(1 for task in self.tasks.values() if task.status == TaskStatus.FAILED)

        total_time = 0.0
        for task in self.tasks.values():
            if task.result:
                total_time += task.result.execution_time

        return {
            "total_tasks": total_tasks,
            "completed": completed,
            "failed": failed,
            "pending": total_tasks - completed - failed,
            "total_execution_time": total_time,
            "success_rate": completed / total_tasks if total_tasks > 0 else 0,
        }
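For context, the removed module was self-contained: the task classes and WorkflowEngine above were its whole public surface. Below is a minimal usage sketch against the 0.1.12 API as shown in this diff; the task ids, durations, iteration counts, and the summarize helper are illustrative only, and the import path no longer exists in 0.1.14.

import asyncio

# Import path as it existed in pysfi 0.1.12; removed in 0.1.14.
from sfi.workflowengine.workflowengine import CPUTask, IOTask, SerialTask, WorkflowEngine


def summarize(inputs, state):
    # SerialTask passes the data of completed dependencies keyed by task id, plus the task's own state dict.
    return {"inputs_seen": sorted(inputs)}


async def main():
    engine = WorkflowEngine(max_concurrent=2)
    engine.add_task(IOTask("fetch", duration=0.1))                                     # no dependencies: phase 1
    engine.add_task(CPUTask("crunch", iterations=100_000, dependencies=["fetch"]))     # phase 2
    engine.add_task(SerialTask("report", summarize, dependencies=["fetch", "crunch"]))  # phase 3
    results = await engine.execute_workflow()
    print(results["report"].data)
    print(engine.get_execution_summary())


asyncio.run(main())

Note the engine's failure handling as written: tasks in the same topological level run concurrently up to max_concurrent, and a failed task is still added to completed_tasks with its TaskResult stored in results, so downstream tasks run anyway and receive the failed result in their context.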
{pysfi-0.1.12.dist-info → pysfi-0.1.14.dist-info}/WHEEL: File without changes
/sfi/{workflowengine → img2pdf}/__init__.py: File without changes