loom-core 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. loom_core-0.1.0.dist-info/METADATA +342 -0
  2. loom_core-0.1.0.dist-info/RECORD +50 -0
  3. loom_core-0.1.0.dist-info/WHEEL +5 -0
  4. loom_core-0.1.0.dist-info/entry_points.txt +2 -0
  5. loom_core-0.1.0.dist-info/licenses/LICENSE +21 -0
  6. loom_core-0.1.0.dist-info/top_level.txt +1 -0
  7. src/__init__.py +45 -0
  8. src/cli/__init__.py +5 -0
  9. src/cli/cli.py +246 -0
  10. src/common/activity.py +30 -0
  11. src/common/config.py +9 -0
  12. src/common/errors.py +64 -0
  13. src/common/workflow.py +56 -0
  14. src/core/__init__.py +0 -0
  15. src/core/compiled.py +41 -0
  16. src/core/context.py +256 -0
  17. src/core/engine.py +106 -0
  18. src/core/handle.py +166 -0
  19. src/core/logger.py +60 -0
  20. src/core/runner.py +53 -0
  21. src/core/state.py +96 -0
  22. src/core/worker.py +147 -0
  23. src/core/workflow.py +168 -0
  24. src/database/__init__.py +0 -0
  25. src/database/db.py +716 -0
  26. src/decorators/__init__.py +0 -0
  27. src/decorators/activity.py +126 -0
  28. src/decorators/workflow.py +46 -0
  29. src/lib/progress.py +109 -0
  30. src/lib/utils.py +25 -0
  31. src/migrations/down/001_setup_pragma.sql +5 -0
  32. src/migrations/down/002_create_workflows.sql +3 -0
  33. src/migrations/down/003.create_events.sql +3 -0
  34. src/migrations/down/004.create_tasks.sql +3 -0
  35. src/migrations/down/005.create_indexes.sql +5 -0
  36. src/migrations/down/006_auto_update_triggers.sql +4 -0
  37. src/migrations/down/007_create_logs.sql +1 -0
  38. src/migrations/up/001_setup_pragma.sql +11 -0
  39. src/migrations/up/002_create_workflows.sql +15 -0
  40. src/migrations/up/003_create_events.sql +13 -0
  41. src/migrations/up/004_create_tasks.sql +23 -0
  42. src/migrations/up/005_create_indexes.sql +11 -0
  43. src/migrations/up/006_auto_update_triggers.sql +19 -0
  44. src/migrations/up/007_create_logs.sql +10 -0
  45. src/schemas/__init__.py +0 -0
  46. src/schemas/activity.py +13 -0
  47. src/schemas/database.py +17 -0
  48. src/schemas/events.py +70 -0
  49. src/schemas/tasks.py +58 -0
  50. src/schemas/workflow.py +33 -0
File without changes
@@ -0,0 +1,126 @@
1
+ from ..schemas.workflow import Func
2
+
3
+
4
+ def activity(
5
+ name: str | None = None,
6
+ description: str | None = None,
7
+ retry_count: int = 0,
8
+ timeout_seconds: int = 60,
9
+ ):
10
+ """
11
+ Decorator to define an activity function with execution policies.
12
+
13
+ Activities are the only place where side effects should occur in Loom workflows.
14
+ They represent external operations like API calls, database queries, file operations,
15
+ or any non-deterministic work. Activities can be retried on failure and have
16
+ configurable timeouts.
17
+
18
+ Args:
19
+ name: Custom name for the activity. If None, uses the function name.
20
+ Should be descriptive and unique for debugging purposes.
21
+ description: Human-readable description of what this activity does.
22
+ Used for documentation, logging, and monitoring.
23
+ retry_count: Number of times to retry the activity on failure.
24
+ Must be >= 0. Set to 0 to disable retries.
25
+ timeout_seconds: Maximum time in seconds to wait for activity completion.
26
+ Must be > 0. Activities exceeding this timeout will be cancelled.
27
+
28
+ Returns:
29
+ The decorated function with activity metadata attached.
30
+
31
+ Example:
32
+ ```python
33
+ @loom.activity(
34
+ name="send_notification_email",
35
+ description="Send email notification to user",
36
+ retry_count=3,
37
+ timeout_seconds=30
38
+ )
39
+ async def send_email(user_email: str, subject: str, body: str) -> bool:
40
+ # This activity can fail and be retried up to 3 times
41
+ async with httpx.AsyncClient() as client:
42
+ response = await client.post("/api/email/send", json={
43
+ "to": user_email,
44
+ "subject": subject,
45
+ "body": body
46
+ })
47
+ response.raise_for_status()
48
+ return True
49
+
50
+ # Usage in workflow step:
51
+ @loom.step
52
+ async def notify_user(self, ctx: WorkflowContext[MyState]):
53
+ success = await ctx.activity(
54
+ send_email,
55
+ ctx.input.user_email,
56
+ "Welcome!",
57
+ "Thanks for joining!"
58
+ )
59
+ ctx.state.email_sent = success
60
+ ```
61
+
62
+ Note:
63
+ - Activities should be idempotent when possible (safe to retry)
64
+ - Activities are the execution boundary - no side effects in workflow steps
65
+ - Activity results are persisted and replayed during workflow recovery
66
+ - Long-running activities should implement proper cancellation handling
67
+
68
+ Raises:
69
+ ValueError: If retry_count is negative or timeout_seconds is not positive.
70
+ """
71
+ # Validate parameters
72
+ if not isinstance(retry_count, int) or retry_count < 0:
73
+ raise ValueError(
74
+ f"Activity retry_count must be a non-negative integer, got {retry_count}"
75
+ )
76
+
77
+ if not isinstance(timeout_seconds, int) or timeout_seconds <= 0:
78
+ raise ValueError(
79
+ f"Activity timeout_seconds must be a positive integer, got {timeout_seconds}"
80
+ )
81
+
82
+ if name is not None and not isinstance(name, str):
83
+ raise ValueError(
84
+ f"Activity name must be a string or None, got {type(name).__name__}"
85
+ )
86
+
87
+ if description is not None and not isinstance(description, str):
88
+ raise ValueError(
89
+ f"Activity description must be a string or None, got {type(description).__name__}"
90
+ )
91
+
92
+ # Reasonable limits to prevent configuration errors
93
+ if retry_count > 100:
94
+ raise ValueError(
95
+ f"Activity retry_count seems excessive: {retry_count}. Maximum recommended is 100."
96
+ )
97
+
98
+ if timeout_seconds > 3600: # 1 hour
99
+ raise ValueError(
100
+ f"Activity timeout_seconds seems excessive: {timeout_seconds}s. "
101
+ f"Consider if this operation should really take more than 1 hour."
102
+ )
103
+
104
+ def decorator(func: Func) -> Func:
105
+ """
106
+ Apply activity metadata to the target function.
107
+
108
+ Args:
109
+ func: The function to decorate as an activity
110
+
111
+ Returns:
112
+ The function with activity metadata attached
113
+ """
114
+ # Attach activity metadata
115
+ setattr(func, "_activity_name", name or getattr(func, "__name__"))
116
+ setattr(func, "_activity_description", description or "")
117
+ setattr(func, "_activity_retry_count", retry_count)
118
+ setattr(func, "_activity_timeout_seconds", timeout_seconds)
119
+
120
+ # Add helpful debugging info
121
+ original_name = getattr(func, "__name__", "unknown")
122
+ setattr(func, "_activity_original_name", original_name)
123
+
124
+ return func
125
+
126
+ return decorator
@@ -0,0 +1,46 @@
1
+ from ..schemas.workflow import ClsT, Func
2
+
3
+
4
+ def workflow(
5
+ name: str | None = None,
6
+ description: str | None = None,
7
+ version: str = "1.0.0",
8
+ ):
9
+ """
10
+ Decorator to define a workflow class.
11
+
12
+ Args:
13
+ name (str | None): The name of the workflow. Defaults to the class name if None.
14
+ description (str | None): A brief description of the workflow. Defaults to an empty string if None.
15
+ version (str): The version of the workflow. Defaults to "1.0.0".
16
+ """
17
+
18
+ def decorator(cls: ClsT) -> ClsT:
19
+ setattr(cls, "_workflow_name", name or getattr(cls, "__name__"))
20
+ setattr(cls, "_workflow_classname", getattr(cls, "__name__"))
21
+ setattr(cls, "_workflow_module", getattr(cls, "__module__"))
22
+ setattr(cls, "_workflow_description", description or "")
23
+ setattr(cls, "_workflow_version", version)
24
+ return cls
25
+
26
+ return decorator
27
+
28
+
29
+ def step(
30
+ name: str | None = None,
31
+ description: str | None = None,
32
+ ):
33
+ """
34
+ Decorator to define a step method within a workflow.
35
+
36
+ Args:
37
+ name (str | None): The name of the step. Defaults to the method name if None.
38
+ description (str | None): A brief description of the step. Defaults to an empty string if None.
39
+ """
40
+
41
+ def decorator(func: Func) -> Func:
42
+ setattr(func, "_step_name", name or getattr(func, "__name__"))
43
+ setattr(func, "_step_description", description or "")
44
+ return func
45
+
46
+ return decorator
src/lib/progress.py ADDED
@@ -0,0 +1,109 @@
1
+ """Progress tracking and status display for workflows."""
2
+
3
+ from datetime import datetime
4
+ from typing import Optional
5
+
6
+ from rich.console import Console
7
+ from rich.progress import (
8
+ BarColumn,
9
+ Progress,
10
+ SpinnerColumn,
11
+ TaskID,
12
+ TextColumn,
13
+ TimeElapsedColumn,
14
+ )
15
+ from rich.table import Table
16
+
17
+
18
class WorkflowProgress:
    """Rich-based live progress display for a running workflow."""

    def __init__(self, workflow_name: str, total_steps: int):
        """Set up the tracker.

        Args:
            workflow_name: Name of the workflow being executed
            total_steps: Total number of steps in the workflow
        """
        self.workflow_name = workflow_name
        self.total_steps = total_steps
        self.current_step = 0
        self.started_at = datetime.now()
        self.console = Console()
        # Spinner | description | bar | percentage | elapsed time
        columns = (
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
            TimeElapsedColumn(),
        )
        self.progress = Progress(*columns, console=self.console)
        self.task_id: Optional[TaskID] = None

    def start(self):
        """Begin rendering the progress bar."""
        self.task_id = self.progress.add_task(
            f"[cyan]Executing {self.workflow_name}", total=self.total_steps
        )
        self.progress.start()

    def update(self, step_name: str):
        """Advance the bar by one step.

        Args:
            step_name: Name of the step that just completed
        """
        self.current_step += 1
        if self.task_id is None:
            # start() was never called; nothing to render
            return
        self.progress.update(
            self.task_id,
            completed=self.current_step,
            description=f"[cyan]Step: {step_name}",
        )

    def complete(self):
        """Stop the bar and print a completion summary with elapsed time."""
        self.progress.stop()
        elapsed = datetime.now() - self.started_at
        self.console.print(
            f"[green]Workflow '{self.workflow_name}' completed in {elapsed}[/green]"
        )

    def error(self, message: str):
        """Stop the bar and print an error message.

        Args:
            message: Error message to display
        """
        self.progress.stop()
        self.console.print(f"[red]Error: {message}[/red]")
81
+
82
+
83
def create_status_table(workflows: list) -> Table:
    """Create a formatted status table for workflows.

    Args:
        workflows: List of workflow dictionaries (expects keys "name",
            "status", "created_at"; missing keys render as "Unknown")

    Returns:
        Rich Table object
    """
    # Map statuses to rich markup. Includes both 'CANCELLED' (the spelling
    # allowed by the workflows table CHECK constraint) and 'CANCELED' (the
    # WorkflowStatus enum spelling) so rows from either source render styled.
    status_styles = {
        "RUNNING": "[yellow]RUNNING[/yellow]",
        "COMPLETED": "[green]COMPLETED[/green]",
        "FAILED": "[red]FAILED[/red]",
        "CANCELED": "[dim]CANCELED[/dim]",
        "CANCELLED": "[dim]CANCELLED[/dim]",
    }

    table = Table(title="Workflow Status", show_header=True)
    table.add_column("Name", style="cyan", width=30)
    table.add_column("Status", justify="center", width=15)
    table.add_column("Created", style="green", width=20)

    for wf in workflows:
        # Single lookup; unknown statuses fall through unstyled
        status = wf.get("status", "Unknown")
        table.add_row(
            wf.get("name", "Unknown"),
            status_styles.get(status, status),
            wf.get("created_at", "Unknown"),
        )

    return table
src/lib/utils.py ADDED
@@ -0,0 +1,25 @@
1
+ import os
2
+ from typing import List
3
+
4
+ from ..common.config import MIGRATION_DOWNGRADES, MIGRATION_UPGRADES
5
+ from ..schemas.database import Migration
6
+
7
+
8
def get_migrations(direction: str) -> List[Migration]:
    """Load migration SQL files for the given direction.

    Args:
        direction: Either "up" (upgrades) or "down" (downgrades).

    Returns:
        Migrations ordered by filename; files carry a numeric prefix, so
        the lexicographic sort yields execution order.

    Raises:
        ValueError: If direction is neither "up" nor "down".
    """
    if direction == "up":
        migration_path = MIGRATION_UPGRADES
    elif direction == "down":
        migration_path = MIGRATION_DOWNGRADES
    else:
        # Previously any non-"up" value silently selected downgrades;
        # fail loudly on typos instead.
        raise ValueError(f"Unknown migration direction: {direction!r}")

    migrations: List[Migration] = []
    for file in sorted(os.listdir(migration_path)):
        if file.endswith(".sql"):
            with open(os.path.join(migration_path, file), "r", encoding="utf-8") as f:
                migrations.append(Migration(name=file, sql=f.read()))
    return migrations
18
+
19
+
20
def get_upgrade_migrations() -> "List[Migration]":
    """Return all upgrade ("up") migrations in execution order."""
    return get_migrations("up")
22
+
23
+
24
def get_downgrade_migrations() -> "List[Migration]":
    """Return all downgrade ("down") migrations in execution order."""
    return get_migrations("down")
@@ -0,0 +1,5 @@
1
-- Revert connection-scoped PRAGMA settings (no explicit resets needed):
-- synchronous, busy_timeout and foreign_keys are per-connection and return
-- to SQLite defaults when the connection is closed/reopened.
--
-- NOTE(review): journal_mode = WAL is persistent -- it is stored in the
-- database file and survives reconnects. Fully reverting 001 would require
-- an explicit `PRAGMA journal_mode = DELETE;`; confirm whether leaving WAL
-- enabled after downgrade is intended.
@@ -0,0 +1,3 @@
1
-- Drop workflows table (reverts 002_create_workflows.sql)

DROP TABLE IF EXISTS workflows;
@@ -0,0 +1,3 @@
1
-- Drop events table (reverts 003_create_events.sql)

DROP TABLE IF EXISTS events;
@@ -0,0 +1,3 @@
1
-- Drop tasks table (reverts 004_create_tasks.sql)

DROP TABLE IF EXISTS tasks;
@@ -0,0 +1,5 @@
1
-- Drop indexes (reverts 005_create_indexes.sql)

DROP INDEX IF EXISTS idx_events_workflow_id;
DROP INDEX IF EXISTS idx_tasks_pending;
DROP INDEX IF EXISTS idx_workflows_status;
@@ -0,0 +1,4 @@
1
-- Drop updated_at maintenance triggers (reverts 006_auto_update_triggers.sql)

DROP TRIGGER IF EXISTS trg_workflows_updated;
DROP TRIGGER IF EXISTS trg_tasks_updated;
@@ -0,0 +1 @@
1
-- Drop logs table (reverts 007_create_logs.sql)
DROP TABLE IF EXISTS logs;
@@ -0,0 +1,11 @@
1
-- Enable WAL for concurrent readers/writers.
-- NOTE: journal_mode is persistent (stored in the database file); the other
-- PRAGMAs below are per-connection and must be applied on every connect.
PRAGMA journal_mode = WAL;

-- Good durability/performance tradeoff for workflows
PRAGMA synchronous = NORMAL;

-- Wait up to 5s for write locks instead of failing immediately
PRAGMA busy_timeout = 5000;

-- Enforce foreign keys (off by default in SQLite)
PRAGMA foreign_keys = ON;
@@ -0,0 +1,15 @@
1
-- Workflow instances (metadata + cached status)

CREATE TABLE IF NOT EXISTS workflows (
    id TEXT PRIMARY KEY,
    name TEXT NOT NULL,
    description TEXT,
    version TEXT NOT NULL,
    module TEXT NOT NULL,
    -- Accept both spellings: the Python WorkflowStatus enum and the CLI
    -- status table use 'CANCELED', while the original constraint only
    -- allowed 'CANCELLED' and would have rejected enum-written rows.
    status TEXT NOT NULL CHECK (
        status IN ('RUNNING', 'COMPLETED', 'FAILED', 'CANCELLED', 'CANCELED')
    ),
    input JSON NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
@@ -0,0 +1,13 @@
1
-- Append-only event log (SOURCE OF TRUTH)

CREATE TABLE IF NOT EXISTS events (
    -- AUTOINCREMENT gives a monotonically increasing id for ordered replay
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    workflow_id TEXT NOT NULL,
    -- Event type identifier, e.g. 'WORKFLOW_STARTED', 'ACTIVITY_COMPLETED'
    type TEXT NOT NULL,
    -- Event-specific data; nullable for events with no payload
    payload JSON,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    -- Deleting a workflow removes its entire event history
    FOREIGN KEY (workflow_id)
        REFERENCES workflows(id)
        ON DELETE CASCADE
);
@@ -0,0 +1,23 @@
1
-- Durable execution queue (steps, activities, timers)

CREATE TABLE IF NOT EXISTS tasks (
    id TEXT PRIMARY KEY,
    workflow_id TEXT NOT NULL,
    -- What kind of work unit this row represents
    kind TEXT NOT NULL CHECK (
        kind IN ('STEP', 'ACTIVITY', 'TIMER')
    ),
    -- Identifier of the step/activity/timer to execute
    target TEXT NOT NULL,
    -- Earliest time a worker may pick this task up
    run_at TIMESTAMP NOT NULL,
    status TEXT NOT NULL CHECK (
        status IN ('PENDING', 'RUNNING', 'COMPLETED', 'FAILED')
    ),
    -- Retry bookkeeping: attempts made so far vs. the allowed maximum
    attempts INTEGER NOT NULL DEFAULT 0,
    max_attempts INTEGER NOT NULL DEFAULT 3,
    -- Most recent failure message, if any
    last_error TEXT,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,

    -- Deleting a workflow removes its queued tasks
    FOREIGN KEY (workflow_id)
        REFERENCES workflows(id)
        ON DELETE CASCADE
);
@@ -0,0 +1,11 @@
1
-- Fast event replay: fetch all events for one workflow
CREATE INDEX IF NOT EXISTS idx_events_workflow_id
ON events(workflow_id);

-- Worker polling (critical): find due tasks by status, ordered by run_at
CREATE INDEX IF NOT EXISTS idx_tasks_pending
ON tasks(status, run_at);

-- Workflow listing / inspection by status
CREATE INDEX IF NOT EXISTS idx_workflows_status
ON workflows(status);
@@ -0,0 +1,19 @@
1
-- Trigger to update 'updated_at' timestamp on workflows table updates --
-- NOTE(review): the trigger body UPDATEs the same table it fires on; this
-- does not loop only because SQLite's recursive_triggers PRAGMA defaults
-- to OFF -- confirm it is never enabled elsewhere in the codebase.

CREATE TRIGGER IF NOT EXISTS trg_workflows_updated
AFTER UPDATE ON workflows
FOR EACH ROW
BEGIN
    UPDATE workflows
    SET updated_at = CURRENT_TIMESTAMP
    WHERE id = OLD.id;
END;

-- Trigger to update 'updated_at' timestamp on tasks table updates --

CREATE TRIGGER IF NOT EXISTS trg_tasks_updated
AFTER UPDATE ON tasks
FOR EACH ROW
BEGIN
    UPDATE tasks
    SET updated_at = CURRENT_TIMESTAMP
    WHERE id = OLD.id;
END;
@@ -0,0 +1,10 @@
1
-- Per-workflow log records
CREATE TABLE IF NOT EXISTS logs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    workflow_id TEXT NOT NULL,
    -- Free-form level string (no CHECK constraint); presumably standard
    -- level names like INFO / ERROR -- confirm against the logger module
    level TEXT NOT NULL,
    message TEXT NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    -- Deleting a workflow removes its logs
    FOREIGN KEY (workflow_id)
        REFERENCES workflows(id)
        ON DELETE CASCADE
);
File without changes
@@ -0,0 +1,13 @@
1
+ from typing import Any, List, TypedDict, TypeVar
2
+
3
# Return-type TypeVar for activity functions (exported for reuse; not
# referenced within this module itself).
FuncReturn = TypeVar("FuncReturn")


class ActivityMetadata(TypedDict):
    """Serialized description of an activity invocation.

    Mirrors the policies attached by the @activity decorator (name,
    description, retry_count, timeout_seconds) plus the information needed
    to locate and call the function again.
    """

    # Activity name: decorator arg, or the function __name__ when unset
    name: str
    # Human-readable description ("" when unset)
    description: str
    # Number of retries on failure; 0 disables retries
    retry_count: int
    # Positional arguments for the invocation
    args: List[Any]
    # Maximum execution time before the activity is cancelled
    timeout_seconds: int
    # presumably the function name and its module import path, used to
    # resolve the callable at execution time -- confirm against the engine
    func: str
    module: str
@@ -0,0 +1,17 @@
1
+ from typing import TypedDict
2
+
3
+ from src.schemas.workflow import Step
4
+
5
+
6
class Migration(TypedDict):
    """A single SQL migration file loaded from disk.

    Produced by lib.utils.get_migrations: ``name`` is the .sql filename
    (its numeric prefix gives execution order) and ``sql`` is the full
    file contents.
    """

    name: str
    sql: str
9
+
10
+
11
class WorkflowInput(TypedDict):
    """Metadata describing a workflow definition for persistence.

    The scalar fields mirror columns of the workflows table (name,
    description, version, status, module); ``steps`` carries the ordered
    step descriptors.
    """

    name: str
    description: str
    version: str
    # Stored as TEXT; expected to be one of the WorkflowStatus values
    status: str
    # Import path of the module defining the workflow class
    module: str
    # Ordered step descriptors (see schemas.workflow.Step)
    steps: list[Step]
src/schemas/events.py ADDED
@@ -0,0 +1,70 @@
1
+ from typing import Any, Dict, Literal, Optional, TypedDict
2
+
3
+
4
class Event(TypedDict):
    """A single immutable record in the workflow event store.

    The append-only sequence of events is the single source of truth from
    which workflow state is reconstructed during replay and recovery.

    Attributes:
        type: Event type identifier (e.g. 'WORKFLOW_STARTED',
            'ACTIVITY_COMPLETED').
        payload: Event-specific data carried alongside the type.
    """

    type: str
    payload: Dict[str, Any]
17
+
18
+
19
class WorkflowFailurePayload(TypedDict, total=False):
    """Payload attached to workflow failure events.

    Emitted when a workflow hits an unrecoverable error during execution.
    All keys are optional (total=False) because not every failure captures
    every field.

    Attributes:
        error: Message describing what went wrong.
        step: Name of the step where the failure occurred.
        reason: High-level reason for the failure.
        traceback: Full Python traceback for debugging.
    """

    error: str
    step: str
    reason: str
    traceback: str
35
+
36
+
37
class ActivityFailurePayload(TypedDict, total=False):
    """Payload attached to activity failure events.

    Emitted when an activity fails; the activity may still be retried
    depending on its configured retry policy. All keys are optional
    (total=False).

    Attributes:
        activity: Name of the failed activity.
        error: Error message from the activity execution.
        attempt: Current attempt number, used for retry tracking.
    """

    activity: str
    error: str
    attempt: int
51
+
52
+
53
class ExtractedError(TypedDict, total=False):
    """Normalized error information extracted from a failure event.

    Presents workflow-level and activity-level failures through one shape
    for monitoring and debugging. All keys are optional (total=False).

    Attributes:
        source: Origin of the error -- workflow logic or an activity.
        message: Human-readable error message.
        step: Workflow step name, when the error arose in workflow logic.
        activity: Activity name, when the error arose in an activity.
        details: Additional context-specific error information.
    """

    source: Literal["WORKFLOW", "ACTIVITY"]
    message: str
    step: Optional[str]
    activity: Optional[str]
    details: Dict[str, Any]
src/schemas/tasks.py ADDED
@@ -0,0 +1,58 @@
1
+ from datetime import datetime
2
+ from typing import Literal, TypedDict
3
+
4
# Task kind types matching the database CHECK constraint on tasks.kind
TaskKind = Literal["STEP", "ACTIVITY", "TIMER"]

# Task status types matching the database CHECK constraint on tasks.status
TaskStatus = Literal["PENDING", "RUNNING", "COMPLETED", "FAILED"]
9
+
10
+
11
class Task(TypedDict):
    """
    Task schema matching the database tasks table structure.

    Tasks represent units of work in the workflow execution queue,
    including workflow steps, activity executions, and timer events.
    """

    id: str
    # FK to workflows.id (ON DELETE CASCADE)
    workflow_id: str
    kind: TaskKind
    # Identifier of the step/activity/timer to execute
    target: str
    # NOTE(review): typed str here but datetime in TaskInput -- presumably
    # timestamps come back from SQLite as TEXT; confirm against db.py
    run_at: str
    status: TaskStatus
    # Attempts made so far (DB default 0)
    attempts: int
    # Retry ceiling (DB default 3)
    max_attempts: int
    # Most recent failure message, if any
    last_error: str | None
    created_at: str
    updated_at: str
30
+
31
+
32
class TaskInput(TypedDict):
    """
    Input schema for creating new tasks.

    Excludes auto-generated fields like timestamps and attempts.
    """

    id: str
    workflow_id: str
    kind: TaskKind
    target: str
    # Scheduled execution time; supplied as datetime on insert (read back
    # as str -- see Task.run_at)
    run_at: datetime
    status: TaskStatus
    max_attempts: int
46
+
47
+
48
class TaskUpdate(TypedDict, total=False):
    """
    Schema for updating existing tasks.

    All fields are optional (total=False) to allow partial updates.
    """

    status: TaskStatus
    # New attempt count after a retry
    attempts: int
    # Failure message, or None to clear it
    last_error: str | None
    updated_at: datetime
@@ -0,0 +1,33 @@
1
+ from datetime import datetime
2
+ from enum import Enum
3
+ from typing import Awaitable, Callable, TypedDict, TypeVar
4
+
5
# Workflow state container type (any dict subclass).
StateT = TypeVar("StateT", bound=dict)

# Async callables used for steps and activities (bound to awaitable functions).
Func = TypeVar("Func", bound=Callable[..., Awaitable[object]])

# Workflow input payload type (any dict subclass).
InputT = TypeVar("InputT", bound=dict)

# Class type decorated by @workflow.
ClsT = TypeVar("ClsT")
12
+
13
+
14
class Step(TypedDict):
    """Descriptor for a single workflow step.

    ``name`` and ``description`` correspond to the metadata set by the
    @step decorator; ``fn`` is presumably the method attribute name used
    to resolve the step at execution time -- confirm against the engine.
    """

    name: str
    description: str
    fn: str
18
+
19
+
20
class WorkflowStatus(str, Enum):
    """Lifecycle states of a workflow instance.

    NOTE(review): this enum spells 'CANCELED' (one L), while the workflows
    table CHECK constraint in 002_create_workflows.sql only allows
    'CANCELLED' (two Ls) -- persisting WorkflowStatus.CANCELED.value would
    violate that constraint. Reconcile the spellings.
    """

    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
    CANCELED = "CANCELED"
25
+
26
+
27
class WorkflowInfo(TypedDict):
    """Summary of a persisted workflow instance.

    Field names mirror the workflows table columns used for listing and
    inspection.
    """

    id: str
    name: str
    status: WorkflowStatus
    module: str
    created_at: datetime
    updated_at: datetime