horsies 0.1.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. horsies/__init__.py +115 -0
  2. horsies/core/__init__.py +0 -0
  3. horsies/core/app.py +552 -0
  4. horsies/core/banner.py +144 -0
  5. horsies/core/brokers/__init__.py +5 -0
  6. horsies/core/brokers/listener.py +444 -0
  7. horsies/core/brokers/postgres.py +864 -0
  8. horsies/core/cli.py +624 -0
  9. horsies/core/codec/serde.py +575 -0
  10. horsies/core/errors.py +535 -0
  11. horsies/core/logging.py +90 -0
  12. horsies/core/models/__init__.py +0 -0
  13. horsies/core/models/app.py +268 -0
  14. horsies/core/models/broker.py +79 -0
  15. horsies/core/models/queues.py +23 -0
  16. horsies/core/models/recovery.py +101 -0
  17. horsies/core/models/schedule.py +229 -0
  18. horsies/core/models/task_pg.py +307 -0
  19. horsies/core/models/tasks.py +332 -0
  20. horsies/core/models/workflow.py +1988 -0
  21. horsies/core/models/workflow_pg.py +245 -0
  22. horsies/core/registry/tasks.py +101 -0
  23. horsies/core/scheduler/__init__.py +26 -0
  24. horsies/core/scheduler/calculator.py +267 -0
  25. horsies/core/scheduler/service.py +569 -0
  26. horsies/core/scheduler/state.py +260 -0
  27. horsies/core/task_decorator.py +615 -0
  28. horsies/core/types/status.py +38 -0
  29. horsies/core/utils/imports.py +203 -0
  30. horsies/core/utils/loop_runner.py +44 -0
  31. horsies/core/worker/current.py +17 -0
  32. horsies/core/worker/worker.py +1967 -0
  33. horsies/core/workflows/__init__.py +23 -0
  34. horsies/core/workflows/engine.py +2344 -0
  35. horsies/core/workflows/recovery.py +501 -0
  36. horsies/core/workflows/registry.py +97 -0
  37. horsies/py.typed +0 -0
  38. horsies-0.1.0a1.dist-info/METADATA +31 -0
  39. horsies-0.1.0a1.dist-info/RECORD +42 -0
  40. horsies-0.1.0a1.dist-info/WHEEL +5 -0
  41. horsies-0.1.0a1.dist-info/entry_points.txt +2 -0
  42. horsies-0.1.0a1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,245 @@
1
+ """SQLAlchemy models for workflow persistence."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from datetime import datetime, timezone
6
+ from typing import Optional
7
+
8
+ from sqlalchemy import String, Text, Integer, DateTime, ForeignKey, UniqueConstraint
9
+ from sqlalchemy.dialects.postgresql import ARRAY, JSONB
10
+ from sqlalchemy.orm import Mapped, mapped_column
11
+
12
+ from horsies.core.models.task_pg import Base
13
+
14
+
15
class WorkflowModel(Base):
    """
    SQLAlchemy model for workflow instances.

    Tracks the overall state of a workflow execution, including:
    - Current status (PENDING, RUNNING, COMPLETED, FAILED, PAUSED, CANCELLED)
    - Error handling policy (fail or pause on task error)
    - Explicit output task (if specified)
    - Final result and any errors
    - Parent workflow relationship (for nested/subworkflows)
    """

    __tablename__ = 'horsies_workflows'

    # Primary key
    id: Mapped[str] = mapped_column(
        String(36), primary_key=True
    )  # UUID stored as string for consistency with tasks

    # Workflow metadata
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    status: Mapped[str] = mapped_column(
        String(50), nullable=False, default='PENDING', index=True
    )
    on_error: Mapped[str] = mapped_column(String(50), nullable=False, default='fail')

    # Output task configuration
    output_task_index: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)

    # Success policy (serialized as JSONB with task indices)
    # Format: {"cases": [{"required_indices": [0, 2]}], "optional_indices": [1]}
    success_policy: Mapped[Optional[dict[str, list[int]]]] = mapped_column(
        JSONB, nullable=True
    )

    # Results and errors
    result: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    error: Mapped[Optional[str]] = mapped_column(Text, nullable=True)

    # -------------------------------------------------------------------------
    # Workflow definition identity (for import-based recovery/conditions)
    # -------------------------------------------------------------------------

    workflow_def_module: Mapped[Optional[str]] = mapped_column(
        String(512), nullable=True
    )
    workflow_def_qualname: Mapped[Optional[str]] = mapped_column(
        String(512), nullable=True
    )

    # -------------------------------------------------------------------------
    # Subworkflow support: parent-child relationship
    # -------------------------------------------------------------------------

    # Parent workflow (if this is a subworkflow)
    parent_workflow_id: Mapped[Optional[str]] = mapped_column(
        String(36),
        ForeignKey('horsies_workflows.id', ondelete='CASCADE'),
        nullable=True,
        index=True,
    )

    # Index of the SubWorkflowNode in the parent workflow
    parent_task_index: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)

    # Nesting depth (0 = root, 1 = child, 2 = grandchild, etc.)
    depth: Mapped[int] = mapped_column(Integer, nullable=False, default=0)

    # Root workflow ID for efficient queries across nesting levels
    root_workflow_id: Mapped[Optional[str]] = mapped_column(
        String(36), nullable=True, index=True
    )

    # -------------------------------------------------------------------------
    # Timestamps
    # -------------------------------------------------------------------------

    # BUG FIX: the defaults were `datetime.now(timezone.utc)` (a value, not a
    # callable), which SQLAlchemy evaluates once at class-definition time —
    # every row would be stamped with the moment the module was imported.
    # Passing a lambda makes SQLAlchemy call it per INSERT/UPDATE.
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        default=lambda: datetime.now(timezone.utc),
    )
    started_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    completed_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        default=lambda: datetime.now(timezone.utc),
        onupdate=lambda: datetime.now(timezone.utc),
        index=True,
    )
108
+
109
+
110
class WorkflowTaskModel(Base):
    """
    SQLAlchemy model for workflow task nodes.

    Represents a single task within a workflow DAG, including:
    - Task specification (name, args, kwargs, queue, priority)
    - Dependencies (array of task indices this task waits for)
    - Data flow configuration (args_from mapping, workflow_ctx_from)
    - Execution state and result
    - Link to actual task in tasks table once enqueued
    """

    __tablename__ = 'horsies_workflow_tasks'

    # Primary key
    id: Mapped[str] = mapped_column(String(36), primary_key=True)

    # Workflow reference
    workflow_id: Mapped[str] = mapped_column(
        String(36),
        ForeignKey('horsies_workflows.id', ondelete='CASCADE'),
        nullable=False,
        index=True,
    )

    # Position in workflow
    task_index: Mapped[int] = mapped_column(Integer, nullable=False)
    node_id: Mapped[Optional[str]] = mapped_column(
        String(128), nullable=True, index=True
    )

    # Task specification
    task_name: Mapped[str] = mapped_column(String(255), nullable=False)
    task_args: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    task_kwargs: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    queue_name: Mapped[str] = mapped_column(
        String(100), nullable=False, default='default'
    )
    priority: Mapped[int] = mapped_column(Integer, nullable=False, default=100)

    # DAG structure: indices of tasks this task waits for.
    # BUG FIX: default was a literal `[]` — one shared mutable list reused for
    # every insert; the `list` factory produces a fresh list per row.
    dependencies: Mapped[list[int]] = mapped_column(
        ARRAY(Integer), nullable=False, default=list
    )

    # Data flow: {"kwarg_name": task_index, ...}
    args_from: Mapped[Optional[dict[str, int]]] = mapped_column(JSONB, nullable=True)

    # Context injection: node_ids to include in WorkflowContext
    workflow_ctx_from: Mapped[Optional[list[str]]] = mapped_column(
        ARRAY(String), nullable=True
    )

    # If True, task runs even if dependencies failed (receives failed TaskResults)
    allow_failed_deps: Mapped[bool] = mapped_column(default=False, nullable=False)

    # Join semantics: "all" (default), "any", or "quorum"
    join_type: Mapped[str] = mapped_column(String(10), nullable=False, default='all')

    # For join_type="quorum": minimum number of dependencies that must succeed
    min_success: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)

    # Task options (retry policy, auto_retry_for, etc.) - serialized JSON
    task_options: Mapped[Optional[str]] = mapped_column(Text, nullable=True)

    # Execution state
    status: Mapped[str] = mapped_column(
        String(50), nullable=False, default='PENDING', index=True
    )

    # Link to actual task once enqueued (for TaskNode only)
    task_id: Mapped[Optional[str]] = mapped_column(
        String(36), nullable=True, index=True
    )

    # -------------------------------------------------------------------------
    # SubWorkflowNode support
    # -------------------------------------------------------------------------

    # True if this node is a SubWorkflowNode (not a TaskNode)
    is_subworkflow: Mapped[bool] = mapped_column(default=False, nullable=False)

    # Link to child workflow (for SubWorkflowNode)
    sub_workflow_id: Mapped[Optional[str]] = mapped_column(
        String(36),
        ForeignKey('horsies_workflows.id', ondelete='SET NULL'),
        nullable=True,
        index=True,
    )

    # Child workflow definition name (for SubWorkflowNode)
    sub_workflow_name: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)

    # Import path for subworkflow definition (fallback if registry not loaded)
    sub_workflow_module: Mapped[Optional[str]] = mapped_column(
        String(512), nullable=True
    )
    sub_workflow_qualname: Mapped[Optional[str]] = mapped_column(
        String(512), nullable=True
    )

    # Retry mode for subworkflow (rerun_failed_only, rerun_all, no_rerun)
    sub_workflow_retry_mode: Mapped[Optional[str]] = mapped_column(
        String(50), nullable=True
    )

    # Summary of subworkflow execution (serialized SubWorkflowSummary)
    sub_workflow_summary: Mapped[Optional[str]] = mapped_column(Text, nullable=True)

    # -------------------------------------------------------------------------
    # Results and errors
    # -------------------------------------------------------------------------

    result: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
    error: Mapped[Optional[str]] = mapped_column(Text, nullable=True)

    # Timestamps
    # BUG FIX: default must be a callable; evaluating `datetime.now(...)` here
    # would freeze the timestamp at import time for every row.
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        default=lambda: datetime.now(timezone.utc),
    )
    started_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    completed_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True
    )

    # Unique constraint: one task per index per workflow
    __table_args__ = (
        UniqueConstraint(
            'workflow_id', 'task_index', name='uq_horsies_workflow_task_index'
        ),
    )
241
+
242
+
243
+ # Note: The following index should be created via raw SQL for optimal performance:
244
+ # CREATE INDEX IF NOT EXISTS idx_horsies_workflow_tasks_deps ON horsies_workflow_tasks USING GIN(dependencies);
245
+ # This is handled in the broker's schema initialization.
@@ -0,0 +1,101 @@
1
+ # app/core/registry/tasks.py
2
+ from __future__ import annotations
3
+ from typing import Dict, Iterator, MutableMapping, Generic, TypeVar
4
+ from horsies.core.errors import RegistryError, ErrorCode
5
+
6
+ T = TypeVar('T')
7
+
8
+
9
class NotRegistered(RegistryError):
    """Raised when a task name is not present in the registry."""

    def __init__(self, task_name: str) -> None:
        # Build the structured error first, then keep the offending name
        # around as an attribute for programmatic inspection.
        super().__init__(
            message=f"task '{task_name}' not registered",
            code=ErrorCode.TASK_NOT_REGISTERED,
            notes=[f"requested task: '{task_name}'"],
            help_text=(
                'ensure the task is defined with @app.task() before use'
                '\nor make sure that task is discovered by the app'
            ),
        )
        self.task_name = task_name
20
+
21
+
22
class DuplicateTaskNameError(RegistryError):
    """Raised when a task name is registered more than once within the same app."""

    def __init__(self, task_name: str, context: str = '') -> None:
        # An empty context contributes no notes at all (not an empty note).
        extra_notes = [context] if context else []
        super().__init__(
            message=f"duplicate task name '{task_name}'",
            code=ErrorCode.TASK_DUPLICATE_NAME,
            notes=extra_notes,
            help_text='each task name must be unique within a horsies instance',
        )
        # Keep the offending name for programmatic inspection.
        self.task_name = task_name
33
+
34
+
35
class TaskRegistry(MutableMapping[str, T], Generic[T]):
    """Registry mapping task name -> task object.

    Tracks source locations to detect duplicate registrations:
    - Same name + same source: silently skip (re-import scenario)
    - Same name + different source: raise DuplicateTaskNameError
    """

    def __init__(self, initial: Dict[str, T] | None = None) -> None:
        # Copy `initial` so later mutation of the caller's dict cannot
        # bypass the uniqueness checks below.
        self._data: Dict[str, T] = dict(initial or {})
        self._sources: Dict[str, str] = {}  # task_name -> "file:lineno"

    def __getitem__(self, key: str) -> T:
        """Return the task registered under `key`, raising NotRegistered if absent."""
        try:
            return self._data[key]
        except KeyError:
            # FIX: `from None` suppresses the internal KeyError so callers see
            # a clean domain-specific error, not a chained implementation detail.
            raise NotRegistered(key) from None

    def __setitem__(self, key: str, value: T) -> None:
        """Discourage direct assignment; enforce uniqueness like register()."""
        if key in self._data:
            raise DuplicateTaskNameError(key, 'detected via direct assignment')
        self._data[key] = value

    def __delitem__(self, key: str) -> None:
        # Drop both the task and its recorded source location.
        del self._data[key]
        self._sources.pop(key, None)

    def __iter__(self) -> Iterator[str]:
        return iter(self._data)

    def __len__(self) -> int:
        return len(self._data)

    # --- convenience ---
    def register(self, task: T, *, name: str, source: str | None = None) -> T:
        """Insert a task under `name`, enforcing uniqueness per app.

        Args:
            task: The task object to register.
            name: The unique name for the task.
            source: Optional source location string (e.g., "file.py:42").
                Used to detect re-imports vs. true duplicates.

        Returns:
            The registered task (existing if re-import, new otherwise).

        Raises:
            DuplicateTaskNameError: If same name registered from different source.
        """
        if name in self._data:
            existing_source = self._sources.get(name)
            if existing_source and source and existing_source == source:
                # Same source location - this is a re-import, skip silently
                return self._data[name]
            raise DuplicateTaskNameError(name, 'task with this name already exists')
        self._data[name] = task
        if source:
            self._sources[name] = source
        return task

    def unregister(self, name: str) -> None:
        """Remove `name` if present; unknown names are silently ignored."""
        self._data.pop(name, None)
        self._sources.pop(name, None)

    def keys_list(self) -> list[str]:
        """Return a snapshot list of registered task names."""
        return list(self._data.keys())
@@ -0,0 +1,26 @@
1
+ # horsies/core/scheduler/__init__.py
2
+ """
3
+ Scheduler module for executing scheduled tasks.
4
+
5
+ Main components:
6
+ - Scheduler: Main service for running scheduled tasks
7
+ - ScheduleStateManager: Database state management
8
+ - calculate_next_run: Next run time calculation
9
+
10
+ Example usage:
11
+ from horsies.core.scheduler import Scheduler
12
+
13
+ scheduler = Scheduler(app)
14
+ await scheduler.run_forever()
15
+ """
16
+
17
+ from horsies.core.scheduler.service import Scheduler
18
+ from horsies.core.scheduler.state import ScheduleStateManager
19
+ from horsies.core.scheduler.calculator import calculate_next_run, should_run_now
20
+
21
+ __all__ = [
22
+ 'Scheduler',
23
+ 'ScheduleStateManager',
24
+ 'calculate_next_run',
25
+ 'should_run_now',
26
+ ]
@@ -0,0 +1,267 @@
1
+ # horsies/core/scheduler/calculator.py
2
+ from __future__ import annotations
3
+ from datetime import datetime, timedelta, timezone
4
+ from zoneinfo import ZoneInfo
5
+ from typing import Optional
6
+ from horsies.core.models.schedule import (
7
+ SchedulePattern,
8
+ IntervalSchedule,
9
+ HourlySchedule,
10
+ DailySchedule,
11
+ WeeklySchedule,
12
+ MonthlySchedule,
13
+ Weekday,
14
+ )
15
+
16
+
17
+ def calculate_next_run(
18
+ pattern: SchedulePattern, from_time: datetime, tz_str: str = 'UTC'
19
+ ) -> datetime:
20
+ """
21
+ Calculate the next run time for a schedule pattern.
22
+
23
+ Args:
24
+ pattern: Schedule pattern (interval, hourly, daily, weekly, monthly)
25
+ from_time: Calculate next run after this time (should be UTC-aware)
26
+ tz_str: Timezone for schedule evaluation (e.g., "UTC", "America/New_York")
27
+
28
+ Returns:
29
+ Next run time as UTC-aware datetime
30
+
31
+ Raises:
32
+ ValueError: If timezone is invalid or pattern type is unknown
33
+ """
34
+ # Ensure from_time is UTC-aware
35
+ if from_time.tzinfo is None:
36
+ raise ValueError('from_time must be timezone-aware')
37
+
38
+ # Validate timezone string
39
+ try:
40
+ tz = ZoneInfo(tz_str)
41
+ except Exception as e:
42
+ raise ValueError(f"Invalid timezone '{tz_str}': {e}")
43
+
44
+ # Convert from_time to target timezone for schedule calculations
45
+ local_time = from_time.astimezone(tz)
46
+
47
+ # Calculate next run based on pattern type (exhaustive match-case)
48
+ match pattern:
49
+ case IntervalSchedule():
50
+ next_run = _calculate_interval(pattern, from_time)
51
+ case HourlySchedule():
52
+ next_run = _calculate_hourly(pattern, local_time, tz)
53
+ case DailySchedule():
54
+ next_run = _calculate_daily(pattern, local_time, tz)
55
+ case WeeklySchedule():
56
+ next_run = _calculate_weekly(pattern, local_time, tz)
57
+ case MonthlySchedule():
58
+ next_run = _calculate_monthly(pattern, local_time, tz)
59
+
60
+ # Ensure result is UTC-aware
61
+ if next_run.tzinfo is None:
62
+ raise RuntimeError('Calculated next_run is not timezone-aware')
63
+
64
+ return next_run.astimezone(timezone.utc)
65
+
66
+
67
def _calculate_interval(pattern: IntervalSchedule, from_time: datetime) -> datetime:
    """Next run for an interval schedule: from_time plus the interval length."""
    return from_time + timedelta(seconds=pattern.total_seconds())
72
+
73
+
74
def _calculate_hourly(
    pattern: HourlySchedule, local_time: datetime, tz: ZoneInfo
) -> datetime:
    """Next time matching pattern.minute/pattern.second, strictly after local_time."""
    target = local_time.replace(
        minute=pattern.minute, second=pattern.second, microsecond=0
    )
    # At or past this hour's slot already -> roll into the next hour.
    return target if target > local_time else target + timedelta(hours=1)
88
+
89
+
90
def _calculate_daily(
    pattern: DailySchedule, local_time: datetime, tz: ZoneInfo
) -> datetime:
    """Next occurrence of pattern.time strictly after local_time, tolerating DST."""
    at = pattern.time
    # Probe today, tomorrow, and the day after: a DST gap can make a
    # particular wall-clock time invalid on one of those days.
    for offset in range(3):
        shifted = local_time + timedelta(days=offset)
        try:
            candidate = shifted.replace(
                hour=at.hour, minute=at.minute, second=at.second, microsecond=0
            )
        except Exception:
            continue  # invalid local wall time; try the next day
        if candidate > local_time:
            return candidate
    # Defensive fallback: one day ahead at the scheduled wall time.
    fallback = local_time + timedelta(days=1)
    return fallback.replace(
        hour=at.hour, minute=at.minute, second=at.second, microsecond=0
    )
115
+
116
+
117
def _calculate_weekly(
    pattern: WeeklySchedule, local_time: datetime, tz: ZoneInfo
) -> datetime:
    """Next run on one of pattern.days at pattern.time, strictly after local_time."""
    # Weekday enum -> datetime.weekday() numbering (0=Monday .. 6=Sunday).
    weekday_map = {
        Weekday.MONDAY: 0,
        Weekday.TUESDAY: 1,
        Weekday.WEDNESDAY: 2,
        Weekday.THURSDAY: 3,
        Weekday.FRIDAY: 4,
        Weekday.SATURDAY: 5,
        Weekday.SUNDAY: 6,
    }
    targets = sorted(weekday_map[d] for d in pattern.days)
    today = local_time.weekday()
    at = pattern.time

    # Today's slot at the scheduled wall time (may be invalid under DST).
    try:
        candidate = local_time.replace(
            hour=at.hour, minute=at.minute, second=at.second, microsecond=0
        )
    except Exception:
        candidate = local_time  # keep a base value; rebuilt after day shift below

    # Today qualifies if it is a scheduled weekday and the time is still ahead.
    if today in targets and candidate > local_time:
        return candidate

    # Distance to the next scheduled weekday, wrapping into next week if needed.
    days_ahead = next(
        (d - today for d in targets if d > today),
        (7 - today) + targets[0],
    )
    candidate = candidate + timedelta(days=days_ahead)

    # Rebuild the wall time on the target day; on DST trouble slide one more day.
    for _ in range(2):
        try:
            return candidate.replace(
                hour=at.hour, minute=at.minute, second=at.second, microsecond=0
            )
        except Exception:
            candidate = candidate + timedelta(days=1)
    return candidate
177
+
178
+
179
def _calculate_monthly(
    pattern: MonthlySchedule, local_time: datetime, tz: ZoneInfo
) -> datetime:
    """Next run at pattern.day/pattern.time, tolerating short months and DST."""
    at = pattern.time
    try:
        this_month = local_time.replace(
            day=pattern.day,
            hour=at.hour,
            minute=at.minute,
            second=at.second,
            microsecond=0,
        )
    except ValueError:
        # pattern.day missing from the current month (e.g. day=31 in February):
        # jump straight to the next month that has it.
        return _next_valid_monthly_date(local_time, pattern, tz)
    except Exception:
        # DST-related invalid wall time: retry the search anchored a day earlier.
        return _next_valid_monthly_date(local_time - timedelta(days=1), pattern, tz)

    # Already at/past this month's slot -> roll forward to the next valid month.
    if this_month <= local_time:
        return _next_valid_monthly_date(local_time, pattern, tz)
    return this_month
209
+
210
+
211
def _next_valid_monthly_date(
    local_time: datetime, pattern: MonthlySchedule, tz: ZoneInfo
) -> datetime:
    """Find next valid monthly date, skipping months where day doesn't exist."""
    # Begin the search at the month after local_time's month.
    year, month = local_time.year, local_time.month
    month += 1
    if month > 12:
        year, month = year + 1, 1

    # Probe up to 12 consecutive months for one that contains pattern.day.
    for _ in range(12):
        try:
            return datetime(
                year=year,
                month=month,
                day=pattern.day,
                hour=pattern.time.hour,
                minute=pattern.time.minute,
                second=pattern.time.second,
                microsecond=0,
                tzinfo=tz,
            )
        except ValueError:
            # Day doesn't exist in this month (e.g. 31 in February); advance.
            month += 1
            if month > 12:
                year, month = year + 1, 1

    # Only reachable for a day that exists in no month (e.g. > 31).
    raise ValueError(
        f'Could not find valid date for day={pattern.day} within 12 months'
    )
249
+
250
+
251
def should_run_now(next_run_at: Optional[datetime], check_time: datetime) -> bool:
    """
    Decide whether a schedule is due at the given check time.

    Args:
        next_run_at: Scheduled next run time (UTC-aware), or None if never planned
        check_time: Current time to compare against (UTC-aware)

    Returns:
        True when the schedule has never been planned (first run) or its
        next run time is at or before check_time.
    """
    # No recorded next run means the schedule has never fired -> run immediately.
    return next_run_at is None or next_run_at <= check_time