quebec 0.2.1-cp39-abi3-win32.whl → 0.2.3-cp39-abi3-win32.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
quebec/__init__.py CHANGED
@@ -1,5 +1,6 @@
-from .quebec import * # NOQA
+from .quebec import *  # NOQA
 from . import quebec
+from . import sqlalchemy  # NOQA
 from .quebec import Quebec, ActiveJob
 import logging
 import time
@@ -30,8 +31,8 @@ class JobBuilder:
 
     def _calculate_scheduled_at(self) -> Optional[datetime]:
         """Calculate scheduled_at from wait or wait_until options."""
-        wait = self.options.get('wait')
-        wait_until = self.options.get('wait_until')
+        wait = self.options.get("wait")
+        wait_until = self.options.get("wait_until")
 
         if wait_until is not None:
             if isinstance(wait_until, datetime):
@@ -53,19 +54,19 @@ class JobBuilder:
 
         return None
 
-    def perform_later(self, qc: 'Quebec', *args, **kwargs) -> 'ActiveJob':
+    def perform_later(self, qc: "Quebec", *args, **kwargs) -> "ActiveJob":
         """Enqueue the job with configured options."""
         scheduled_at = self._calculate_scheduled_at()
 
         # Pass internal options via kwargs (will be filtered out before serialization)
         if scheduled_at is not None:
-            kwargs['_scheduled_at'] = scheduled_at.timestamp()
+            kwargs["_scheduled_at"] = scheduled_at.timestamp()
 
-        if 'queue' in self.options:
-            kwargs['_queue'] = self.options['queue']
+        if "queue" in self.options:
+            kwargs["_queue"] = self.options["queue"]
 
-        if 'priority' in self.options:
-            kwargs['_priority'] = self.options['priority']
+        if "priority" in self.options:
+            kwargs["_priority"] = self.options["priority"]
 
         # Call the original perform_later
         return self.job_class.perform_later(qc, *args, **kwargs)
@@ -73,18 +74,22 @@ class JobBuilder:
 
 class NoNewOverrideMeta(type):
     def __new__(cls, name, bases, dct):
-        if '__new__' in dct:
+        if "__new__" in dct:
             raise TypeError(f"Overriding __new__ is not allowed in class {name}")
-        if '__init__' in dct:
+        if "__init__" in dct:
             raise TypeError(f"Overriding __init__ is not allowed in class {name}")
         return super().__new__(cls, name, bases, dct)
 
+
 class BaseClass(ActiveJob, metaclass=NoNewOverrideMeta):
     @classmethod
-    def set(cls, wait: Union[int, float, timedelta] = None,
-            wait_until: datetime = None,
-            queue: str = None,
-            priority: int = None) -> JobBuilder:
+    def set(
+        cls,
+        wait: Union[int, float, timedelta] = None,
+        wait_until: datetime = None,
+        queue: str = None,
+        priority: int = None,
+    ) -> JobBuilder:
         """Configure job options before enqueueing.
 
         Args:
@@ -103,13 +108,13 @@ class BaseClass(ActiveJob, metaclass=NoNewOverrideMeta):
         """
         options = {}
         if wait is not None:
-            options['wait'] = wait
+            options["wait"] = wait
         if wait_until is not None:
-            options['wait_until'] = wait_until
+            options["wait_until"] = wait_until
         if queue is not None:
-            options['queue'] = queue
+            options["queue"] = queue
         if priority is not None:
-            options['priority'] = priority
+            options["priority"] = priority
         return JobBuilder(cls, **options)
 
 
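The reformatted set() API above enqueues a job with scheduling options in one fluent chain. A minimal sketch of how it is meant to be called (MyJob and qc are hypothetical: a BaseClass subclass and an already-constructed Quebec instance):

    from datetime import timedelta
    from quebec import BaseClass

    class MyJob(BaseClass):  # hypothetical job class for illustration
        def perform(self, user_id):
            print(f"processing {user_id}")

    # set() collects wait/queue/priority into a JobBuilder; perform_later()
    # computes scheduled_at and enqueues via the original ActiveJob path.
    MyJob.set(
        wait=timedelta(minutes=5), queue="mailers", priority=10
    ).perform_later(qc, user_id=42)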
@@ -142,7 +147,9 @@ class ThreadedRunner:
             except (queue.ShutDown, KeyboardInterrupt):
                 break
             except Exception as e:
-                logger.error(f"Unexpected exception in ThreadedRunner: {e}", exc_info=True)
+                logger.error(
+                    f"Unexpected exception in ThreadedRunner: {e}", exc_info=True
+                )
             finally:
                 self.cleanup()
 
@@ -151,7 +158,7 @@ class ThreadedRunner:
     def cleanup(self):
         """Cleanup after job execution"""
         try:
-            if self.execution and hasattr(self.execution, 'cleanup'):
+            if self.execution and hasattr(self.execution, "cleanup"):
                 self.execution.cleanup()
         except Exception as e:
             logger.error(f"Error in cleanup: {e}", exc_info=True)
@@ -197,11 +204,11 @@ def _quebec_start(
         self.spawn_all()
     else:
         for component in spawn:
-            if component == 'worker':
+            if component == "worker":
                 self.spawn_job_claim_poller()
-            elif component == 'dispatcher':
+            elif component == "dispatcher":
                 self.spawn_dispatcher()
-            elif component == 'scheduler':
+            elif component == "scheduler":
                 self.spawn_scheduler()
             else:
                 raise ValueError(f"Unknown component: {component}")
@@ -225,15 +232,15 @@ def _quebec_start(
     # Start worker threads as daemon so program can exit after start()
     worker_threads = []
    for i in range(threads):
-        t = threading.Thread(target=run_worker, name=f'quebec-worker-{i}', daemon=True)
+        t = threading.Thread(target=run_worker, name=f"quebec-worker-{i}", daemon=True)
         t.start()
         worker_threads.append(t)
 
     # Store state by instance id
     _quebec_state[id(self)] = {
-        'shutdown_event': shutdown_event,
-        'job_queue': job_queue,
-        'worker_threads': worker_threads,
+        "shutdown_event": shutdown_event,
+        "job_queue": job_queue,
+        "worker_threads": worker_threads,
     }
 
     return self  # Enable chaining: qc.start().wait()
@@ -254,14 +261,14 @@ def _quebec_wait(self):
         raise RuntimeError("Quebec not started. Call start() first.")
 
     try:
-        while not state['shutdown_event'].is_set():
+        while not state["shutdown_event"].is_set():
             time.sleep(0.5)
     except KeyboardInterrupt:
-        logger.debug('KeyboardInterrupt, shutting down...')
+        logger.debug("KeyboardInterrupt, shutting down...")
         self.graceful_shutdown()
     finally:
         # Wait for worker threads to finish
-        for t in state['worker_threads']:
+        for t in state["worker_threads"]:
             t.join(timeout=5.0)
         _quebec_state.pop(id(self), None)
 
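Together, _quebec_start and _quebec_wait give the chained startup that the inline comment hints at. A hedged sketch; the Quebec constructor arguments are an assumption, while the spawn component names and the threads kwarg come from the hunks above:

    from quebec import Quebec

    qc = Quebec("postgresql://localhost/quebec")  # constructor args assumed
    # spawn accepts "worker", "dispatcher", and "scheduler"; anything else
    # raises ValueError. wait() blocks until shutdown and handles Ctrl-C
    # by calling graceful_shutdown().
    qc.start(spawn=["worker", "dispatcher"], threads=4).wait()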
quebec/logger.py CHANGED
@@ -3,8 +3,13 @@ import contextvars
 from datetime import datetime, timezone
 from typing import Optional
 
-job_id_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar("job_id", default=None)
-queue_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar("queue", default=None)
+job_id_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
+    "job_id", default=None
+)
+queue_var: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
+    "queue", default=None
+)
+
 
 class ContextFilter(logging.Filter):
     def filter(self, record: logging.LogRecord) -> bool:
@@ -16,17 +21,18 @@ class ContextFilter(logging.Filter):
             record.queue = None
         return True
 
+
 class QuebecFormatter(logging.Formatter):
     def formatTime(self, record, datefmt=None):
         dt = datetime.fromtimestamp(record.created, tz=timezone.utc)
-        return dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
+        return dt.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
 
     def format(self, record: logging.LogRecord) -> str:
         record.asctime = self.formatTime(record)
         record.message = record.getMessage()
-        jid = getattr(record, 'job_id', None)
-        queue = getattr(record, 'queue', None)
-        if jid not in (None, '', '-'):
+        jid = getattr(record, "job_id", None)
+        queue = getattr(record, "queue", None)
+        if jid not in (None, "", "-"):
             ctx = f' {{queue="{queue}" jid="{jid}" tid="{record.thread}"}}:'
         else:
             ctx = ""
@@ -45,3 +51,188 @@ def setup_logging(level: int = logging.INFO, *, replace_root: bool = True) -> None:
     handler.setFormatter(QuebecFormatter())
     handler.addFilter(ContextFilter())
     root.addHandler(handler)
+
+
+def _add_job_id(logger, method_name, event_dict):
+    """Processor to add job_id from contextvars."""
+    jid = job_id_var.get(None)
+    if jid:
+        event_dict["jid"] = jid
+    return event_dict
+
+
+def _rename_event_to_message(logger, method_name, event_dict):
+    """Rename 'event' to 'message' for Rust tracing-logfmt compatibility."""
+    if "event" in event_dict:
+        event_dict["message"] = event_dict.pop("event")
+    return event_dict
+
+
+# Module-level flag to prevent duplicate warnings
+_styles_warning_shown = False
+
+
+class TracingConsoleRenderer:
+    """Renderer matching Rust tracing console format.
+
+    Format: 2026-01-20T14:22:53Z INFO target: lineno: message key=value
+    """
+
+    def __init__(self, colors: bool = True):
+        import os
+        import sys
+
+        self._use_colors = os.environ.get("QUEBEC_COLOR") == "always" or (
+            os.environ.get("QUEBEC_COLOR") != "never" and colors and sys.stdout.isatty()
+        )
+        self._styles = self._load_styles() if self._use_colors else None
+
+    def _load_styles(self) -> Optional[dict]:
+        """Load styles from structlog's ConsoleRenderer.
+
+        Note: Uses private attributes (_styles, _level_styles) which may change
+        in future structlog versions. Falls back to plain output if unavailable.
+        """
+        try:
+            import structlog
+
+            cr = structlog.dev.ConsoleRenderer(colors=True)
+            return {
+                "reset": cr._styles.reset,
+                "dim": cr._styles.timestamp,
+                "bright": cr._styles.bright,
+                "levels": cr._level_styles,
+            }
+        except (AttributeError, Exception) as e:
+            global _styles_warning_shown
+            if not _styles_warning_shown:
+                import sys
+
+                print(
+                    f"quebec: failed to load structlog color styles ({e}), falling back to plain output",
+                    file=sys.stderr,
+                )
+                _styles_warning_shown = True
+            return None
+
+    def __call__(self, logger, method_name, event_dict):
+        timestamp = event_dict.pop("timestamp", "")
+        level = event_dict.pop("level", "info").upper()
+        event = event_dict.pop("event", "")
+        lineno = event_dict.pop("lineno", "")
+        target = event_dict.pop("target", "quebec")
+
+        extra = self._format_kv(event_dict)
+        lineno_part = f"{lineno}: " if lineno else ""
+
+        if self._styles:
+            s = self._styles
+            ts = f"{s['dim']}{timestamp}{s['reset']}"
+            lvl_style = s["levels"].get(method_name, "")
+            lvl = f"{lvl_style}{level:>5}{s['reset']}"
+            return f"{ts} {lvl} {target}: {lineno_part}{event}{extra}"
+
+        return f"{timestamp} {level:>5} {target}: {lineno_part}{event}{extra}"
+
+    @staticmethod
+    def _format_kv(event_dict: dict) -> str:
+        """Format key-value pairs for console output.
+
+        Escapes quotes and handles values with whitespace per logfmt conventions.
+        """
+        if not event_dict:
+            return ""
+        parts = []
+        for k, v in event_dict.items():
+            if isinstance(v, str):
+                # Escape internal quotes and wrap in quotes if contains whitespace/quotes
+                escaped = v.replace("\\", "\\\\").replace('"', '\\"')
+                parts.append(f'{k}="{escaped}"')
+            else:
+                parts.append(f"{k}={v}")
+        return " " + " ".join(parts)
+
+
+def setup_structlog(level: int = logging.INFO, *, format: str | None = None) -> None:
+    """Configure structlog with job_id context support.
+
+    Args:
+        level: Logging level (default INFO)
+        format: Output format - "console" (colored), "json", or "logfmt".
+            Defaults to QUEBEC_LOG_FORMAT env var, or "console" if not set.
+
+    Example:
+        from quebec.logger import setup_structlog, get_structlog
+        setup_structlog(level=logging.DEBUG)
+        setup_structlog(format="logfmt")  # logfmt output
+        setup_structlog(format="json")  # JSON output
+        log = get_structlog()
+        log.info("job started", queue="default")
+    """
+    import os
+
+    if format is None:
+        format = os.environ.get("QUEBEC_LOG_FORMAT", "console")
+    try:
+        import structlog
+    except ImportError:
+        raise ImportError("structlog is required. Install with: pip install structlog")
+
+    shared_processors = [
+        structlog.contextvars.merge_contextvars,
+        _add_job_id,
+        structlog.processors.add_log_level,
+        structlog.processors.CallsiteParameterAdder(
+            [
+                structlog.processors.CallsiteParameter.LINENO,
+            ]
+        ),
+    ]
+
+    if format == "json":
+        processors = shared_processors + [
+            structlog.processors.TimeStamper(fmt="iso"),
+            structlog.processors.JSONRenderer(),
+        ]
+    elif format == "logfmt":
+        # Use ts=/message= to match Rust tracing-logfmt output
+        processors = shared_processors + [
+            structlog.processors.TimeStamper(fmt="iso", key="ts"),
+            _rename_event_to_message,
+            structlog.processors.LogfmtRenderer(
+                key_order=["ts", "level", "message"],
+                sort_keys=True,
+            ),
+        ]
+    else:  # console
+        processors = shared_processors + [
+            structlog.processors.TimeStamper(fmt="iso"),
+            TracingConsoleRenderer(colors=True),
+        ]
+
+    structlog.configure(
+        processors=processors,
+        wrapper_class=structlog.make_filtering_bound_logger(level),
+        context_class=dict,
+        logger_factory=structlog.PrintLoggerFactory(),
+        cache_logger_on_first_use=True,
+    )
+
+
+def get_structlog(name: Optional[str] = None):
+    """Get a structlog logger instance.
+
+    Example:
+        log = get_structlog(__name__)
+        log.info("processing", job_id=123, queue="default")
+    """
+    try:
+        import structlog
+    except ImportError:
+        raise ImportError("structlog is required. Install with: pip install structlog")
+
+    # Bind the logger name as 'target' to match Rust tracing output
+    logger = structlog.get_logger()
+    if name:
+        logger = logger.bind(target=name)
+    return logger
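The structlog pipeline above is processor-driven, so job context flows in through the contextvars defined earlier. A short usage sketch; the printed line is illustrative, not captured output:

    import logging
    from quebec.logger import setup_structlog, get_structlog, job_id_var

    setup_structlog(level=logging.INFO, format="logfmt")
    log = get_structlog("quebec.worker")

    # _add_job_id picks the contextvar up and stamps jid=... on every event.
    token = job_id_var.set("42")
    log.info("job started", queue="default")
    # e.g.: ts=... level=info message="job started" jid=42 lineno=9 queue=default target=quebec.worker
    job_id_var.reset(token)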
quebec/quebec.pyd CHANGED
Binary file
quebec/sqlalchemy.py ADDED
@@ -0,0 +1,408 @@
+"""SQLAlchemy models for Quebec database tables.
+
+These models mirror the Rust SeaORM entities and can be used for querying
+the Quebec database directly from Python using SQLAlchemy.
+
+Example usage:
+    from sqlalchemy import create_engine
+    from sqlalchemy.orm import Session
+    from quebec.sqlalchemy import Job, ReadyExecution
+
+    engine = create_engine("postgresql://localhost/quebec")
+    with Session(engine) as session:
+        # Query ready jobs with their job details
+        ready = session.query(ReadyExecution).join(Job).filter(
+            ReadyExecution.queue_name == "default"
+        ).all()
+
+        for r in ready:
+            print(f"Job {r.job.class_name} ready in queue {r.queue_name}")
+"""
+
+from datetime import datetime
+from typing import Optional
+
+from sqlalchemy import (
+    BigInteger,
+    Boolean,
+    DateTime,
+    ForeignKey,
+    Index,
+    Integer,
+    String,
+    Text,
+    text,
+)
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship
+
+
+class Base(DeclarativeBase):
+    """Base class for all Quebec models."""
+
+    pass
+
+
+class Job(Base):
+    """Main job table containing job definitions and state.
+
+    Relationships:
+        - ready_execution: One-to-one with ReadyExecution
+        - claimed_execution: One-to-one with ClaimedExecution
+        - blocked_execution: One-to-one with BlockedExecution
+        - scheduled_execution: One-to-one with ScheduledExecution
+        - failed_execution: One-to-one with FailedExecution
+        - recurring_execution: One-to-one with RecurringExecution
+    """
+
+    __tablename__ = "solid_queue_jobs"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    queue_name: Mapped[str] = mapped_column(String(255), nullable=False)
+    class_name: Mapped[str] = mapped_column(String(255), nullable=False)
+    arguments: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
+    priority: Mapped[int] = mapped_column(
+        Integer, nullable=False, server_default=text("0")
+    )
+    failed_attempts: Mapped[int] = mapped_column(
+        Integer, nullable=False, server_default=text("0")
+    )
+    active_job_id: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
+    scheduled_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True)
+    finished_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True)
+    concurrency_key: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    # Relationships
+    ready_execution: Mapped[Optional["ReadyExecution"]] = relationship(
+        back_populates="job", uselist=False, cascade="all, delete-orphan"
+    )
+    claimed_execution: Mapped[Optional["ClaimedExecution"]] = relationship(
+        back_populates="job", uselist=False, cascade="all, delete-orphan"
+    )
+    blocked_execution: Mapped[Optional["BlockedExecution"]] = relationship(
+        back_populates="job", uselist=False, cascade="all, delete-orphan"
+    )
+    scheduled_execution: Mapped[Optional["ScheduledExecution"]] = relationship(
+        back_populates="job", uselist=False, cascade="all, delete-orphan"
+    )
+    failed_execution: Mapped[Optional["FailedExecution"]] = relationship(
+        back_populates="job", uselist=False, cascade="all, delete-orphan"
+    )
+    recurring_execution: Mapped[Optional["RecurringExecution"]] = relationship(
+        back_populates="job", uselist=False, cascade="all, delete-orphan"
+    )
+
+    __table_args__ = (
+        Index("idx_solid_queue_jobs_queue_priority", "queue_name", "priority"),
+        Index("idx_solid_queue_jobs_class_name", "class_name"),
+        Index("idx_solid_queue_jobs_finished_at", "finished_at"),
+    )
+
+    def __repr__(self) -> str:
+        return f"<Job(id={self.id}, class_name={self.class_name!r}, queue={self.queue_name!r})>"
+
+
+class ReadyExecution(Base):
+    """Jobs that are ready to be executed.
+
+    Jobs move here when they are enqueued and ready for immediate execution.
+    Workers claim jobs from this table using FOR UPDATE SKIP LOCKED.
+    """
+
+    __tablename__ = "solid_queue_ready_executions"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    job_id: Mapped[int] = mapped_column(
+        BigInteger,
+        ForeignKey("solid_queue_jobs.id", ondelete="CASCADE"),
+        unique=True,
+        nullable=False,
+    )
+    queue_name: Mapped[str] = mapped_column(String(255), nullable=False)
+    priority: Mapped[int] = mapped_column(
+        Integer, nullable=False, server_default=text("0")
+    )
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    job: Mapped["Job"] = relationship(back_populates="ready_execution")
+
+    __table_args__ = (
+        Index(
+            "idx_solid_queue_ready_executions_queue_priority",
+            "queue_name",
+            "priority",
+        ),
+    )
+
+    def __repr__(self) -> str:
+        return f"<ReadyExecution(id={self.id}, job_id={self.job_id}, queue={self.queue_name!r})>"
+
+
+class ClaimedExecution(Base):
+    """Jobs that are currently being executed by a worker.
+
+    When a worker claims a job, it moves from ready_executions to here.
+    The process_id links to the worker process that claimed the job.
+    """
+
+    __tablename__ = "solid_queue_claimed_executions"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    job_id: Mapped[int] = mapped_column(
+        BigInteger,
+        ForeignKey("solid_queue_jobs.id", ondelete="CASCADE"),
+        unique=True,
+        nullable=False,
+    )
+    process_id: Mapped[Optional[int]] = mapped_column(BigInteger, nullable=True)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    job: Mapped["Job"] = relationship(back_populates="claimed_execution")
+
+    def __repr__(self) -> str:
+        return f"<ClaimedExecution(id={self.id}, job_id={self.job_id}, process_id={self.process_id})>"
+
+
+class BlockedExecution(Base):
+    """Jobs that are blocked due to concurrency limits.
+
+    When a job has a concurrency_key and the semaphore limit is reached,
+    the job is placed here until a slot becomes available.
+    """
+
+    __tablename__ = "solid_queue_blocked_executions"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    job_id: Mapped[int] = mapped_column(
+        BigInteger,
+        ForeignKey("solid_queue_jobs.id", ondelete="CASCADE"),
+        unique=True,
+        nullable=False,
+    )
+    queue_name: Mapped[str] = mapped_column(String(255), nullable=False)
+    priority: Mapped[int] = mapped_column(
+        Integer, nullable=False, server_default=text("0")
+    )
+    concurrency_key: Mapped[str] = mapped_column(String(255), nullable=False)
+    expires_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    job: Mapped["Job"] = relationship(back_populates="blocked_execution")
+
+    __table_args__ = (
+        Index(
+            "idx_solid_queue_blocked_executions_concurrency_key",
+            "concurrency_key",
+        ),
+        Index("idx_solid_queue_blocked_executions_expires_at", "expires_at"),
+    )
+
+    def __repr__(self) -> str:
+        return f"<BlockedExecution(id={self.id}, job_id={self.job_id}, key={self.concurrency_key!r})>"
+
+
+class ScheduledExecution(Base):
+    """Jobs that are scheduled for future execution.
+
+    Jobs with a scheduled_at time in the future are placed here.
+    The dispatcher moves them to ready_executions when the time comes.
+    """
+
+    __tablename__ = "solid_queue_scheduled_executions"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    job_id: Mapped[int] = mapped_column(
+        BigInteger,
+        ForeignKey("solid_queue_jobs.id", ondelete="CASCADE"),
+        unique=True,
+        nullable=False,
+    )
+    queue_name: Mapped[str] = mapped_column(String(255), nullable=False)
+    priority: Mapped[int] = mapped_column(
+        Integer, nullable=False, server_default=text("0")
+    )
+    scheduled_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    job: Mapped["Job"] = relationship(back_populates="scheduled_execution")
+
+    __table_args__ = (
+        Index("idx_solid_queue_scheduled_executions_scheduled_at", "scheduled_at"),
+    )
+
+    def __repr__(self) -> str:
+        return f"<ScheduledExecution(id={self.id}, job_id={self.job_id}, scheduled_at={self.scheduled_at})>"
+
+
+class FailedExecution(Base):
+    """Jobs that have failed execution.
+
+    When a job fails and exhausts retries (or has no retry strategy),
+    it is moved here with the error message.
+    """
+
+    __tablename__ = "solid_queue_failed_executions"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    job_id: Mapped[int] = mapped_column(
+        BigInteger,
+        ForeignKey("solid_queue_jobs.id", ondelete="CASCADE"),
+        unique=True,
+        nullable=False,
+    )
+    error: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    job: Mapped["Job"] = relationship(back_populates="failed_execution")
+
+    def __repr__(self) -> str:
+        return f"<FailedExecution(id={self.id}, job_id={self.job_id})>"
+
+
+class RecurringExecution(Base):
+    """Tracks which recurring task occurrences have been executed.
+
+    Used to prevent duplicate execution of recurring jobs when
+    multiple scheduler instances are running.
+    """
+
+    __tablename__ = "solid_queue_recurring_executions"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    job_id: Mapped[int] = mapped_column(
+        BigInteger,
+        ForeignKey("solid_queue_jobs.id", ondelete="CASCADE"),
+        unique=True,
+        nullable=False,
+    )
+    task_key: Mapped[str] = mapped_column(String(255), nullable=False)
+    run_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    job: Mapped["Job"] = relationship(back_populates="recurring_execution")
+
+    __table_args__ = (
+        Index(
+            "index_solid_queue_recurring_executions_on_task_key_and_run_at",
+            "task_key",
+            "run_at",
+            unique=True,
+        ),
+    )
+
+    def __repr__(self) -> str:
+        return f"<RecurringExecution(id={self.id}, task_key={self.task_key!r}, run_at={self.run_at})>"
+
+
+class RecurringTask(Base):
+    """Recurring task definitions loaded from schedule configuration.
+
+    Defines cron-like schedules for periodic job execution.
+    """
+
+    __tablename__ = "solid_queue_recurring_tasks"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    key: Mapped[str] = mapped_column(String(255), nullable=False, unique=True)
+    schedule: Mapped[str] = mapped_column(String(255), nullable=False)
+    command: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
+    class_name: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
+    arguments: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
+    queue_name: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
+    priority: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
+    static: Mapped[bool] = mapped_column(
+        Boolean, nullable=False, server_default=text("false")
+    )
+    description: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    def __repr__(self) -> str:
+        return f"<RecurringTask(id={self.id}, key={self.key!r}, schedule={self.schedule!r})>"
+
+
+class Process(Base):
+    """Worker, dispatcher, and scheduler process registration.
+
+    Each running process registers itself here and updates its heartbeat
+    periodically. Used for process monitoring and orphan detection.
+    """
+
+    __tablename__ = "solid_queue_processes"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    kind: Mapped[str] = mapped_column(String(255), nullable=False)
+    last_heartbeat_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    supervisor_id: Mapped[Optional[int]] = mapped_column(BigInteger, nullable=True)
+    pid: Mapped[int] = mapped_column(Integer, nullable=False)
+    hostname: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
+    metadata_: Mapped[Optional[str]] = mapped_column("metadata", Text, nullable=True)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    name: Mapped[str] = mapped_column(String(255), nullable=False)
+
+    __table_args__ = (
+        Index("idx_solid_queue_processes_kind", "kind"),
+        Index("idx_solid_queue_processes_last_heartbeat", "last_heartbeat_at"),
+    )
+
+    def __repr__(self) -> str:
+        return f"<Process(id={self.id}, kind={self.kind!r}, pid={self.pid}, hostname={self.hostname!r})>"
+
+
+class Semaphore(Base):
+    """Concurrency semaphores for limiting parallel job execution.
+
+    Each unique concurrency_key has a semaphore with a value (available slots)
+    and a limit. When value reaches 0, jobs with that key are blocked.
+    """
+
+    __tablename__ = "solid_queue_semaphores"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    key: Mapped[str] = mapped_column(String(255), nullable=False, unique=True)
+    value: Mapped[int] = mapped_column(
+        Integer, nullable=False, server_default=text("0")
+    )
+    expires_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+    updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    __table_args__ = (Index("idx_solid_queue_semaphores_expires_at", "expires_at"),)
+
+    def __repr__(self) -> str:
+        return f"<Semaphore(id={self.id}, key={self.key!r}, value={self.value})>"
+
+
+class Pause(Base):
+    """Queue pause state.
+
+    When a queue is paused, no jobs from that queue will be processed
+    until the pause is removed.
+    """
+
+    __tablename__ = "solid_queue_pauses"
+
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    queue_name: Mapped[str] = mapped_column(String(255), nullable=False, unique=True)
+    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
+
+    def __repr__(self) -> str:
+        return f"<Pause(id={self.id}, queue_name={self.queue_name!r})>"
+
+
+# Convenience exports
+__all__ = [
+    "Base",
+    "Job",
+    "ReadyExecution",
+    "ClaimedExecution",
+    "BlockedExecution",
+    "ScheduledExecution",
+    "FailedExecution",
+    "RecurringExecution",
+    "RecurringTask",
+    "Process",
+    "Semaphore",
+    "Pause",
+]
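Beyond the ready-queue example in the module docstring, the same models cover failure triage and pause checks. A sketch assuming a reachable database that already contains the solid_queue_* tables:

    from sqlalchemy import create_engine, select
    from sqlalchemy.orm import Session
    from quebec.sqlalchemy import FailedExecution, Job, Pause

    engine = create_engine("postgresql://localhost/quebec")  # DSN assumed
    with Session(engine) as session:
        # Failed jobs joined to their job rows for class name and error.
        for f in session.execute(select(FailedExecution).join(Job)).scalars():
            print(f.job.class_name, f.error)

        # A row in solid_queue_pauses means the queue is paused.
        paused = session.execute(
            select(Pause).where(Pause.queue_name == "default")
        ).scalar_one_or_none()
        print("default paused:", paused is not None)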
quebec-0.2.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quebec
-Version: 0.2.1
+Version: 0.2.3
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
@@ -17,13 +17,13 @@ Classifier: Operating System :: Microsoft :: Windows
 Classifier: Operating System :: POSIX
 Classifier: Operating System :: Unix
 Classifier: Operating System :: MacOS
-Requires-Dist: pytest>=7.0.0 ; extra == 'test'
-Requires-Dist: pytest-cov ; extra == 'test'
 Requires-Dist: sphinx>=7.0 ; extra == 'docs'
 Requires-Dist: shibuya ; extra == 'docs'
 Requires-Dist: myst-parser ; extra == 'docs'
-Provides-Extra: test
+Requires-Dist: pytest>=7.0.0 ; extra == 'test'
+Requires-Dist: pytest-cov ; extra == 'test'
 Provides-Extra: docs
+Provides-Extra: test
 License-File: LICENSE
 Summary: Quebec is a simple background task queue for processing asynchronous tasks.
 Keywords: solid_queue,postgresql,mysql,sqlite,queue
quebec-0.2.3.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+quebec\__init__.py,sha256=5SmUfnETyLt5fJMuXdKY4TSFF7AOr3CQF0E_PVD1S-c,10756
+quebec\logger.py,sha256=u4PgwFjfq_tqkRm6sYU7rvfEOhw2jC3D9lhe3XyFwRU,8269
+quebec\quebec.pyd,sha256=lDebxl3fdgNYo47HUDPQMoZCCt0tIPaSVKG185jZHG4,20989952
+quebec\sqlalchemy.py,sha256=ftw7QU7xl0fLUSuKUmk90IfCPXu3k0CjjQTsucmHCXE,15243
+quebec-0.2.3.dist-info\METADATA,sha256=qjusSzEliU96bIufi893zbXmoxtQN0Y0VTb9pJtAkm0,5061
+quebec-0.2.3.dist-info\WHEEL,sha256=R9yL9L75E0L3ZWd23Kt3_bdVV0VNkcHjlSm3_njNpM4,91
+quebec-0.2.3.dist-info\licenses\LICENSE,sha256=EMUpCdp2I-buVSjzgRTpd6TZDSnUcm1Pi4w8vOiwsQk,1095
+quebec-0.2.3.dist-info\RECORD,,
quebec-0.2.3.dist-info/WHEEL CHANGED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: maturin (1.10.2)
+Generator: maturin (1.11.5)
 Root-Is-Purelib: false
 Tag: cp39-abi3-win32
quebec-0.2.1.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
-quebec-0.2.1.dist-info/METADATA,sha256=yXF8dzOOSwTX5sv_sZTdfrEAt5ZTZC0vVN-724RjdwA,5061
-quebec-0.2.1.dist-info/WHEEL,sha256=l-zZhOZ2a5ZXnfk1pQvieCLMom_QD7Et38SJZ8iK5h8,91
-quebec-0.2.1.dist-info/licenses/LICENSE,sha256=EMUpCdp2I-buVSjzgRTpd6TZDSnUcm1Pi4w8vOiwsQk,1095
-quebec/__init__.py,sha256=382fYpy2xb2oAoEG-sRJVLXVjHr7ltkWcy7WBhSv3zM,10665
-quebec/logger.py,sha256=gX0O1S77HWEQ2bkTFBV3kolSonvTP0p41xYV7WLmBNY,1806
-quebec/quebec.pyd,sha256=wa_w_9KK9cwfOMwcJPn-ulUCmb5hpkpbDh71vhKt5Dc,20173312
-quebec-0.2.1.dist-info/RECORD,,