plain.jobs 0.43.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plain/jobs/cli.py ADDED
@@ -0,0 +1,204 @@
+ from __future__ import annotations
+
+ import datetime
+ import logging
+ import signal
+ from typing import Any
+
+ import click
+
+ from plain.cli import register_cli
+ from plain.runtime import settings
+ from plain.utils import timezone
+
+ from .models import JobProcess, JobRequest, JobResult
+ from .registry import jobs_registry
+ from .scheduling import load_schedule
+ from .workers import Worker
+
+ logger = logging.getLogger("plain.jobs")
+
+
+ @register_cli("jobs")
+ @click.group()
+ def cli() -> None:
+     """Background job management"""
+
+
+ @cli.command()
+ @click.option(
+     "queues",
+     "--queue",
+     default=["default"],
+     multiple=True,
+     type=str,
+     help="Queue to process",
+ )
+ @click.option(
+     "--max-processes",
+     "max_processes",
+     default=None,
+     type=int,
+     envvar="PLAIN_JOBS_WORKER_MAX_PROCESSES",
+ )
+ @click.option(
+     "--max-jobs-per-process",
+     "max_jobs_per_process",
+     default=None,
+     type=int,
+     envvar="PLAIN_JOBS_WORKER_MAX_JOBS_PER_PROCESS",
+ )
+ @click.option(
+     "--max-pending-per-process",
+     "max_pending_per_process",
+     default=10,
+     type=int,
+     envvar="PLAIN_JOBS_WORKER_MAX_PENDING_PER_PROCESS",
+ )
+ @click.option(
+     "--stats-every",
+     "stats_every",
+     default=60,
+     type=int,
+     envvar="PLAIN_JOBS_WORKER_STATS_EVERY",
+ )
+ @click.option(
+     "--reload",
+     is_flag=True,
+     help="Watch files and auto-reload worker on changes",
+ )
+ def worker(
+     queues: tuple[str, ...],
+     max_processes: int | None,
+     max_jobs_per_process: int | None,
+     max_pending_per_process: int,
+     stats_every: int,
+     reload: bool,
+ ) -> None:
+     """Run the job worker"""
+     jobs_schedule = load_schedule(settings.JOBS_SCHEDULE)
+
+     if reload:
+         from plain.internal.reloader import Reloader
+
+         # Track whether we should continue restarting
+         should_restart = {"value": True}
+         current_worker = {"instance": None}
+
+         def file_changed(filename: str) -> None:
+             if current_worker["instance"]:
+                 current_worker["instance"].shutdown()
+
+         def signal_shutdown(signalnum: int, _: Any) -> None:
+             should_restart["value"] = False
+             if current_worker["instance"]:
+                 current_worker["instance"].shutdown()
+
+         # Allow the worker to be stopped gracefully on SIGTERM/SIGINT
+         signal.signal(signal.SIGTERM, signal_shutdown)
+         signal.signal(signal.SIGINT, signal_shutdown)
+
+         # Start file watcher once, outside the loop
+         reloader = Reloader(callback=file_changed, watch_html=False)
+         reloader.start()
+
+         while should_restart["value"]:
+             worker = Worker(
+                 queues=list(queues),
+                 jobs_schedule=jobs_schedule,
+                 max_processes=max_processes,
+                 max_jobs_per_process=max_jobs_per_process,
+                 max_pending_per_process=max_pending_per_process,
+                 stats_every=stats_every,
+             )
+             current_worker["instance"] = worker
+
+             # Start processing jobs (blocks until shutdown)
+             worker.run()
+
+     else:
+         worker = Worker(
+             queues=list(queues),
+             jobs_schedule=jobs_schedule,
+             max_processes=max_processes,
+             max_jobs_per_process=max_jobs_per_process,
+             max_pending_per_process=max_pending_per_process,
+             stats_every=stats_every,
+         )
+
+         def _shutdown(signalnum: int, _: Any) -> None:
+             logger.info("Job worker shutdown signal received signalnum=%s", signalnum)
+             worker.shutdown()
+
+         # Allow the worker to be stopped gracefully on SIGTERM/SIGINT
+         signal.signal(signal.SIGTERM, _shutdown)
+         signal.signal(signal.SIGINT, _shutdown)
+
+         # Start processing jobs
+         worker.run()
+
+
+ @cli.command()
+ def clear() -> None:
+     """Clear completed job results"""
+     cutoff = timezone.now() - datetime.timedelta(
+         seconds=settings.JOBS_RESULTS_RETENTION
+     )
+     click.echo(f"Clearing job results created before {cutoff}")
+     results = JobResult.query.filter(created_at__lt=cutoff).delete()
+     click.echo(f"Deleted {results[0]} job results")
+
+
+ @cli.command()
+ def stats() -> None:
+     """Show job queue statistics"""
+     pending = JobRequest.query.count()
+     processing = JobProcess.query.count()
+
+     successful = JobResult.query.successful().count()
+     errored = JobResult.query.errored().count()
+     lost = JobResult.query.lost().count()
+
+     click.secho(f"Pending: {pending}", bold=True)
+     click.secho(f"Processing: {processing}", bold=True)
+     click.secho(f"Successful: {successful}", bold=True, fg="green")
+     click.secho(f"Errored: {errored}", bold=True, fg="red")
+     click.secho(f"Lost: {lost}", bold=True, fg="yellow")
+
+
+ @cli.command()
+ def purge() -> None:
+     """Delete all pending and running jobs"""
+     if not click.confirm(
+         "Are you sure you want to clear all running and pending jobs? This will delete all current Jobs and JobRequests"
+     ):
+         return
+
+     deleted = JobRequest.query.all().delete()[0]
+     click.echo(f"Deleted {deleted} job requests")
+
+     deleted = JobProcess.query.all().delete()[0]
+     click.echo(f"Deleted {deleted} jobs")
+
+
+ @cli.command()
+ @click.argument("job_class_name", type=str)
+ def run(job_class_name: str) -> None:
+     """Run a job directly without a worker"""
+     job = jobs_registry.load_job(job_class_name, {"args": [], "kwargs": {}})
+     click.secho("Loaded job: ", bold=True, nl=False)
+     click.echo(job)
+     job.run()
+
+
+ @cli.command("list")
+ def list_jobs() -> None:
+     """List all registered jobs"""
+     for name, job_class in jobs_registry.jobs.items():
+         click.secho(f"{name}", bold=True, nl=False)
+         # Get description from class docstring
+         description = job_class.__doc__.strip() if job_class.__doc__ else ""
+         if description:
+             click.secho(f": {description}", dim=True)
+         else:
+             click.echo("")
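
For reference, a minimal sketch of the kind of job these commands operate on. It assumes Job and a register_job decorator are exported from plain.jobs and that app-level jobs.py modules are autodiscovered (the registry module itself is not part of this diff), so the names are illustrative:

    # Hypothetical app/jobs.py, picked up by autodiscover_modules("jobs")
    from plain.jobs import Job, register_job  # export names assumed, not shown in this diff


    @register_job
    class SendWelcomeEmail(Job):
        """Send a welcome email to a new user."""

        def __init__(self, user_id: int):
            self.user_id = user_id

        def run(self) -> None:
            print(f"Sending welcome email to user {self.user_id}")

With something like this registered, "plain jobs list" prints the class name plus its docstring, and "plain jobs run" loads the job with empty args/kwargs before calling run(), so only jobs that can be instantiated without arguments are runnable that way.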
plain/jobs/config.py ADDED
@@ -0,0 +1,19 @@
+ from importlib import import_module
+
+ from plain.packages import PackageConfig, packages_registry, register_config
+
+ from .registry import jobs_registry
+
+
+ @register_config
+ class Config(PackageConfig):
+     package_label = "plainjobs"
+
+     def ready(self) -> None:
+         # Trigger register calls to fire by importing the modules
+         packages_registry.autodiscover_modules("jobs", include_app=True)
+
+         # Also make sure our internal jobs are registered
+         import_module("plain.jobs.scheduling")
+
+         jobs_registry.ready = True
@@ -0,0 +1,6 @@
+ JOBS_RESULTS_RETENTION: int = 60 * 60 * 24 * 7  # One week
+ JOBS_TIMEOUT: int = 60 * 60 * 24  # One day
+ JOBS_MIDDLEWARE: list[str] = [
+     "plain.jobs.middleware.AppLoggerMiddleware",
+ ]
+ JOBS_SCHEDULE: list[tuple[str, str]] = []
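
These defaults can be overridden in a project's settings. A sketch, assuming JOBS_SCHEDULE pairs a registered job class path with a cron-style expression (the load_schedule() parser is not part of this diff, so the schedule syntax here is an assumption):

    # Hypothetical app/settings.py overrides
    JOBS_RESULTS_RETENTION = 60 * 60 * 24 * 30  # keep job results for 30 days

    JOBS_SCHEDULE = [
        # (job class path, schedule) tuples consumed by load_schedule();
        # cron syntax is assumed here
        ("app.jobs.NightlyReport", "0 2 * * *"),
    ]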
@@ -0,0 +1,34 @@
+ class DeferJob(Exception):
+     """Signal that a job should be deferred and retried later.
+
+     Unlike regular exceptions that indicate errors, DeferJob is used for expected
+     delays like:
+     - Waiting for external resources (API rate limits, data not ready)
+     - Polling for status changes
+     - Temporary unavailability
+
+     Example:
+         # Finite retries - will fail if data never becomes ready
+         if not data.is_ready():
+             raise DeferJob(delay=60, increment_retries=True)
+
+         # Infinite retries - safe for rate limits
+         if rate_limited():
+             raise DeferJob(delay=300, increment_retries=False)
+     """
+
+     def __init__(self, *, delay: int, increment_retries: bool = False):
+         self.delay = delay
+         self.increment_retries = increment_retries
+         super().__init__(f"Job deferred for {delay} seconds")
+
+
+ class DeferError(Exception):
+     """Raised when a deferred job cannot be re-enqueued.
+
+     This typically happens when concurrency limits prevent the job from being
+     re-queued. The transaction will be rolled back and the job will remain
+     in its current state, then be converted to ERRORED status for retry.
+     """
+
+     pass
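
A short sketch of how DeferJob is meant to be raised from a job's run() method, following the docstring above. The file name for this hunk is missing from the diff, so the import path and the external_api helper are assumptions:

    from plain.jobs import Job
    from plain.jobs.exceptions import DeferJob  # import path assumed

    class SyncExport(Job):
        def run(self) -> None:
            status = external_api.get_export_status()  # hypothetical helper

            if status.rate_limited:
                # Expected delay: retry indefinitely without consuming retries
                raise DeferJob(delay=300, increment_retries=False)

            if not status.ready:
                # Counts against the job's retries, so it eventually errors
                raise DeferJob(delay=60, increment_retries=True)

            external_api.download_export()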
plain/jobs/jobs.py ADDED
@@ -0,0 +1,368 @@
+ from __future__ import annotations
+
+ import datetime
+ import inspect
+ from abc import ABCMeta, abstractmethod
+ from contextlib import AbstractContextManager, nullcontext
+ from typing import TYPE_CHECKING, Any
+
+ from opentelemetry import trace
+ from opentelemetry.semconv._incubating.attributes.code_attributes import (
+     CODE_FILEPATH,
+     CODE_LINENO,
+ )
+ from opentelemetry.semconv._incubating.attributes.messaging_attributes import (
+     MESSAGING_DESTINATION_NAME,
+     MESSAGING_MESSAGE_ID,
+     MESSAGING_OPERATION_NAME,
+     MESSAGING_OPERATION_TYPE,
+     MESSAGING_SYSTEM,
+     MessagingOperationTypeValues,
+ )
+ from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+ from opentelemetry.trace import SpanKind, format_span_id, format_trace_id
+
+ from plain import models
+ from plain.models import transaction
+ from plain.models.db import db_connection
+ from plain.utils import timezone
+
+ from .locks import postgres_advisory_lock
+ from .registry import JobParameters, jobs_registry
+
+ if TYPE_CHECKING:
+     from .models import JobProcess, JobRequest
+
+
+ tracer = trace.get_tracer("plain.jobs")
+
+
+ class JobType(ABCMeta):
+     """
+     Metaclass allows us to capture the original args/kwargs
+     used to instantiate the job, so we can store them in the database
+     when we schedule the job.
+     """
+
+     def __call__(self, *args: Any, **kwargs: Any) -> Job:
+         instance = super().__call__(*args, **kwargs)
+         instance._init_args = args
+         instance._init_kwargs = kwargs
+         return instance
+
+
+ class Job(metaclass=JobType):
+     # Set by JobType metaclass when the job is instantiated
+     _init_args: tuple[Any, ...]
+     _init_kwargs: dict[str, Any]
+
+     # Set by JobProcess when the job is executed
+     # Useful for jobs that need to query and exclude themselves
+     job_process: JobProcess | None = None
+
+     @abstractmethod
+     def run(self) -> None:
+         pass
+
+     def run_in_worker(
+         self,
+         *,
+         queue: str | None = None,
+         delay: int | datetime.timedelta | datetime.datetime | None = None,
+         priority: int | None = None,
+         retries: int | None = None,
+         retry_attempt: int = 0,
+         concurrency_key: str | None = None,
+     ) -> JobRequest | None:
+         from .models import JobRequest
+
+         job_class_name = jobs_registry.get_job_class_name(self.__class__)
+
+         if queue is None:
+             queue = self.default_queue()
+
+         with tracer.start_as_current_span(
+             f"run_in_worker {job_class_name}",
+             kind=SpanKind.PRODUCER,
+             attributes={
+                 MESSAGING_SYSTEM: "plain.jobs",
+                 MESSAGING_OPERATION_TYPE: MessagingOperationTypeValues.SEND.value,
+                 MESSAGING_OPERATION_NAME: "run_in_worker",
+                 MESSAGING_DESTINATION_NAME: queue,
+             },
+         ) as span:
+             try:
+                 # Try to automatically annotate the source of the job
+                 caller = inspect.stack()[1]
+                 source = f"{caller.filename}:{caller.lineno}"
+                 span.set_attributes(
+                     {
+                         CODE_FILEPATH: caller.filename,
+                         CODE_LINENO: caller.lineno,
+                     }
+                 )
+             except (IndexError, AttributeError):
+                 source = ""
+
+             parameters = JobParameters.to_json(self._init_args, self._init_kwargs)
+
+             if priority is None:
+                 priority = self.default_priority()
+
+             if retries is None:
+                 retries = self.default_retries()
+
+             if delay is None:
+                 start_at = None
+             elif isinstance(delay, int):
+                 start_at = timezone.now() + datetime.timedelta(seconds=delay)
+             elif isinstance(delay, datetime.timedelta):
+                 start_at = timezone.now() + delay
+             elif isinstance(delay, datetime.datetime):
+                 start_at = delay
+             else:
+                 raise ValueError(f"Invalid delay: {delay}")
+
+             if concurrency_key is None:
+                 concurrency_key = self.default_concurrency_key()
+
+             # Capture current trace context
+             current_span = trace.get_current_span()
+             span_context = current_span.get_span_context()
+
+             # Only include trace context if the span is being recorded (sampled)
+             # This ensures jobs are only linked to traces that are actually being collected
+             if current_span.is_recording() and span_context.is_valid:
+                 trace_id = f"0x{format_trace_id(span_context.trace_id)}"
+                 span_id = f"0x{format_span_id(span_context.span_id)}"
+             else:
+                 trace_id = None
+                 span_id = None
+
+             # Use transaction with optional locking for race-free enqueue
+             with transaction.atomic():
+                 # Acquire lock via context manager (or nullcontext if None)
+                 with self.get_enqueue_lock(concurrency_key) or nullcontext():
+                     # Check with lock held (if using locks)
+                     if not self.should_enqueue(concurrency_key):
+                         span.set_attribute(ERROR_TYPE, "ShouldNotEnqueue")
+                         return None
+
+                     # Create job with lock held
+                     job_request = JobRequest(
+                         job_class=job_class_name,
+                         parameters=parameters,
+                         start_at=start_at,
+                         source=source,
+                         queue=queue,
+                         priority=priority,
+                         retries=retries,
+                         retry_attempt=retry_attempt,
+                         concurrency_key=concurrency_key,
+                         trace_id=trace_id,
+                         span_id=span_id,
+                     )
+                     job_request.save()
+
+             span.set_attribute(
+                 MESSAGING_MESSAGE_ID,
+                 str(job_request.uuid),
+             )
+
+             # Add job UUID to current span for bidirectional linking
+             span.set_attribute("job.uuid", str(job_request.uuid))
+             span.set_status(trace.StatusCode.OK)
+
+             return job_request
+
+     def get_requested_jobs(
+         self, *, concurrency_key: str | None = None, include_retries: bool = False
+     ) -> models.QuerySet:
+         """
+         Get pending jobs (JobRequest) for this job class.
+
+         Args:
+             concurrency_key: Optional concurrency_key to filter by. If None, uses self.job_process.concurrency_key (if available) or self.default_concurrency_key()
+             include_retries: If False (default), exclude retry attempts from results
+         """
+         from .models import JobRequest
+
+         job_class_name = jobs_registry.get_job_class_name(self.__class__)
+
+         if concurrency_key is None:
+             if self.job_process:
+                 concurrency_key = self.job_process.concurrency_key
+             else:
+                 concurrency_key = self.default_concurrency_key()
+
+         filters = {"job_class": job_class_name}
+         if concurrency_key:
+             filters["concurrency_key"] = concurrency_key
+
+         qs = JobRequest.query.filter(**filters)
+
+         if not include_retries:
+             qs = qs.filter(retry_attempt=0)
+
+         return qs
+
+     def get_processing_jobs(
+         self,
+         *,
+         concurrency_key: str | None = None,
+         include_retries: bool = False,
+         include_self: bool = False,
+     ) -> models.QuerySet:
+         """
+         Get currently processing jobs (JobProcess) for this job class.
+
+         Args:
+             concurrency_key: Optional concurrency_key to filter by. If None, uses self.job_process.concurrency_key (if available) or self.default_concurrency_key()
+             include_retries: If False (default), exclude retry attempts from results
+         """
+         from .models import JobProcess
+
+         job_class_name = jobs_registry.get_job_class_name(self.__class__)
+
+         if concurrency_key is None:
+             if self.job_process:
+                 concurrency_key = self.job_process.concurrency_key
+             else:
+                 concurrency_key = self.default_concurrency_key()
+
+         filters = {"job_class": job_class_name}
+         if concurrency_key:
+             filters["concurrency_key"] = concurrency_key
+
+         qs = JobProcess.query.filter(**filters)
+
+         if not include_retries:
+             qs = qs.filter(retry_attempt=0)
+
+         if not include_self and self.job_process:
+             qs = qs.exclude(id=self.job_process.id)
+
+         return qs
+
+     def should_enqueue(self, concurrency_key: str) -> bool:
+         """
+         Called before enqueueing a job. Return False to skip.
+
+         Args:
+             concurrency_key: The resolved concurrency_key (from default_concurrency_key() or override)
+
+         Default behavior:
+         - If concurrency_key is empty: no restrictions (always enqueue)
+         - If concurrency_key is set: enforce uniqueness (only one job with this key can be pending or processing)
+
+         Override to implement custom concurrency control:
+         - Concurrency limits
+         - Rate limits
+         - Custom business logic
+
+         Example:
+             def should_enqueue(self, concurrency_key):
+                 # Max 3 processing, 1 pending per concurrency_key
+                 processing = self.get_processing_jobs(concurrency_key=concurrency_key).count()
+                 pending = self.get_requested_jobs(concurrency_key=concurrency_key).count()
+                 return processing < 3 and pending < 1
+         """
+         if not concurrency_key:
+             # No key = no uniqueness check
+             return True
+
+         # Key set = enforce uniqueness (include retries for strong guarantee)
+         return (
+             self.get_processing_jobs(
+                 concurrency_key=concurrency_key, include_retries=True
+             ).count()
+             == 0
+             and self.get_requested_jobs(
+                 concurrency_key=concurrency_key, include_retries=True
+             ).count()
+             == 0
+         )
+
+     def default_concurrency_key(self) -> str:
+         """
+         Default identifier for this job.
+
+         Use for:
+         - Deduplication
+         - Grouping related jobs
+         - Concurrency control
+
+         Return empty string (default) for no grouping.
+         Can be overridden per-call via the concurrency_key parameter in run_in_worker().
+         """
+         return ""
+
+     def default_queue(self) -> str:
+         """Default queue for this job. Can be overridden in run_in_worker()."""
+         return "default"
+
+     def default_priority(self) -> int:
+         """
+         Default priority for this job. Can be overridden in run_in_worker().
+
+         Higher numbers run first: 10 > 5 > 0 > -5 > -10
+         - Use positive numbers for high priority jobs
+         - Use negative numbers for low priority jobs
+         - Default is 0
+         """
+         return 0
+
+     def default_retries(self) -> int:
+         """Default number of retry attempts. Can be overridden in run_in_worker()."""
+         return 0
+
+     def calculate_retry_delay(self, attempt: int) -> int:
+         """
+         Calculate a delay in seconds before the next retry attempt.
+
+         On the first retry, attempt will be 1.
+         """
+         return 0
+
+     def get_enqueue_lock(
+         self, concurrency_key: str
+     ) -> AbstractContextManager[None] | None:
+         """
+         Return a context manager for the enqueue lock, or None for no locking.
+
+         Default: PostgreSQL advisory lock (None on SQLite/MySQL or empty concurrency_key).
+         Override to provide custom locking (Redis, etcd, etc.).
+
+         The returned context manager is used to wrap the should_enqueue() check
+         and job creation, ensuring atomicity.
+
+         Example with Redis:
+             def get_enqueue_lock(self, concurrency_key):
+                 # assumes a module-level redis client, e.g. redis_client = redis.Redis()
+                 return redis_client.lock(f"job:{concurrency_key}", timeout=5)
+
+         Example with a custom implementation:
+             from contextlib import contextmanager
+
+             @contextmanager
+             def get_enqueue_lock(self, concurrency_key):
+                 my_lock.acquire(concurrency_key)
+                 try:
+                     yield
+                 finally:
+                     my_lock.release(concurrency_key)
+
+         To disable locking:
+             def get_enqueue_lock(self, concurrency_key):
+                 return None
+         """
+         # No locking if no concurrency_key
+         if not concurrency_key:
+             return None
+
+         # PostgreSQL: use advisory locks
+         if db_connection.vendor == "postgresql":
+             return postgres_advisory_lock(self, concurrency_key)
+
+         # Other databases: no locking
+         return None
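
Enqueueing with run_in_worker() then looks like the following sketch, reusing the hypothetical SendWelcomeEmail job from the cli.py example. Every parameter maps directly to the signature above; the call returns the saved JobRequest, or None when should_enqueue() declines:

    import datetime

    # Defaults: "default" queue, priority 0, no retries, no delay
    SendWelcomeEmail(user_id=42).run_in_worker()

    SendWelcomeEmail(user_id=42).run_in_worker(
        queue="emails",
        delay=datetime.timedelta(minutes=5),  # int seconds or a datetime also work
        priority=10,  # higher numbers run first
        retries=3,
        concurrency_key="user:42",  # at most one pending/processing job per key
    )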
plain/jobs/locks.py ADDED
@@ -0,0 +1,42 @@
+ """Lock implementations for job enqueueing."""
+
+ from __future__ import annotations
+
+ import hashlib
+ from collections.abc import Iterator
+ from contextlib import contextmanager
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from .jobs import Job
+
+
+ @contextmanager
+ def postgres_advisory_lock(job: Job, concurrency_key: str) -> Iterator[None]:
+     """
+     PostgreSQL advisory lock context manager.
+
+     Generates a lock key from the job class + concurrency_key and acquires an advisory lock.
+     Uses pg_advisory_xact_lock, which is automatically released when the
+     transaction commits or rolls back. No explicit release is needed.
+
+     Args:
+         job: Job instance (used to get the job class name)
+         concurrency_key: Job grouping key
+     """
+     from plain.jobs.registry import jobs_registry
+     from plain.models.db import db_connection
+
+     # Generate lock key from job class + concurrency_key
+     job_class_name = jobs_registry.get_job_class_name(job.__class__)
+     lock_key = f"{job_class_name}::{concurrency_key}"
+
+     # Convert lock key to int64 for PostgreSQL advisory lock
+     hash_bytes = hashlib.md5(lock_key.encode()).digest()
+     lock_id = int.from_bytes(hash_bytes[:8], "big", signed=True)
+
+     # Acquire advisory lock (auto-released on transaction end)
+     with db_connection.cursor() as cursor:
+         cursor.execute("SELECT pg_advisory_xact_lock(%s)", [lock_id])
+
+     yield  # Lock is held here
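
In isolation, the lock composes with a transaction the same way run_in_worker() uses it. A sketch, assuming a PostgreSQL db_connection and the hypothetical SendWelcomeEmail job from the earlier examples:

    from plain.models import transaction

    from plain.jobs.locks import postgres_advisory_lock

    job = SendWelcomeEmail(user_id=42)

    with transaction.atomic():
        # Blocks until the advisory lock is free, then holds it until this
        # transaction commits or rolls back
        with postgres_advisory_lock(job, "user:42"):
            ...  # check should_enqueue() and save the JobRequest atomically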