rrq 0.2.5-py3-none-any.whl → 0.3.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rrq/{rrq.py → cli.py} RENAMED
@@ -20,10 +20,11 @@ from .worker import RRQWorker
 
 logger = logging.getLogger(__name__)
 
+
 # Helper to load settings for commands
 def _load_app_settings(settings_object_path: str | None = None) -> RRQSettings:
     """Load the settings object from the given path.
-    If not provided, the RRQ_SETTINGS environment variable will be used.
+    If not provided, the RRQ_SETTINGS environment variable will be used.
     If the environment variable is not set, will create a default settings object.
     RRQ Setting objects, automatically pick up ENVIRONMENT variables starting with RRQ_.
 
@@ -53,10 +54,22 @@ def _load_app_settings(settings_object_path: str | None = None) -> RRQSettings:
 
         return settings_object
     except ImportError:
-        click.echo(click.style(f"ERROR: Could not import settings object '{settings_object_path}'. Make sure it is in PYTHONPATH.", fg="red"), err=True)
+        click.echo(
+            click.style(
+                f"ERROR: Could not import settings object '{settings_object_path}'. Make sure it is in PYTHONPATH.",
+                fg="red",
+            ),
+            err=True,
+        )
         sys.exit(1)
     except Exception as e:
-        click.echo(click.style(f"ERROR: Unexpected error processing settings object '{settings_object_path}': {e}", fg="red"), err=True)
+        click.echo(
+            click.style(
+                f"ERROR: Unexpected error processing settings object '{settings_object_path}': {e}",
+                fg="red",
+            ),
+            err=True,
+        )
         sys.exit(1)
 
 
@@ -73,13 +86,25 @@ async def check_health_async_impl(settings_object_path: str | None = None) -> bool:
         logger.debug(f"Successfully connected to Redis: {rrq_settings.redis_dsn}")
 
         health_key_pattern = f"{HEALTH_KEY_PREFIX}*"
-        worker_keys = [key_bytes.decode("utf-8") async for key_bytes in job_store.redis.scan_iter(match=health_key_pattern)]
+        worker_keys = [
+            key_bytes.decode("utf-8")
+            async for key_bytes in job_store.redis.scan_iter(match=health_key_pattern)
+        ]
 
         if not worker_keys:
-            click.echo(click.style("Worker Health Check: FAIL (No active workers found)", fg="red"))
+            click.echo(
+                click.style(
+                    "Worker Health Check: FAIL (No active workers found)", fg="red"
+                )
+            )
             return False
 
-        click.echo(click.style(f"Worker Health Check: Found {len(worker_keys)} active worker(s):", fg="green"))
+        click.echo(
+            click.style(
+                f"Worker Health Check: Found {len(worker_keys)} active worker(s):",
+                fg="green",
+            )
+        )
         for key in worker_keys:
             worker_id = key.split(HEALTH_KEY_PREFIX)[1]
             health_data, ttl = await job_store.get_worker_health(worker_id)
@@ -95,28 +120,49 @@ async def check_health_async_impl(settings_object_path: str | None = None) -> bool:
                     f"  TTL: {ttl if ttl is not None else 'N/A'} seconds"
                 )
             else:
-                click.echo(f" - Worker ID: {click.style(worker_id, bold=True)} - Health data missing/invalid. TTL: {ttl if ttl is not None else 'N/A'}s")
+                click.echo(
+                    f" - Worker ID: {click.style(worker_id, bold=True)} - Health data missing/invalid. TTL: {ttl if ttl is not None else 'N/A'}s"
+                )
         return True
     except redis.exceptions.ConnectionError as e:
         logger.error(f"Redis connection failed during health check: {e}", exc_info=True)
-        click.echo(click.style(f"Worker Health Check: FAIL - Redis connection error: {e}", fg="red"))
+        click.echo(
+            click.style(
+                f"Worker Health Check: FAIL - Redis connection error: {e}", fg="red"
+            )
+        )
         return False
     except Exception as e:
-        logger.error(f"An unexpected error occurred during health check: {e}", exc_info=True)
-        click.echo(click.style(f"Worker Health Check: FAIL - Unexpected error: {e}", fg="red"))
+        logger.error(
+            f"An unexpected error occurred during health check: {e}", exc_info=True
+        )
+        click.echo(
+            click.style(f"Worker Health Check: FAIL - Unexpected error: {e}", fg="red")
+        )
         return False
     finally:
         if job_store:
            await job_store.aclose()
 
+
 # --- Process Management ---
-def start_rrq_worker_subprocess(is_detached: bool = False, settings_object_path: str | None = None) -> subprocess.Popen | None:
-    """Start an RRQ worker process."""
+def start_rrq_worker_subprocess(
+    is_detached: bool = False,
+    settings_object_path: str | None = None,
+    queues: list[str] | None = None,
+) -> subprocess.Popen | None:
+    """Start an RRQ worker process, optionally for specific queues."""
     command = ["rrq", "worker", "run"]
     if settings_object_path:
         command.extend(["--settings", settings_object_path])
     else:
-        raise ValueError("start_rrq_worker_subprocess called without settings_object_path!")
+        raise ValueError(
+            "start_rrq_worker_subprocess called without settings_object_path!"
+        )
+    # Add queue filters if specified
+    if queues:
+        for q in queues:
+            command.extend(["--queue", q])
 
     logger.info(f"Starting worker subprocess with command: {' '.join(command)}")
     if is_detached:
@@ -139,35 +185,55 @@ def start_rrq_worker_subprocess(is_detached: bool = False, settings_object_path: str | None = None) -> subprocess.Popen | None:
     return process
 
 
-def terminate_worker_process(process: subprocess.Popen | None, logger: logging.Logger) -> None:
+def terminate_worker_process(
+    process: subprocess.Popen | None, logger: logging.Logger
+) -> None:
     if not process or process.pid is None:
         logger.debug("No active worker process to terminate.")
         return
 
     try:
         if process.poll() is not None:
-            logger.debug(f"Worker process {process.pid} already terminated (poll returned exit code: {process.returncode}).")
+            logger.debug(
+                f"Worker process {process.pid} already terminated (poll returned exit code: {process.returncode})."
+            )
             return
 
         pgid = os.getpgid(process.pid)
-        logger.info(f"Terminating worker process group for PID {process.pid} (PGID {pgid})...")
+        logger.info(
+            f"Terminating worker process group for PID {process.pid} (PGID {pgid})..."
+        )
         os.killpg(pgid, signal.SIGTERM)
         process.wait(timeout=5)
     except subprocess.TimeoutExpired:
-        logger.warning(f"Worker process {process.pid} did not terminate gracefully (SIGTERM timeout), sending SIGKILL.")
+        logger.warning(
+            f"Worker process {process.pid} did not terminate gracefully (SIGTERM timeout), sending SIGKILL."
+        )
         with suppress(ProcessLookupError):
             os.killpg(os.getpgid(process.pid), signal.SIGKILL)
     except Exception as e:
         logger.error(f"Unexpected error checking worker process {process.pid}: {e}")
 
 
-async def watch_rrq_worker_impl(watch_path: str, settings_object_path: str | None = None) -> None:
+async def watch_rrq_worker_impl(
+    watch_path: str,
+    settings_object_path: str | None = None,
+    queues: list[str] | None = None,
+) -> None:
     if not settings_object_path:
-        click.echo(click.style("ERROR: 'rrq worker watch' requires --settings to be specified.", fg="red"), err=True)
+        click.echo(
+            click.style(
+                "ERROR: 'rrq worker watch' requires --settings to be specified.",
+                fg="red",
+            ),
+            err=True,
+        )
         sys.exit(1)
 
     abs_watch_path = os.path.abspath(watch_path)
-    click.echo(f"Watching for file changes in {abs_watch_path} to restart RRQ worker (app settings: {settings_object_path})...")
+    click.echo(
+        f"Watching for file changes in {abs_watch_path} to restart RRQ worker (app settings: {settings_object_path})..."
+    )
     worker_process: subprocess.Popen | None = None
     loop = asyncio.get_event_loop()
     shutdown_event = asyncio.Event()
@@ -184,20 +250,28 @@ async def watch_rrq_worker_impl(watch_path: str, settings_object_path: str | None = None) -> None:
     signal.signal(signal.SIGTERM, sig_handler)
 
     try:
-        worker_process = start_rrq_worker_subprocess(is_detached=False, settings_object_path=settings_object_path)
+        worker_process = start_rrq_worker_subprocess(
+            is_detached=False,
+            settings_object_path=settings_object_path,
+            queues=queues,
+        )
         async for changes in awatch(abs_watch_path, stop_event=shutdown_event):
-            if shutdown_event.is_set():
+            if shutdown_event.is_set():
                 break
-            if not changes:
+            if not changes:
                 continue
 
             logger.info(f"File changes detected: {changes}. Restarting RRQ worker...")
             if worker_process is not None:
                 terminate_worker_process(worker_process, logger)
             await asyncio.sleep(1)
-            if shutdown_event.is_set():
+            if shutdown_event.is_set():
                 break
-            worker_process = start_rrq_worker_subprocess(is_detached=False, settings_object_path=settings_object_path)
+            worker_process = start_rrq_worker_subprocess(
+                is_detached=False,
+                settings_object_path=settings_object_path,
+                queues=queues,
+            )
     except Exception as e:
         logger.error(f"Error in watch_rrq_worker: {e}", exc_info=True)
     finally:
@@ -213,7 +287,8 @@ async def watch_rrq_worker_impl(watch_path: str, settings_object_path: str | None = None) -> None:
 
 # --- Click CLI Definitions ---
 
-CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
+
 
 @click.group(context_settings=CONTEXT_SETTINGS)
 def rrq():
@@ -226,7 +301,6 @@ def rrq():
     pass
 
 
-
 @rrq.group("worker")
 def worker_cli():
     """Manage RRQ workers (run, watch)."""
@@ -234,41 +308,66 @@ def worker_cli():
 
 
 @worker_cli.command("run")
-@click.option("--burst", is_flag=True, help="Run worker in burst mode (process one job/batch then exit). Not Implemented yet.")
-@click.option("--detach", is_flag=True, help="Run the worker in the background (detached).")
 @click.option(
-    "--settings",
-    "settings_object_path",
-    type=str,
-    required=False,
-    default=None,
-    help="Python settings path for application worker settings (e.g., myapp.worker_config.rrq_settings)."
+    "--burst",
+    is_flag=True,
+    help="Run worker in burst mode (process one job/batch then exit).",
 )
-def worker_run_command(burst: bool, detach: bool, settings_object_path: str):
+@click.option(
+    "--queue",
+    "queues",
+    type=str,
+    multiple=True,
+    help="Queue(s) to poll. Defaults to settings.default_queue_name.",
+)
+@click.option(
+    "--settings",
+    "settings_object_path",
+    type=str,
+    required=False,
+    default=None,
+    help=(
+        "Python settings path for application worker settings "
+        "(e.g., myapp.worker_config.rrq_settings). "
+        "The specified settings object must include a `job_registry: JobRegistry`."
+    ),
+)
+def worker_run_command(
+    burst: bool,
+    queues: tuple[str, ...],
+    settings_object_path: str,
+):
     """Run an RRQ worker process. Requires --settings."""
     rrq_settings = _load_app_settings(settings_object_path)
 
-    if detach:
-        logger.info("Attempting to start worker in detached (background) mode...")
-        process = start_rrq_worker_subprocess(is_detached=True, settings_object_path=settings_object_path)
-        click.echo(f"Worker initiated in background (PID: {process.pid}). Check logs for status.")
-        return
-
-    if burst:
-        raise NotImplementedError("Burst mode is not implemented yet.")
+    # Determine queues to poll
+    queues_arg = list(queues) if queues else None
+    # Run worker in foreground (burst or continuous mode)
 
-    logger.info(f"Starting RRQ Worker (Burst: {burst}, App Settings: {settings_object_path})")
+    logger.info(
+        f"Starting RRQ Worker (Burst: {burst}, App Settings: {settings_object_path})"
+    )
 
     if not rrq_settings.job_registry:
-        click.echo(click.style("ERROR: No 'job_registry_app'. You must provide a JobRegistry instance in settings.", fg="red"), err=True)
+        click.echo(
+            click.style(
+                "ERROR: No 'job_registry_app'. You must provide a JobRegistry instance in settings.",
+                fg="red",
+            ),
+            err=True,
+        )
         sys.exit(1)
 
-    logger.debug(f"Registered handlers (from effective registry): {rrq_settings.job_registry.get_registered_functions()}")
+    logger.debug(
+        f"Registered handlers (from effective registry): {rrq_settings.job_registry.get_registered_functions()}"
+    )
     logger.debug(f"Effective RRQ settings for worker: {rrq_settings}")
 
     worker_instance = RRQWorker(
         settings=rrq_settings,
         job_registry=rrq_settings.job_registry,
+        queues=queues_arg,
+        burst=burst,
     )
 
     loop = asyncio.get_event_loop()
@@ -296,33 +395,126 @@ def worker_run_command(burst: bool, detach: bool, settings_object_path: str):
     show_default=True,
 )
 @click.option(
-    "--settings",
-    "settings_object_path",
-    type=str,
-    required=False,
-    default=None,
-    help="Python settings path for application worker settings (e.g., myapp.worker_config.rrq_settings)."
+    "--settings",
+    "settings_object_path",
+    type=str,
+    required=False,
+    default=None,
+    help=(
+        "Python settings path for application worker settings "
+        "(e.g., myapp.worker_config.rrq_settings). "
+        "The specified settings object must define a `job_registry: JobRegistry`."
+    ),
+)
+@click.option(
+    "--queue",
+    "queues",
+    type=str,
+    multiple=True,
+    help="Queue(s) to poll when restarting worker. Defaults to settings.default_queue_name.",
 )
-def worker_watch_command(path: str, settings_object_path: str):
+def worker_watch_command(
+    path: str,
+    settings_object_path: str,
+    queues: tuple[str, ...],
+):
     """Run the RRQ worker with auto-restart on file changes in PATH. Requires --settings."""
-    asyncio.run(watch_rrq_worker_impl(path, settings_object_path=settings_object_path))
+    # Run watch with optional queue filters
+    asyncio.run(
+        watch_rrq_worker_impl(
+            path,
+            settings_object_path=settings_object_path,
+            queues=list(queues) if queues else None,
+        )
+    )
+
+
+# --- DLQ Requeue CLI Command (delegates to JobStore) ---
 
 
 @rrq.command("check")
 @click.option(
-    "--settings",
-    "settings_object_path",
-    type=str,
-    required=False,
-    default=None,
-    help="Python settings path for application worker settings (e.g., myapp.worker_config.rrq_settings)."
+    "--settings",
+    "settings_object_path",
+    type=str,
+    required=False,
+    default=None,
+    help=(
+        "Python settings path for application worker settings "
+        "(e.g., myapp.worker_config.rrq_settings). "
+        "Must include `job_registry: JobRegistry` to identify workers."
+    ),
 )
 def check_command(settings_object_path: str):
     """Perform a health check on active RRQ worker(s). Requires --settings."""
     click.echo("Performing RRQ health check...")
-    healthy = asyncio.run(check_health_async_impl(settings_object_path=settings_object_path))
+    healthy = asyncio.run(
+        check_health_async_impl(settings_object_path=settings_object_path)
+    )
     if healthy:
         click.echo(click.style("Health check PASSED.", fg="green"))
     else:
         click.echo(click.style("Health check FAILED.", fg="red"))
         sys.exit(1)
+
+
+@rrq.group("dlq")
+def dlq_cli():
+    """Manage the Dead Letter Queue (DLQ)."""
+    pass
+
+
+@dlq_cli.command("requeue")
+@click.option(
+    "--settings",
+    "settings_object_path",
+    type=str,
+    required=False,
+    default=None,
+    help=(
+        "Python settings path for application worker settings "
+        "(e.g., myapp.worker_config.rrq_settings). "
+        "Must include `job_registry: JobRegistry` if requeueing requires handler resolution."
+    ),
+)
+@click.option(
+    "--dlq-name",
+    "dlq_name",
+    type=str,
+    required=False,
+    default=None,
+    help="Name of the DLQ (without prefix). Defaults to settings.default_dlq_name.",
+)
+@click.option(
+    "--queue",
+    "target_queue",
+    type=str,
+    required=False,
+    default=None,
+    help="Name of the target queue (without prefix). Defaults to settings.default_queue_name.",
+)
+@click.option(
+    "--limit",
+    type=int,
+    required=False,
+    default=None,
+    help="Maximum number of DLQ jobs to requeue; all if not set.",
+)
+def dlq_requeue_command(
+    settings_object_path: str,
+    dlq_name: str,
+    target_queue: str,
+    limit: int,
+):
+    """Requeue jobs from the dead letter queue back into a live queue."""
+    rrq_settings = _load_app_settings(settings_object_path)
+    dlq_to_use = dlq_name or rrq_settings.default_dlq_name
+    queue_to_use = target_queue or rrq_settings.default_queue_name
+    job_store = JobStore(settings=rrq_settings)
+    click.echo(
+        f"Requeuing jobs from DLQ '{dlq_to_use}' to queue '{queue_to_use}' (limit: {limit or 'all'})..."
+    )
+    count = asyncio.run(job_store.requeue_dlq(dlq_to_use, queue_to_use, limit))
+    click.echo(
+        f"Requeued {count} job(s) from DLQ '{dlq_to_use}' to queue '{queue_to_use}'."
+    )
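Taken together, the CLI changes swap the old `--detach` flag for queue-scoped workers, a working `--burst` mode, and a new `dlq requeue` command. A sketch of the new invocations (the settings path is the illustrative one from the help text; any importable `RRQSettings` object works):

```bash
# Poll specific queues; --queue is repeatable
rrq worker run --queue default --queue emails --settings myapp.worker_config.rrq_settings

# Burst mode now drains the queues and exits instead of raising NotImplementedError
rrq worker run --burst --settings myapp.worker_config.rrq_settings

# Auto-restart on file changes, restricted to one queue
# (watch path is taken from the command's own option/default; see `rrq worker watch --help`)
rrq worker watch --settings myapp.worker_config.rrq_settings --queue default

# Move up to 100 jobs from the default DLQ back onto the default queue
rrq dlq requeue --limit 100 --settings myapp.worker_config.rrq_settings
```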
rrq/client.py CHANGED
@@ -77,26 +77,44 @@ class RRQClient:
         The created Job object if successfully enqueued, or None if enqueueing was denied
         (e.g., due to a unique key conflict).
         """
-        # print(
-        #     f"DEBUG RRQClient.enqueue: function_name='{function_name}', args={args}, kwargs={kwargs}"
-        # )  # DEBUG
-
+        # Determine job ID and enqueue timestamp
         job_id_to_use = _job_id or str(uuid.uuid4())
+        enqueue_time_utc = datetime.now(UTC)
 
+        # Compute unique lock TTL: cover deferral window if any
+        lock_ttl_seconds = self.settings.default_unique_job_lock_ttl_seconds
+        if _defer_by is not None:
+            # Defer relative to now
+            defer_secs = max(0, int(_defer_by.total_seconds()))
+            lock_ttl_seconds = max(lock_ttl_seconds, defer_secs + 1)
+        elif _defer_until is not None:
+            # Defer until specific datetime
+            dt = _defer_until
+            # Normalize to UTC
+            if dt.tzinfo is None:
+                dt = dt.replace(tzinfo=UTC)
+            elif dt.tzinfo != UTC:
+                dt = dt.astimezone(UTC)
+            diff = (dt - enqueue_time_utc).total_seconds()
+            if diff > 0:
+                lock_ttl_seconds = max(lock_ttl_seconds, int(diff) + 1)
+
+        unique_acquired = False
+        # Acquire unique lock if requested, with TTL covering defer window
         if _unique_key:
             lock_acquired = await self.job_store.acquire_unique_job_lock(
                 unique_key=_unique_key,
-                job_id=job_id_to_use,  # Store current job_id in lock for traceability
-                lock_ttl_seconds=self.settings.default_unique_job_lock_ttl_seconds,
+                job_id=job_id_to_use,
+                lock_ttl_seconds=lock_ttl_seconds,
             )
             if not lock_acquired:
                 logger.info(
                     f"Job with unique key '{_unique_key}' already active or recently run. Enqueue denied."
                 )
                 return None
+            unique_acquired = True
 
         queue_name_to_use = _queue_name or self.settings.default_queue_name
-        enqueue_time_utc = datetime.now(UTC)
 
         # Create the Job instance with all provided details and defaults
         job = Job(
@@ -126,9 +144,6 @@ class RRQClient:
             queue_name=queue_name_to_use,  # Store the target queue name
         )
 
-        # Save the full job definition
-        await self.job_store.save_job_definition(job)
-
         # Determine the score for the sorted set (queue)
         # Score is a millisecond timestamp for when the job should be processed.
         score_dt = enqueue_time_utc  # Default to immediate processing
@@ -145,13 +160,21 @@ class RRQClient:
             score_dt = score_dt.astimezone(UTC)
 
         score_timestamp_ms = int(score_dt.timestamp() * 1000)
-
-        # Add the job ID to the processing queue
-        await self.job_store.add_job_to_queue(
-            queue_name_to_use,
-            job.id,
-            float(score_timestamp_ms),  # Redis ZADD score must be float
-        )
+        # Record when the job is next scheduled to run (for deferred execution)
+        job.next_scheduled_run_time = score_dt
+
+        # Save the full job definition and add to queue (ensure unique lock is released on error)
+        try:
+            await self.job_store.save_job_definition(job)
+            await self.job_store.add_job_to_queue(
+                queue_name_to_use,
+                job.id,
+                float(score_timestamp_ms),
+            )
+        except Exception:
+            if unique_acquired:
+                await self.job_store.release_unique_job_lock(_unique_key)
+            raise
 
         logger.debug(
             f"Enqueued job {job.id} ('{job.function_name}') to queue '{queue_name_to_use}' with score {score_timestamp_ms}"
rrq/constants.py CHANGED
@@ -15,6 +15,7 @@ DEFAULT_DLQ_NAME: str = "rrq:dlq:default"
 # Redis key prefixes
 JOB_KEY_PREFIX: str = "rrq:job:"
 QUEUE_KEY_PREFIX: str = "rrq:queue:"  # For ZSETs holding job IDs
+DLQ_KEY_PREFIX: str = "rrq:dlq:"  # For lists holding Dead Letter Queue job IDs
 ACTIVE_JOBS_PREFIX: str = (
     "rrq:active:"  # For lists of active jobs per worker (optional, for recovery)
 )
rrq/job.py CHANGED
@@ -53,8 +53,6 @@ class Job(BaseModel):
         default_factory=lambda: datetime.now(UTC),
         description="Timestamp (UTC) when the job was initially enqueued.",
     )
-    # score: Optional[float] = None  # The score in the ZSET, derived from defer_until/defer_by
-    # Not stored in the job hash directly, but used for queueing.
 
     status: JobStatus = Field(
         default=JobStatus.PENDING, description="Current status of the job."
@@ -107,27 +105,3 @@ class Job(BaseModel):
         default=None,
         description="The name of the Dead Letter Queue this job will be moved to if it fails permanently.",
     )
-
-    # For model_config to allow arbitrary types if result is complex and not Pydantic model
-    # class Config:
-    #     arbitrary_types_allowed = True
-
-    # def to_redis_hash(self) -> dict[str, Any]:
-    #     """Prepares the job model for storage as a Redis hash.
-    #     Pydantic's model_dump is good, but we might want to ensure all values are easily
-    #     storable as strings or simple types for Redis, or handle serialization here.
-    #     For now, model_dump with json_encoders should suffice with a good serializer.
-    #     """
-    #     # Using model_dump ensures that Pydantic models are properly serialized (e.g., datetimes to ISO strings)
-    #     # We will use a JSON serializer in JobStore that handles Pydantic models correctly.
-    #     return self.model_dump(exclude_none=True)
-
-    # @classmethod
-    # def from_redis_hash(cls, data: dict[str, Any]) -> "Job":
-    #     """Reconstructs a Job instance from data retrieved from a Redis hash."""
-    #     # Pydantic will handle parsing basic types. Datetimes are expected to be ISO strings.
-    #     # Handle potential None values for args/kwargs if they were excluded from dump
-    #     # data.setdefault("args", None)  # Removed
-    #     # data.setdefault("kwargs", None)  # Removed
-    #     return cls(**data)
-    pass  # Add pass if class body becomes empty after removing methods, or remove if not needed
rrq/registry.py CHANGED
@@ -2,8 +2,6 @@
 
 from typing import Any, Callable, Optional
 
-# Potentially: from collections.abc import Callable if more specific async callable needed
-
 
 class JobRegistry:
     """Manages the registration and retrieval of job handler functions.
@@ -74,4 +72,3 @@ class JobRegistry:
 
 
 # Global instance for convenience, though applications might manage their own.
-# job_registry = JobRegistry()
rrq/settings.py CHANGED
@@ -5,7 +5,6 @@ Settings can be loaded from environment variables (with a prefix of `RRQ_`) or
 from a .env file. Sensible defaults are provided for most settings.
 """
 
-# Import Callable and Awaitable for type hinting hooks
 from typing import Awaitable, Callable, Optional
 
 from pydantic import Field
@@ -95,7 +94,8 @@ class RRQSettings(BaseSettings):
         description="Grace period (in seconds) for active job tasks to finish during worker shutdown.",
     )
     job_registry: Optional[JobRegistry] = Field(
-        default=None, description="Job registry instance, typically provided by the application."
+        default=None,
+        description="Job registry instance, typically provided by the application.",
     )
     model_config = SettingsConfigDict(
         env_prefix="RRQ_",
@@ -104,4 +104,3 @@ class RRQSettings(BaseSettings):
         # env_file=".env",
         # env_file_encoding='utf-8'
     )
-
rrq/store.py CHANGED
@@ -38,6 +38,23 @@ class JobStore:
         self.redis = AsyncRedis.from_url(
             settings.redis_dsn, decode_responses=False
         )  # Work with bytes initially
+
+    def _format_queue_key(self, queue_name: str) -> str:
+        """Normalize a queue name or key into a Redis key for ZSET queues."""
+
+        # If already a full key, use it directly
+        if queue_name.startswith(QUEUE_KEY_PREFIX):
+            return queue_name
+        return f"{QUEUE_KEY_PREFIX}{queue_name}"
+
+    def _format_dlq_key(self, dlq_name: str) -> str:
+        """Normalize a DLQ name or key into a Redis key for DLQ lists."""
+        from .constants import DLQ_KEY_PREFIX
+
+        # If already a full key, use it directly
+        if dlq_name.startswith(DLQ_KEY_PREFIX):
+            return dlq_name
+        return f"{DLQ_KEY_PREFIX}{dlq_name}"
 
     async def aclose(self):
         """Closes the Redis connection pool associated with this store."""
@@ -83,9 +100,6 @@ class JobStore:
             job: The Job object to save.
         """
         job_key = f"{JOB_KEY_PREFIX}{job.id}"
-        # print(
-        #     f"DEBUG JobStore.save_job_definition (ENTRY): job.id={job.id}, job.job_args={job.job_args}, job.job_kwargs={job.job_kwargs}, type(job.job_args)={type(job.job_args)}"
-        # )
 
         # Dump model excluding fields handled manually
         job_data_dict = job.model_dump(
@@ -111,9 +125,6 @@ class JobStore:
         if "id" not in final_mapping_for_hset:
             final_mapping_for_hset["id"] = job.id
 
-        # print(
-        #     f"!!! RRQ_JOB_STORE_SAVE (PRINT) (JobID:{job.id}) -> Mapping for HSET: { {k: str(v)[:50] + '...' if isinstance(v, str) and len(v) > 50 else v for k, v in final_mapping_for_hset.items()} }"
-        # )
         if final_mapping_for_hset:  # Avoid HSET with empty mapping
             await self.redis.hset(job_key, mapping=final_mapping_for_hset)
             logger.debug(f"Saved job definition for {job.id} to Redis hash {job_key}.")
@@ -153,7 +164,6 @@ class JobStore:
         if job_args_str and job_args_str.lower() != "null":
             try:
                 job_args_list = json.loads(job_args_str)
-                # print(f"DEBUG get_job_def: Parsed job_args_list = {job_args_list}")
             except json.JSONDecodeError:
                 logger.error(
                     f"Failed to JSON decode 'job_args' for job {job_id} from string: '{job_args_str}'",
@@ -163,7 +173,6 @@ class JobStore:
         if job_kwargs_str and job_kwargs_str.lower() != "null":
             try:
                 job_kwargs_dict = json.loads(job_kwargs_str)
-                # print(f"DEBUG get_job_def: Parsed job_kwargs_dict = {job_kwargs_dict}")
             except json.JSONDecodeError:
                 logger.error(
                     f"Failed to JSON decode 'job_kwargs' for job {job_id} from string: '{job_kwargs_str}'",
@@ -196,12 +205,6 @@ class JobStore:
             )
             validated_job.result = result_obj
 
-            # print(
-            #     f"DEBUG get_job_def (POST-MANUAL-ASSIGN): job_id={job_id}, job.job_args='{validated_job.job_args}', type={type(validated_job.job_args)}"
-            # )
-            # print(
-            #     f"!!! RRQ_JOB_STORE_GET (POST_CREATE_VALIDATED) -> JobID:{validated_job.id}, Status:{validated_job.status.value if validated_job.status else None}, Retries:{validated_job.current_retries}"
-            # )
             logger.debug(f"Successfully retrieved and parsed job {validated_job.id}")
             return validated_job
         except Exception as e_val:
@@ -224,7 +227,7 @@ class JobStore:
             job_id: The ID of the job to add.
             score: The score (float) determining the job's position/priority in the queue.
         """
-        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        queue_key = self._format_queue_key(queue_name)
         await self.redis.zadd(
             queue_key, {job_id.encode("utf-8"): score}
         )  # Store job_id as bytes
@@ -243,7 +246,7 @@ class JobStore:
         Returns:
             A list of job IDs as strings.
         """
-        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        queue_key = self._format_queue_key(queue_name)
         job_ids_bytes = await self.redis.zrange(queue_key, start, end)
         return [job_id.decode("utf-8") for job_id in job_ids_bytes]
 
@@ -259,7 +262,7 @@ class JobStore:
         """
         if count <= 0:
             return []
-        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        queue_key = self._format_queue_key(queue_name)
         now_ms = int(datetime.now(UTC).timestamp() * 1000)
         # Fetch jobs with score from -inf up to current time, limit by count
         job_ids_bytes = await self.redis.zrangebyscore(
@@ -348,7 +351,7 @@ class JobStore:
             completion_time: The timestamp when the job failed permanently.
         """
         job_key = f"{JOB_KEY_PREFIX}{job_id}"
-        dlq_redis_key = f"{QUEUE_KEY_PREFIX}{dlq_name}"
+        dlq_redis_key = self._format_dlq_key(dlq_name)
 
         # Ensure complex fields are properly handled if needed (error could be complex)
         # For now, assuming simple string error message
@@ -365,6 +368,42 @@ class JobStore:
             pipe.expire(job_key, DEFAULT_DLQ_RESULT_TTL_SECONDS)
             results = await pipe.execute()
         logger.info(f"Moved job {job_id} to DLQ '{dlq_redis_key}'. Results: {results}")
+
+    async def requeue_dlq(
+        self,
+        dlq_name: str,
+        target_queue: str,
+        limit: int | None = None,
+    ) -> int:
+        """Requeue jobs from the Dead Letter Queue back into a live queue.
+
+        Pops jobs from the DLQ list and adds them to the target queue with current timestamp.
+
+        Args:
+            dlq_name: Name of the DLQ (without prefix).
+            target_queue: Name of the target queue (without prefix).
+            limit: Maximum number of jobs to requeue; all if None.
+
+        Returns:
+            Number of jobs requeued.
+        """
+        jobs_requeued = 0
+        dlq_key = self._format_dlq_key(dlq_name)
+        # Continue popping until limit is reached or DLQ is empty
+        while limit is None or jobs_requeued < limit:
+            job_id_bytes = await self.redis.rpop(dlq_key)
+            if not job_id_bytes:
+                break
+            job_id = job_id_bytes.decode("utf-8")
+            # Use current time for re-enqueue score
+            now_ms = int(datetime.now(UTC).timestamp() * 1000)
+            await self.add_job_to_queue(
+                self._format_queue_key(target_queue),
+                job_id,
+                now_ms,
+            )
+            jobs_requeued += 1
+        return jobs_requeued
 
     async def get_job_lock_owner(self, job_id: str) -> Optional[str]:
         """Gets the current owner (worker ID) of a job's processing lock, if held.
@@ -389,7 +428,7 @@ class JobStore:
         Returns:
             The number of elements removed (0 or 1).
         """
-        queue_key = f"{QUEUE_KEY_PREFIX}{queue_name}"
+        queue_key = self._format_queue_key(queue_name)
         removed_count = await self.redis.zrem(queue_key, job_id.encode("utf-8"))
         count = int(removed_count)  # Ensure int
         if count > 0:
@@ -469,16 +508,12 @@ class JobStore:
 
         # Serialize result to JSON string
         try:
-            # Use pydantic-core for robust serialization if available, else standard json
+            # Use pydantic JSON serialization if available, else standard JSON dump
             if hasattr(result, "model_dump_json"):
                 result_str = result.model_dump_json()
-            elif isinstance(result, str):
-                result_str = result  # Store plain strings directly if not JSON-like?
-                # Let's stick to JSON encoding everything for consistency in save/load.
-                # If it's already a string, json.dumps adds quotes.
-                result_str = json.dumps(result)
             else:
-                result_str = json.dumps(result, default=str)  # Handle datetimes etc.
+                # Always JSON-encode the result, converting unknown types to strings
+                result_str = json.dumps(result, default=str)
         except TypeError as e:
             logger.error(
                 f"Failed to serialize result for job {job_id}: {e}", exc_info=True
rrq/worker.py CHANGED
@@ -48,6 +48,7 @@ class RRQWorker:
         job_registry: JobRegistry,
         queues: Optional[list[str]] = None,
         worker_id: Optional[str] = None,
+        burst: bool = False,
     ):
         """Initializes the RRQWorker.
 
@@ -73,6 +74,8 @@ class RRQWorker:
             worker_id
             or f"{DEFAULT_WORKER_ID_PREFIX}{os.getpid()}_{uuid.uuid4().hex[:6]}"
         )
+        # Burst mode: process existing jobs then exit
+        self.burst = burst
 
         self._semaphore = asyncio.Semaphore(self.settings.worker_concurrency)
         self._running_tasks: set[asyncio.Task] = set()
@@ -144,7 +147,14 @@ class RRQWorker:
                     f"Worker {self.worker_id} polling for up to {jobs_to_fetch} jobs..."
                 )
                 self.status = "polling"
-                await self._poll_for_jobs(jobs_to_fetch)
+                # Poll for jobs and get count of jobs started
+                fetched_count = await self._poll_for_jobs(jobs_to_fetch)
+                # In burst mode, exit when no new jobs and no tasks running
+                if self.burst and fetched_count == 0 and not self._running_tasks:
+                    logger.info(
+                        f"Worker {self.worker_id} burst mode complete: no more jobs."
+                    )
+                    break
             else:
                 if self.status != "idle (concurrency limit)":
                     logger.debug(
@@ -224,6 +234,8 @@ class RRQWorker:
                 exc_info=True,
             )
             await asyncio.sleep(1)  # Avoid tight loop on polling error
+        # For burst mode, return number of jobs fetched in this poll
+        return fetched_count
 
     async def _try_process_job(self, job_id: str, queue_name: str) -> bool:
         """Attempts to lock, fetch definition, and start the execution task for a specific job.
@@ -805,7 +817,8 @@ class RRQWorker:
         delay_seconds = min(max_delay, base_delay * (2 ** (retry_attempt - 1)))
         delay_ms = int(delay_seconds * 1000)
         logger.debug(
-            f"Calculated backoff for job {job.id} (attempt {retry_attempt}): {delay_ms}ms"
+            f"Calculated backoff for job {job.id} (attempt {retry_attempt}): "
+            f"base_delay={base_delay}s, max_delay={max_delay}s -> {delay_ms}ms"
        )
        return delay_ms
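The expanded log line surfaces both inputs of the backoff formula, `delay_seconds = min(max_delay, base_delay * 2 ** (retry_attempt - 1))`. Worked numbers under assumed values (not necessarily the package defaults):

```python
base_delay, max_delay = 1.0, 60.0  # assumed illustrative values
for retry_attempt in range(1, 8):
    delay_seconds = min(max_delay, base_delay * (2 ** (retry_attempt - 1)))
    print(retry_attempt, int(delay_seconds * 1000), "ms")
# attempts 1..7 -> 1000, 2000, 4000, 8000, 16000, 32000, 60000 (capped at max_delay)
```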
 
@@ -893,5 +906,6 @@ class RRQWorker:
         if self.client:  # Check if client exists before closing
             await self.client.close()
         if self.job_store:
-            await self.job_store.close()
+            # Close the Redis connection pool
+            await self.job_store.aclose()
         logger.info(f"[{self.worker_id}] RRQ worker closed.")
{rrq-0.2.5.dist-info → rrq-0.3.5.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rrq
-Version: 0.2.5
+Version: 0.3.5
 Summary: RRQ is a Python library for creating reliable job queues using Redis and asyncio
 Project-URL: Homepage, https://github.com/getresq/rrq
 Project-URL: Bug Tracker, https://github.com/getresq/rrq/issues
@@ -21,17 +21,12 @@ Requires-Dist: redis[hiredis]<6,>=4.2.0
 Requires-Dist: watchfiles>=0.19.0
 Provides-Extra: dev
 Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
+Requires-Dist: pytest-cov>=6.0.0; extra == 'dev'
 Requires-Dist: pytest>=8.3.5; extra == 'dev'
 Description-Content-Type: text/markdown
 
 # RRQ: Reliable Redis Queue
 
-  ____  ____   ___
- | _ \ | _ \ / _ \
- | |_) | | |_) | | | | |
- | _ < | _ < | |_| |
- |_| \_\ |_| \_\ \__\_\
-
 RRQ is a Python library for creating reliable job queues using Redis and `asyncio`, inspired by [ARQ (Async Redis Queue)](https://github.com/samuelcolvin/arq). It focuses on providing at-least-once job processing semantics with features like automatic retries, job timeouts, dead-letter queues, and graceful worker shutdown.
 
 ## Core Components
@@ -58,10 +53,20 @@ RRQ is a Python library for creating reliable job queues using Redis and `asyncio`
 * **Worker Health Checks**: Workers periodically update a health key in Redis with a TTL, allowing monitoring systems to track active workers.
 * **Deferred Execution**: Jobs can be scheduled to run at a future time using `_defer_by` or `_defer_until`.
     *Note: Using deferral with a specific `_job_id` will effectively reschedule the job associated with that ID to the new time, overwriting its previous definition and score. It does not create multiple distinct scheduled jobs with the same ID.*
+    *To batch multiple enqueue calls into a single deferred job (and prevent duplicates within the defer window), combine `_unique_key` with `_defer_by`. For example:*
+
+    ```python
+    await client.enqueue(
+        "process_updates",
+        item_id=123,
+        _unique_key="update:123",
+        _defer_by=10,
+    )
+    ```
 
 ## Basic Usage
 
-*(See [`rrq_example.py`](examples/rrq_example.py) in the project root for a runnable example)*
+*(See [`rrq_example.py`](https://github.com/GetResQ/rrq/tree/master/example) in the project root for a runnable example)*
 
 **1. Define Handlers:**
 
@@ -165,10 +170,9 @@ rrq <command> [options]
 
 - **`worker run`**: Run an RRQ worker process to process jobs from queues.
   ```bash
-  rrq worker run [--burst] [--detach] --settings <settings_path>
+  rrq worker run [--burst] --settings <settings_path>
   ```
   - `--burst`: Run in burst mode (process one job/batch then exit).
-  - `--detach`: Run the worker in the background.
   - `--settings`: Python settings path for application worker settings (e.g., `myapp.worker_config.rrq_settings`).
 
 - **`worker watch`**: Run an RRQ worker with auto-restart on file changes in a specified directory.
rrq-0.3.5.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+rrq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+rrq/cli.py,sha256=B9ra9ipZGRto423uG1vGEP-_jLshfl2XrCTNqixBMiQ,16562
+rrq/client.py,sha256=5_bmZ05LKIfY9WFSKU-nYawEupsnrnHT2HewXfC2Ahg,7831
+rrq/constants.py,sha256=F_uZgBI3h00MctnEjBjiCGMrg5jUaz5Bz9I1vkyqNrs,1654
+rrq/exc.py,sha256=NJq3C7pUfcd47AB8kghIN8vdY0l90UrsHQEg4McBHP8,1281
+rrq/job.py,sha256=eUbl33QDqDMXPKpo-0dl0Mp29LWWmtbBgRw0sclcwJ4,4011
+rrq/registry.py,sha256=E9W_zx3QiKTBwMOGearaNpDKBDB87JIn0RlMQ3sAcP0,2925
+rrq/settings.py,sha256=BPKP4XjG7z475gqRgHZt4-IvvOs8uZefq4fPfD2Bepk,4350
+rrq/store.py,sha256=teO0Af8hzBiu7-dFn6_2lz5X90LAZXmtg0VDZuQoAwk,24972
+rrq/worker.py,sha256=y0UTziZVh4QbOPv24b8cqbm_xDBM0HtJLwPNYsJPWnE,40706
+rrq-0.3.5.dist-info/METADATA,sha256=0sqVBY1QbqT2GJshYbz_nkKPYfStjZVKE3ICDvv7jdU,9224
+rrq-0.3.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+rrq-0.3.5.dist-info/entry_points.txt,sha256=f8eFjk2ygDSyu9USwXGj5IM8xeyQqZgDa1rSrCj4Mis,36
+rrq-0.3.5.dist-info/licenses/LICENSE,sha256=XDvu5hKdS2-_ByiSj3tiu_3zSsrXXoJsgbILGoMpKCw,554
+rrq-0.3.5.dist-info/RECORD,,
{rrq-0.2.5.dist-info → rrq-0.3.5.dist-info}/entry_points.txt RENAMED
@@ -1,2 +1,2 @@
 [console_scripts]
-rrq = rrq.rrq:rrq
+rrq = rrq.cli:rrq
rrq-0.2.5.dist-info/RECORD DELETED
@@ -1,15 +0,0 @@
-rrq/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rrq/client.py,sha256=2CXHxu-5TnTISqYfHPHNiTUKtdlwzSNWlSALC0tivvA,6778
-rrq/constants.py,sha256=_BY0iIyztTyoaA8HU43JPG0yGtm_Fv05t-zC3B9al4Q,1574
-rrq/exc.py,sha256=NJq3C7pUfcd47AB8kghIN8vdY0l90UrsHQEg4McBHP8,1281
-rrq/job.py,sha256=sjABmhevzen2ufTwSbj86lPYQCW_HZLae1EneCB1ZX8,5571
-rrq/registry.py,sha256=3wqwwIvqbG4zWRAA8UOuL7_AXOWFHYQA9Mxq6Ad63Xo,3048
-rrq/rrq.py,sha256=b3FeT_PU9bN8OfPhC99-i_b1OgIdV6MokyKepi8OFPw,13054
-rrq/settings.py,sha256=o9XdJ85mbCwJYDVKjVMTBaK8VFZWMGdN_Av9T3fIY7M,4397
-rrq/store.py,sha256=6AZsbK4hnNWMueAFtd0UjPqS8TJOPNLhuSufe214VBM,24352
-rrq/worker.py,sha256=3VybAINLiBrnseisuqZt744nFE-6-WD0CBL8mlN437o,39952
-rrq-0.2.5.dist-info/METADATA,sha256=yDdof--bDhs02JP35naVvvOMma-TaEhaPU_fj5eEpyE,8984
-rrq-0.2.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-rrq-0.2.5.dist-info/entry_points.txt,sha256=tpzz5voYGwoIv6ir-UBUTmkCl1HVtGLTW7An80RUCIk,36
-rrq-0.2.5.dist-info/licenses/LICENSE,sha256=XDvu5hKdS2-_ByiSj3tiu_3zSsrXXoJsgbILGoMpKCw,554
-rrq-0.2.5.dist-info/RECORD,,