pulpcore 3.89.1__py3-none-any.whl → 3.90.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pulpcore might be problematic. Click here for more details.

Files changed (43)
  1. pulp_certguard/app/__init__.py +1 -1
  2. pulp_file/app/__init__.py +1 -1
  3. pulp_file/tests/functional/api/test_filesystem_export.py +220 -0
  4. pulp_file/tests/functional/api/test_pulp_export.py +103 -3
  5. pulpcore/app/apps.py +1 -1
  6. pulpcore/app/importexport.py +18 -2
  7. pulpcore/app/management/commands/shell.py +8 -0
  8. pulpcore/app/migrations/0144_delete_old_appstatus.py +28 -0
  9. pulpcore/app/migrations/0145_domainize_import_export.py +53 -0
  10. pulpcore/app/modelresource.py +61 -21
  11. pulpcore/app/models/__init__.py +2 -5
  12. pulpcore/app/models/exporter.py +7 -1
  13. pulpcore/app/models/fields.py +0 -1
  14. pulpcore/app/models/importer.py +8 -1
  15. pulpcore/app/models/repository.py +16 -0
  16. pulpcore/app/models/status.py +8 -138
  17. pulpcore/app/models/task.py +15 -25
  18. pulpcore/app/serializers/domain.py +1 -1
  19. pulpcore/app/serializers/exporter.py +4 -4
  20. pulpcore/app/serializers/importer.py +2 -2
  21. pulpcore/app/serializers/task.py +11 -8
  22. pulpcore/app/tasks/importer.py +44 -10
  23. pulpcore/app/tasks/repository.py +27 -0
  24. pulpcore/app/viewsets/base.py +18 -14
  25. pulpcore/app/viewsets/domain.py +1 -1
  26. pulpcore/app/viewsets/exporter.py +1 -8
  27. pulpcore/app/viewsets/importer.py +1 -6
  28. pulpcore/app/viewsets/task.py +0 -1
  29. pulpcore/content/instrumentation.py +18 -15
  30. pulpcore/openapi/__init__.py +16 -2
  31. pulpcore/plugin/tasking.py +4 -2
  32. pulpcore/tasking/tasks.py +245 -127
  33. pulpcore/tasking/worker.py +41 -47
  34. pulpcore/tests/functional/api/test_crud_domains.py +7 -0
  35. pulpcore/tests/functional/api/test_tasking.py +2 -2
  36. pulpcore/tests/functional/api/using_plugin/test_crud_repos.py +9 -2
  37. pulpcore/tests/unit/content/test_handler.py +43 -0
  38. {pulpcore-3.89.1.dist-info → pulpcore-3.90.1.dist-info}/METADATA +7 -7
  39. {pulpcore-3.89.1.dist-info → pulpcore-3.90.1.dist-info}/RECORD +43 -39
  40. {pulpcore-3.89.1.dist-info → pulpcore-3.90.1.dist-info}/WHEEL +0 -0
  41. {pulpcore-3.89.1.dist-info → pulpcore-3.90.1.dist-info}/entry_points.txt +0 -0
  42. {pulpcore-3.89.1.dist-info → pulpcore-3.90.1.dist-info}/licenses/LICENSE +0 -0
  43. {pulpcore-3.89.1.dist-info → pulpcore-3.90.1.dist-info}/top_level.txt +0 -0
pulpcore/tasking/tasks.py CHANGED
@@ -6,8 +6,9 @@ import os
6
6
  import sys
7
7
  import traceback
8
8
  import tempfile
9
- import threading
10
9
  from gettext import gettext as _
10
+ from contextlib import contextmanager
11
+ from asgiref.sync import sync_to_async, async_to_sync
11
12
 
12
13
  from django.conf import settings
13
14
  from django.db import connection
@@ -60,91 +61,129 @@ def execute_task(task):
60
61
  contextvars.copy_context().run(_execute_task, task)
61
62
 
62
63
 
64
+ async def aexecute_task(task):
65
+ # This extra stack is needed to isolate the current_task ContextVar
66
+ await contextvars.copy_context().run(_aexecute_task, task)
67
+
68
+
63
69
  def _execute_task(task):
64
70
  # Store the task id in the context for `Task.current()`.
65
71
  current_task.set(task)
66
72
  task.set_running()
67
73
  domain = get_domain()
68
74
  try:
69
- _logger.info(
70
- "Starting task id: %s in domain: %s, task_type: %s, immediate: %s, deferred: %s",
71
- task.pk,
72
- domain.name,
73
- task.name,
74
- str(task.immediate),
75
- str(task.deferred),
76
- )
77
-
78
- # Execute task
79
- module_name, function_name = task.name.rsplit(".", 1)
80
- module = importlib.import_module(module_name)
81
- func = getattr(module, function_name)
82
- args = task.enc_args or ()
83
- kwargs = task.enc_kwargs or {}
84
- immediate = task.immediate
85
- is_coroutine_fn = asyncio.iscoroutinefunction(func)
86
-
87
- if immediate and not is_coroutine_fn:
88
- raise ValueError("Immediate tasks must be async functions.")
89
-
90
- if is_coroutine_fn:
91
- # both regular and immediate tasks can be coroutines, but only immediate must timeout
92
- _logger.debug("Task is coroutine %s", task.pk)
93
- coro = func(*args, **kwargs)
94
- if immediate:
95
- coro = asyncio.wait_for(coro, timeout=IMMEDIATE_TIMEOUT)
96
- loop = asyncio.get_event_loop()
97
- try:
98
- result = loop.run_until_complete(coro)
99
- except asyncio.TimeoutError:
100
- _logger.info(
101
- "Immediate task %s timed out after %s seconds.", task.pk, IMMEDIATE_TIMEOUT
102
- )
103
- raise RuntimeError(
104
- "Immediate task timed out after {timeout} seconds.".format(
105
- timeout=IMMEDIATE_TIMEOUT,
106
- )
107
- )
108
- else:
109
- result = func(*args, **kwargs)
110
-
75
+ log_task_start(task, domain)
76
+ task_function = get_task_function(task)
77
+ result = task_function()
111
78
  except Exception:
112
79
  exc_type, exc, tb = sys.exc_info()
113
80
  task.set_failed(exc, tb)
114
- _logger.info(
115
- "Task[{task_type}] {task_pk} failed ({exc_type}: {exc}) in domain: {domain}".format(
116
- task_type=task.name,
117
- task_pk=task.pk,
118
- exc_type=exc_type.__name__,
119
- exc=exc,
120
- domain=domain.name,
121
- )
122
- )
123
- _logger.info("\n".join(traceback.format_list(traceback.extract_tb(tb))))
81
+ log_task_failed(task, exc_type, exc, tb, domain)
124
82
  send_task_notification(task)
125
83
  else:
126
84
  task.set_completed(result)
127
- execution_time = task.finished_at - task.started_at
128
- execution_time_us = int(execution_time.total_seconds() * 1_000_000) # μs
129
- _logger.info(
130
- "Task completed %s in domain:"
131
- " %s, task_type: %s, immediate: %s, deferred: %s, execution_time: %s μs",
132
- task.pk,
133
- domain.name,
134
- task.name,
135
- str(task.immediate),
136
- str(task.deferred),
137
- execution_time_us,
138
- )
85
+ log_task_completed(task, domain)
86
+ send_task_notification(task)
87
+ return result
88
+ return None
89
+
90
+
91
+ async def _aexecute_task(task):
92
+ # Store the task id in the context for `Task.current()`.
93
+ current_task.set(task)
94
+ await sync_to_async(task.set_running)()
95
+ domain = get_domain()
96
+ try:
97
+ coroutine = get_task_function(task, ensure_coroutine=True)
98
+ result = await coroutine
99
+ except Exception:
100
+ exc_type, exc, tb = sys.exc_info()
101
+ await sync_to_async(task.set_failed)(exc, tb)
102
+ log_task_failed(task, exc_type, exc, tb, domain)
103
+ send_task_notification(task)
104
+ else:
105
+ await sync_to_async(task.set_completed)(result)
139
106
  send_task_notification(task)
107
+ log_task_completed(task, domain)
108
+ return result
109
+ return None
110
+
140
111
 
112
+ def log_task_start(task, domain):
113
+ _logger.info(
114
+ "Starting task id: %s in domain: %s, task_type: %s, immediate: %s, deferred: %s",
115
+ task.pk,
116
+ domain.name,
117
+ task.name,
118
+ str(task.immediate),
119
+ str(task.deferred),
120
+ )
141
121
 
142
- def running_from_thread_pool() -> bool:
143
- # TODO: this needs an alternative approach ASAP!
144
- # Currently we rely on the weak fact that ThreadPoolExecutor names threads like:
145
- # "ThreadPoolExecutor-0_0"
146
- thread_name = threading.current_thread().name
147
- return "ThreadPoolExecutor" in thread_name
122
+
123
+ def log_task_completed(task, domain):
124
+ execution_time = task.finished_at - task.started_at
125
+ execution_time_us = int(execution_time.total_seconds() * 1_000_000) # μs
126
+ _logger.info(
127
+ "Task completed %s in domain:"
128
+ " %s, task_type: %s, immediate: %s, deferred: %s, execution_time: %s μs",
129
+ task.pk,
130
+ domain.name,
131
+ task.name,
132
+ str(task.immediate),
133
+ str(task.deferred),
134
+ execution_time_us,
135
+ )
136
+
137
+
138
+ def log_task_failed(task, exc_type, exc, tb, domain):
139
+ _logger.info(
140
+ "Task[{task_type}] {task_pk} failed ({exc_type}: {exc}) in domain: {domain}".format(
141
+ task_type=task.name,
142
+ task_pk=task.pk,
143
+ exc_type=exc_type.__name__,
144
+ exc=exc,
145
+ domain=domain.name,
146
+ )
147
+ )
148
+ _logger.info("\n".join(traceback.format_list(traceback.extract_tb(tb))))
149
+
150
+
151
+ def get_task_function(task, ensure_coroutine=False):
152
+ module_name, function_name = task.name.rsplit(".", 1)
153
+ module = importlib.import_module(module_name)
154
+ func = getattr(module, function_name)
155
+ args = task.enc_args or ()
156
+ kwargs = task.enc_kwargs or {}
157
+ immediate = task.immediate
158
+ is_coroutine_fn = asyncio.iscoroutinefunction(func)
159
+
160
+ if immediate and not is_coroutine_fn:
161
+ raise ValueError("Immediate tasks must be async functions.")
162
+
163
+ if ensure_coroutine:
164
+ if not is_coroutine_fn:
165
+ return sync_to_async(func)(*args, **kwargs)
166
+ coro = func(*args, **kwargs)
167
+ if immediate:
168
+ coro = asyncio.wait_for(coro, timeout=IMMEDIATE_TIMEOUT)
169
+ return coro
170
+ else: # ensure normal function
171
+ if not is_coroutine_fn:
172
+ return lambda: func(*args, **kwargs)
173
+
174
+ async def task_wrapper(): # asyncio.wait_for + async_to_sync requires wrapping
175
+ coro = func(*args, **kwargs)
176
+ if immediate:
177
+ coro = asyncio.wait_for(coro, timeout=IMMEDIATE_TIMEOUT)
178
+ try:
179
+ return await coro
180
+ except asyncio.TimeoutError:
181
+ msg_template = "Immediate task %s timed out after %s seconds."
182
+ error_msg = msg_template % (task.pk, IMMEDIATE_TIMEOUT)
183
+ _logger.info(error_msg)
184
+ raise RuntimeError(error_msg)
185
+
186
+ return async_to_sync(task_wrapper)
148
187
 
149
188
 
150
189
  def dispatch(
@@ -194,18 +233,148 @@ def dispatch(
194
233
  ValueError: When `resources` is an unsupported type.
195
234
  """
196
235
 
197
- # Can't run short tasks immediately if running from thread pool
198
- immediate = immediate and not running_from_thread_pool()
236
+ execute_now = immediate and not called_from_content_app()
237
+ assert deferred or immediate, "A task must be at least `deferred` or `immediate`."
238
+ send_wakeup_signal = True if not immediate else False
239
+ function_name = get_function_name(func)
240
+ versions = get_version(versions, function_name)
241
+ colliding_resources, resources = get_resources(exclusive_resources, shared_resources, immediate)
242
+ task_payload = get_task_payload(
243
+ function_name, task_group, args, kwargs, resources, versions, immediate, deferred
244
+ )
245
+ task = Task.objects.create(**task_payload)
246
+ task.refresh_from_db() # The database will have assigned a timestamp for us.
247
+ if execute_now:
248
+ if are_resources_available(colliding_resources, task):
249
+ send_wakeup_signal = True if resources else False
250
+ task.unblock()
251
+ with using_workdir():
252
+ execute_task(task)
253
+ elif deferred: # Resources are blocked and can be deferred
254
+ task.app_lock = None
255
+ task.save()
256
+ else: # Can't be deferred
257
+ task.set_canceling()
258
+ task.set_canceled(TASK_STATES.CANCELED, "Resources temporarily unavailable.")
259
+ if send_wakeup_signal:
260
+ wakeup_worker(TASK_WAKEUP_UNBLOCK)
261
+ return task
262
+
263
+
264
+ async def adispatch(
265
+ func,
266
+ args=None,
267
+ kwargs=None,
268
+ task_group=None,
269
+ exclusive_resources=None,
270
+ shared_resources=None,
271
+ immediate=False,
272
+ deferred=True,
273
+ versions=None,
274
+ ):
275
+ """Async version of dispatch."""
276
+ execute_now = immediate and not called_from_content_app()
199
277
  assert deferred or immediate, "A task must be at least `deferred` or `immediate`."
278
+ function_name = get_function_name(func)
279
+ versions = get_version(versions, function_name)
280
+ colliding_resources, resources = get_resources(exclusive_resources, shared_resources, immediate)
281
+ send_wakeup_signal = False
282
+ task_payload = get_task_payload(
283
+ function_name, task_group, args, kwargs, resources, versions, immediate, deferred
284
+ )
285
+ task = await Task.objects.acreate(**task_payload)
286
+ await task.arefresh_from_db() # The database will have assigned a timestamp for us.
287
+ if execute_now:
288
+ if await async_are_resources_available(colliding_resources, task):
289
+ send_wakeup_signal = True if resources else False
290
+ await task.aunblock()
291
+ with using_workdir():
292
+ await aexecute_task(task)
293
+ elif deferred: # Resources are blocked and can be deferred
294
+ task.app_lock = None
295
+ await task.asave()
296
+ else: # Can't be deferred
297
+ task.set_canceling()
298
+ task.set_canceled(TASK_STATES.CANCELED, "Resources temporarily unavailable.")
299
+ if send_wakeup_signal:
300
+ await sync_to_async(wakeup_worker)(TASK_WAKEUP_UNBLOCK)
301
+ return task
302
+
303
+
304
+ def get_task_payload(
305
+ function_name, task_group, args, kwargs, resources, versions, immediate, deferred
306
+ ):
307
+ payload = {
308
+ "state": TASK_STATES.WAITING,
309
+ "logging_cid": (get_guid()),
310
+ "task_group": task_group,
311
+ "name": function_name,
312
+ "enc_args": args,
313
+ "enc_kwargs": kwargs,
314
+ "parent_task": Task.current(),
315
+ "reserved_resources_record": resources,
316
+ "versions": versions,
317
+ "immediate": immediate,
318
+ "deferred": deferred,
319
+ "profile_options": x_task_diagnostics_var.get(None),
320
+ "app_lock": None if not immediate else AppStatus.objects.current(), # Lazy evaluation...
321
+ }
322
+ return payload
323
+
324
+
325
+ @contextmanager
326
+ def using_workdir():
327
+ cur_dir = os.getcwd()
328
+ with tempfile.TemporaryDirectory(dir=settings.WORKING_DIRECTORY) as working_dir:
329
+ os.chdir(working_dir)
330
+ try:
331
+ yield
332
+ finally:
333
+ # Whether the task fails or not, we should always restore the workdir.
334
+ os.chdir(cur_dir)
335
+
200
336
 
337
+ async def async_are_resources_available(colliding_resources, task: Task) -> bool:
338
+ prior_tasks = Task.objects.filter(
339
+ state__in=TASK_INCOMPLETE_STATES, pulp_created__lt=task.pulp_created
340
+ )
341
+ colliding_resources_taken = await prior_tasks.filter(
342
+ reserved_resources_record__overlap=colliding_resources
343
+ ).aexists()
344
+ return not colliding_resources or not colliding_resources_taken
345
+
346
+
347
+ def are_resources_available(colliding_resources, task: Task) -> bool:
348
+ prior_tasks = Task.objects.filter(
349
+ state__in=TASK_INCOMPLETE_STATES, pulp_created__lt=task.pulp_created
350
+ )
351
+ colliding_resources_taken = prior_tasks.filter(
352
+ reserved_resources_record__overlap=colliding_resources
353
+ ).exists()
354
+ return not colliding_resources or not colliding_resources_taken
355
+
356
+
357
+ def called_from_content_app() -> bool:
358
+ current_app = AppStatus.objects.current()
359
+ return current_app is not None and current_app.app_type == "content"
360
+
361
+
362
+ def get_function_name(func):
201
363
  if callable(func):
202
364
  function_name = f"{func.__module__}.{func.__name__}"
203
365
  else:
204
366
  function_name = func
367
+ return function_name
368
+
205
369
 
370
+ def get_version(versions, function_name):
206
371
  if versions is None:
207
372
  versions = MODULE_PLUGIN_VERSIONS[function_name.split(".", maxsplit=1)[0]]
373
+ return versions
374
+
208
375
 
376
+ def get_resources(exclusive_resources, shared_resources, immediate):
377
+ domain_prn = get_prn(get_domain())
209
378
  if exclusive_resources is None:
210
379
  exclusive_resources = []
211
380
  else:
@@ -216,70 +385,19 @@ def dispatch(
216
385
  shared_resources = _validate_and_get_resources(shared_resources)
217
386
 
218
387
  # A task that is exclusive on a domain will block all tasks within that domain
219
- domain_prn = get_prn(get_domain())
220
388
  if domain_prn not in exclusive_resources:
221
389
  shared_resources.append(domain_prn)
222
390
  resources = exclusive_resources + [f"shared:{resource}" for resource in shared_resources]
223
391
 
224
- notify_workers = False
225
- task = Task.objects.create(
226
- state=TASK_STATES.WAITING,
227
- logging_cid=(get_guid()),
228
- task_group=task_group,
229
- name=function_name,
230
- enc_args=args,
231
- enc_kwargs=kwargs,
232
- parent_task=Task.current(),
233
- reserved_resources_record=resources,
234
- versions=versions,
235
- immediate=immediate,
236
- deferred=deferred,
237
- profile_options=x_task_diagnostics_var.get(None),
238
- app_lock=None if not immediate else AppStatus.objects.current(), # Lazy evaluation...
239
- )
240
- task.refresh_from_db() # The database will have assigned a timestamp for us.
392
+ # Compile a list of resources that must not be taken by other tasks.
393
+ colliding_resources = []
241
394
  if immediate:
242
- prior_tasks = Task.objects.filter(
243
- state__in=TASK_INCOMPLETE_STATES, pulp_created__lt=task.pulp_created
244
- )
245
- # Compile a list of resources that must not be taken by other tasks.
246
395
  colliding_resources = (
247
396
  shared_resources
248
397
  + exclusive_resources
249
398
  + [f"shared:{resource}" for resource in exclusive_resources]
250
399
  )
251
- # Can we execute this task immediately?
252
- if (
253
- not colliding_resources
254
- or not prior_tasks.filter(
255
- reserved_resources_record__overlap=colliding_resources
256
- ).exists()
257
- ):
258
- task.unblock()
259
-
260
- cur_dir = os.getcwd()
261
- with tempfile.TemporaryDirectory(dir=settings.WORKING_DIRECTORY) as working_dir:
262
- os.chdir(working_dir)
263
- try:
264
- execute_task(task)
265
- finally:
266
- # Whether the task fails or not, we should always restore the workdir.
267
- os.chdir(cur_dir)
268
-
269
- if resources:
270
- notify_workers = True
271
- elif deferred:
272
- # Resources are blocked. Let the others handle it.
273
- task.app_lock = None
274
- task.save()
275
- else:
276
- task.set_canceling()
277
- task.set_canceled(TASK_STATES.CANCELED, "Resources temporarily unavailable.")
278
- else:
279
- notify_workers = True
280
- if notify_workers:
281
- wakeup_worker(TASK_WAKEUP_UNBLOCK)
282
- return task
400
+ return colliding_resources, resources
283
401
 
284
402
 
285
403
  def cancel_task(task_id):
@@ -28,7 +28,7 @@ from pulpcore.constants import (
28
28
  )
29
29
  from pulpcore.metrics import init_otel_meter
30
30
  from pulpcore.app.apps import pulp_plugin_configs
31
- from pulpcore.app.models import Worker, Task, AppStatus, ApiAppStatus, ContentAppStatus
31
+ from pulpcore.app.models import Task, AppStatus
32
32
  from pulpcore.app.util import PGAdvisoryLock
33
33
  from pulpcore.exceptions import AdvisoryLockError
34
34
 
@@ -142,7 +142,7 @@ class PulpcoreWorker:
142
142
  elif notification.payload == TASK_WAKEUP_HANDLE:
143
143
  self.wakeup_handle = True
144
144
  else:
145
- _logger.warn("Unknown wakeup call recieved. Reason: '%s'", notification.payload)
145
+ _logger.warning("Unknown wakeup call recieved. Reason: '%s'", notification.payload)
146
146
  # We cannot be sure so assume everything happened.
147
147
  self.wakeup_unblock = not self.auxiliary
148
148
  self.wakeup_handle = True
@@ -188,21 +188,15 @@ class PulpcoreWorker:
188
188
  self.ignored_task_ids.remove(pk)
189
189
 
190
190
  def worker_cleanup(self):
191
- qs = AppStatus.objects.older_than(age=timedelta(days=7))
191
+ qs = AppStatus.objects.missing()
192
192
  for app_worker in qs:
193
- _logger.info(_("Clean missing %s worker %s."), app_worker.app_type, app_worker.name)
193
+ _logger.warning(
194
+ "Cleanup record of missing %s process %s.", app_worker.app_type, app_worker.name
195
+ )
194
196
  qs.delete()
195
- with contextlib.suppress(DatabaseError):
196
- # By now a migration on a newer release may have deleted these tables already.
197
- for cls, cls_name in (
198
- (Worker, "pulp"),
199
- (ApiAppStatus, "api"),
200
- (ContentAppStatus, "content"),
201
- ):
202
- qs = cls.objects.missing(age=timedelta(days=7))
203
- for app_worker in qs:
204
- _logger.info(_("Clean missing %s worker %s."), cls_name, app_worker.name)
205
- qs.delete()
197
+ # This will also serve as a pacemaker because it will be triggered regularly.
198
+ # Don't bother the others.
199
+ self.wakeup_unblock = True
206
200
 
207
201
  def beat(self):
208
202
  if self.app_status.last_heartbeat < timezone.now() - self.heartbeat_period:
@@ -223,7 +217,7 @@ class PulpcoreWorker:
223
217
  # to be able to report on a congested tasking system to produce reliable results.
224
218
  self.record_unblocked_waiting_tasks_metric()
225
219
 
226
- def notify_workers(self, reason="unknown"):
220
+ def notify_workers(self, reason):
227
221
  self.cursor.execute("SELECT pg_notify('pulp_worker_wakeup', %s)", (reason,))
228
222
 
229
223
  def cancel_abandoned_task(self, task, final_state, reason=None):
@@ -283,25 +277,28 @@ class PulpcoreWorker:
283
277
  Also it clears the notification about tasks to be unblocked and sends the notification that
284
278
  new unblocked tasks are made available.
285
279
 
286
- Returns the number of new unblocked tasks.
280
+ Returns None if another worker held the lock, True if unblocked tasks exist, else False.
287
281
  """
288
282
 
289
283
  assert not self.auxiliary
290
284
 
291
- count = 0
292
- self.wakeup_unblock_tasks = False
285
+ self.wakeup_unblock = False
293
286
  with contextlib.suppress(AdvisoryLockError), PGAdvisoryLock(TASK_UNBLOCKING_LOCK):
294
- if count := self._unblock_tasks():
287
+ self._unblock_tasks()
288
+
289
+ if (
290
+ Task.objects.filter(state__in=TASK_INCOMPLETE_STATES, app_lock=None)
291
+ .exclude(unblocked_at=None)
292
+ .exists()
293
+ ):
295
294
  self.notify_workers(TASK_WAKEUP_HANDLE)
296
- return count
295
+ return True
296
+ return False
297
+ return None
297
298
 
298
299
  def _unblock_tasks(self):
299
- """Iterate over waiting tasks and mark them unblocked accordingly.
300
+ """Iterate over waiting tasks and mark them unblocked accordingly."""
300
301
 
301
- Returns the number of new unblocked tasks.
302
- """
303
-
304
- count = 0
305
302
  taken_exclusive_resources = set()
306
303
  taken_shared_resources = set()
307
304
  # When batching this query, be sure to use "pulp_created" as a cursor
@@ -329,9 +326,6 @@ class PulpcoreWorker:
329
326
  task.pulp_domain.name,
330
327
  )
331
328
  task.unblock()
332
- count += 1
333
- # Don't consider this task's resources as held.
334
- continue
335
329
 
336
330
  elif (
337
331
  task.state == TASK_STATES.WAITING
@@ -350,7 +344,6 @@ class PulpcoreWorker:
350
344
  task.pulp_domain.name,
351
345
  )
352
346
  task.unblock()
353
- count += 1
354
347
  elif task.state == TASK_STATES.RUNNING and task.unblocked_at is None:
355
348
  # This should not happen in normal operation.
356
349
  # And it is only an issue if the worker running that task died, because it will
@@ -367,21 +360,22 @@ class PulpcoreWorker:
367
360
  taken_exclusive_resources.update(exclusive_resources)
368
361
  taken_shared_resources.update(shared_resources)
369
362
 
370
- return count
371
-
372
363
  def sleep(self):
373
364
  """Wait for signals on the wakeup channel while heart beating."""
374
365
 
375
366
  _logger.debug(_("Worker %s entering sleep state."), self.name)
376
367
  while not self.shutdown_requested and not self.wakeup_handle:
377
368
  r, w, x = select.select(
378
- [self.sentinel, connection.connection], [], [], self.heartbeat_period.seconds
369
+ [self.sentinel, connection.connection],
370
+ [],
371
+ [],
372
+ 0 if self.wakeup_unblock else self.heartbeat_period.seconds,
379
373
  )
380
374
  self.beat()
381
375
  if connection.connection in r:
382
376
  connection.connection.execute("SELECT 1")
383
- if self.wakeup_unblock:
384
- self.unblock_tasks()
377
+ if self.wakeup_unblock:
378
+ self.unblock_tasks()
385
379
  if self.sentinel in r:
386
380
  os.read(self.sentinel, 256)
387
381
  _logger.debug(_("Worker %s leaving sleep state."), self.name)
@@ -418,21 +412,21 @@ class PulpcoreWorker:
418
412
  [self.sentinel, connection.connection, task_process.sentinel],
419
413
  [],
420
414
  [],
421
- self.heartbeat_period.seconds,
415
+ 0 if self.wakeup_unblock or self.cancel_task else self.heartbeat_period.seconds,
422
416
  )
423
417
  self.beat()
424
418
  if connection.connection in r:
425
419
  connection.connection.execute("SELECT 1")
426
- if self.cancel_task:
427
- _logger.info(
428
- _("Received signal to cancel current task %s in domain: %s."),
429
- task.pk,
430
- domain.name,
431
- )
432
- cancel_state = TASK_STATES.CANCELED
433
- self.cancel_task = False
434
- if self.wakeup_unblock:
435
- self.unblock_tasks()
420
+ if self.cancel_task:
421
+ _logger.info(
422
+ _("Received signal to cancel current task %s in domain: %s."),
423
+ task.pk,
424
+ domain.name,
425
+ )
426
+ cancel_state = TASK_STATES.CANCELED
427
+ self.cancel_task = False
428
+ if self.wakeup_unblock:
429
+ self.unblock_tasks()
436
430
  if task_process.sentinel in r:
437
431
  if not task_process.is_alive():
438
432
  break
@@ -600,7 +594,7 @@ class PulpcoreWorker:
600
594
  if not self.auxiliary:
601
595
  # Attempt to flush the task queue completely.
602
596
  # Stop iteration if no new tasks were found to unblock.
603
- while self.unblock_tasks():
597
+ while self.unblock_tasks() is not False:
604
598
  self.handle_unblocked_tasks()
605
599
  self.handle_unblocked_tasks()
606
600
  else:
@@ -44,6 +44,13 @@ def test_crud_domains(pulpcore_bindings, monitor_task):
44
44
  valid_settings["location"] = "/testing/"
45
45
  assert domain.storage_settings == valid_settings
46
46
 
47
+ # An update request with no changes should return a 200 OK (without dispatching a task)
48
+ response = pulpcore_bindings.DomainsApi.partial_update_with_http_info(
49
+ domain.pulp_href, update_body
50
+ )
51
+ assert response.status_code == 200
52
+ assert response.data == domain
53
+
47
54
  # Delete the domain
48
55
  response = pulpcore_bindings.DomainsApi.delete(domain.pulp_href)
49
56
  monitor_task(response.task)
@@ -484,7 +484,7 @@ class TestImmediateTaskWithNoResource:
484
484
  )
485
485
  task = pulpcore_bindings.TasksApi.read(task_href)
486
486
  assert task.state == "failed"
487
- assert "task timed out after" in task.error["description"]
487
+ assert "timed out after" in task.error["description"]
488
488
 
489
489
 
490
490
  @pytest.fixture
@@ -576,4 +576,4 @@ class TestImmediateTaskWithBlockedResource:
576
576
  exclusive_resources=[COMMON_RESOURCE],
577
577
  )
578
578
  monitor_task(task_href)
579
- assert "task timed out after" in ctx.value.task.error["description"]
579
+ assert "timed out after" in ctx.value.task.error["description"]
@@ -208,6 +208,11 @@ def test_crud_remotes_full_workflow(
208
208
  new_remote = file_bindings.RemotesFileApi.read(remote.pulp_href)
209
209
  _compare_results(data, new_remote)
210
210
 
211
+ # An update request with no changes should return a 200 OK (without dispatching a task)
212
+ response = file_bindings.RemotesFileApi.partial_update_with_http_info(remote.pulp_href, data)
213
+ assert response.status_code == 200
214
+ _compare_results(data, response.data)
215
+
211
216
  # Test that a password can be updated with a PUT request.
212
217
  temp_remote = file_remote_factory(
213
218
  manifest_path=basic_manifest_path, url="http://", password="new"
@@ -226,6 +231,7 @@ def test_crud_remotes_full_workflow(
226
231
  assert exc.stdout.rstrip("\n") == "changed"
227
232
 
228
233
  # Test that password doesn't get unset when not passed with a PUT request.
234
+ # QUESTION: Why not? PUT is supposed to replace the whole entity in place.
229
235
  temp_remote = file_remote_factory(url="http://", password="new")
230
236
  href = temp_remote.pulp_href
231
237
  uuid = re.search(r"/api/v3/remotes/file/file/([\w-]+)/", href).group(1)
@@ -235,8 +241,9 @@ def test_crud_remotes_full_workflow(
235
241
 
236
242
  # test a PUT request without a password
237
243
  remote_update = {"name": temp_remote.name, "url": "http://"}
238
- response = file_bindings.RemotesFileApi.update(href, remote_update)
239
- monitor_task(response.task)
244
+ response = file_bindings.RemotesFileApi.update_with_http_info(href, remote_update)
245
+ assert response.status_code == 200
246
+ _compare_results(remote_update, response.data)
240
247
  exc = run(["pulpcore-manager", "shell", "-c", shell_cmd], text=True, capture_output=True)
241
248
  assert exc.stdout.rstrip("\n") == "new"
242
249