parsl 2025.10.13__py3-none-any.whl → 2025.10.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of parsl has been flagged as potentially problematic.

Files changed (28)
  1. parsl/app/bash.py +1 -1
  2. parsl/concurrent/__init__.py +95 -14
  3. parsl/data_provider/globus.py +3 -1
  4. parsl/dataflow/dflow.py +92 -192
  5. parsl/dataflow/memoization.py +126 -2
  6. parsl/dataflow/states.py +5 -5
  7. parsl/executors/high_throughput/interchange.py +4 -7
  8. parsl/jobs/strategy.py +7 -6
  9. parsl/tests/configs/local_threads_globus.py +18 -14
  10. parsl/tests/sites/test_concurrent.py +51 -3
  11. parsl/tests/test_htex/test_priority_queue.py +18 -1
  12. parsl/tests/test_python_apps/test_basic.py +0 -14
  13. parsl/tests/test_python_apps/test_depfail_propagation.py +11 -1
  14. parsl/tests/test_python_apps/test_exception.py +19 -0
  15. parsl/tests/test_scaling/test_regression_3696_oscillation.py +1 -0
  16. parsl/tests/test_staging/test_staging_globus.py +2 -2
  17. parsl/tests/unit/test_globus_compute_executor.py +11 -2
  18. parsl/version.py +1 -1
  19. {parsl-2025.10.13.data → parsl-2025.10.27.data}/scripts/interchange.py +4 -7
  20. {parsl-2025.10.13.dist-info → parsl-2025.10.27.dist-info}/METADATA +5 -3
  21. {parsl-2025.10.13.dist-info → parsl-2025.10.27.dist-info}/RECORD +28 -27
  22. {parsl-2025.10.13.data → parsl-2025.10.27.data}/scripts/exec_parsl_function.py +0 -0
  23. {parsl-2025.10.13.data → parsl-2025.10.27.data}/scripts/parsl_coprocess.py +0 -0
  24. {parsl-2025.10.13.data → parsl-2025.10.27.data}/scripts/process_worker_pool.py +0 -0
  25. {parsl-2025.10.13.dist-info → parsl-2025.10.27.dist-info}/LICENSE +0 -0
  26. {parsl-2025.10.13.dist-info → parsl-2025.10.27.dist-info}/WHEEL +0 -0
  27. {parsl-2025.10.13.dist-info → parsl-2025.10.27.dist-info}/entry_points.txt +0 -0
  28. {parsl-2025.10.13.dist-info → parsl-2025.10.27.dist-info}/top_level.txt +0 -0
parsl/dataflow/memoization.py CHANGED
@@ -4,15 +4,18 @@ import hashlib
  import logging
  import os
  import pickle
+ import threading
  import types
  from concurrent.futures import Future
  from functools import lru_cache, singledispatch
- from typing import Any, Dict, List, Optional, Sequence
+ from typing import Any, Dict, List, Literal, Optional, Sequence
 
  import typeguard
 
  from parsl.dataflow.errors import BadCheckpoint
  from parsl.dataflow.taskrecord import TaskRecord
+ from parsl.errors import ConfigurationError, InternalConsistencyError
+ from parsl.utils import Timer, get_all_checkpoints
 
  logger = logging.getLogger(__name__)
@@ -146,7 +149,13 @@ class Memoizer:
 
      """
 
-     def __init__(self, *, memoize: bool = True, checkpoint_files: Sequence[str]):
+     run_dir: str
+
+     def __init__(self, *,
+                  memoize: bool = True,
+                  checkpoint_files: Sequence[str] | None,
+                  checkpoint_period: Optional[str],
+                  checkpoint_mode: Literal['task_exit', 'periodic', 'dfk_exit', 'manual'] | None):
          """Initialize the memoizer.
 
          KWargs:
@@ -155,6 +164,26 @@ class Memoizer:
          """
          self.memoize = memoize
 
+         self.checkpointed_tasks = 0
+
+         self.checkpoint_lock = threading.Lock()
+
+         self.checkpoint_files = checkpoint_files
+         self.checkpoint_mode = checkpoint_mode
+         self.checkpoint_period = checkpoint_period
+
+         self.checkpointable_tasks: List[TaskRecord] = []
+
+         self._checkpoint_timer: Timer | None = None
+
+     def start(self) -> None:
+         if self.checkpoint_files is not None:
+             checkpoint_files = self.checkpoint_files
+         elif self.checkpoint_files is None and self.checkpoint_mode is not None:
+             checkpoint_files = get_all_checkpoints(self.run_dir)
+         else:
+             checkpoint_files = []
+
          checkpoint = self.load_checkpoints(checkpoint_files)
 
          if self.memoize:
@@ -164,6 +193,26 @@ class Memoizer:
              logger.info("App caching disabled for all apps")
              self.memo_lookup_table = {}
 
+         if self.checkpoint_mode == "periodic":
+             if self.checkpoint_period is None:
+                 raise ConfigurationError("Checkpoint period must be specified with periodic checkpoint mode")
+             else:
+                 try:
+                     h, m, s = map(int, self.checkpoint_period.split(':'))
+                 except Exception:
+                     raise ConfigurationError("invalid checkpoint_period provided: {0} expected HH:MM:SS".format(self.checkpoint_period))
+                 checkpoint_period = (h * 3600) + (m * 60) + s
+                 self._checkpoint_timer = Timer(self.checkpoint, interval=checkpoint_period, name="Checkpoint")
+
+     def close(self) -> None:
+         if self.checkpoint_mode is not None:
+             logger.info("Making final checkpoint")
+             self.checkpoint()
+
+         if self._checkpoint_timer:
+             logger.info("Stopping checkpoint timer")
+             self._checkpoint_timer.close()
+
      def make_hash(self, task: TaskRecord) -> str:
          """Create a hash of the task inputs.
 
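For context on the hunk above: checkpoint_period is a wall-clock interval written as HH:MM:SS, converted to seconds before being handed to the Timer. A standalone sketch of that conversion (variable names here are illustrative, not parsl API):

    # Illustrative only: the same HH:MM:SS -> seconds conversion as above.
    period = "01:30:00"
    h, m, s = map(int, period.split(':'))
    seconds = (h * 3600) + (m * 60) + s
    assert seconds == 5400  # a checkpoint would be written every 90 minutes
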
@@ -324,3 +373,78 @@ class Memoizer:
              return self._load_checkpoints(checkpointDirs)
          else:
              return {}
+
+     def update_checkpoint(self, task_record: TaskRecord) -> None:
+         if self.checkpoint_mode == 'task_exit':
+             self.checkpoint(task=task_record)
+         elif self.checkpoint_mode in ('manual', 'periodic', 'dfk_exit'):
+             with self.checkpoint_lock:
+                 self.checkpointable_tasks.append(task_record)
+         elif self.checkpoint_mode is None:
+             pass
+         else:
+             raise InternalConsistencyError(f"Invalid checkpoint mode {self.checkpoint_mode}")
+
+     def checkpoint(self, *, task: Optional[TaskRecord] = None) -> None:
+         """Checkpoint the dfk incrementally to a checkpoint file.
+
+         When called with no argument, all tasks registered in self.checkpointable_tasks
+         will be checkpointed. When called with a single TaskRecord argument, only that
+         task will be checkpointed.
+
+         By default the checkpoints are written to the RUNDIR of the current
+         run under RUNDIR/checkpoints/tasks.pkl
+
+         Kwargs:
+             - task (Optional TaskRecord) : A task to checkpoint. Default=None, meaning all
+               tasks registered for checkpointing.
+
+         .. note::
+             Checkpointing only works if memoization is enabled
+
+         """
+         with self.checkpoint_lock:
+
+             if task:
+                 checkpoint_queue = [task]
+             else:
+                 checkpoint_queue = self.checkpointable_tasks
+
+             checkpoint_dir = '{0}/checkpoint'.format(self.run_dir)
+             checkpoint_tasks = checkpoint_dir + '/tasks.pkl'
+
+             if not os.path.exists(checkpoint_dir):
+                 os.makedirs(checkpoint_dir, exist_ok=True)
+
+             count = 0
+
+             with open(checkpoint_tasks, 'ab') as f:
+                 for task_record in checkpoint_queue:
+                     task_id = task_record['id']
+
+                     app_fu = task_record['app_fu']
+
+                     if app_fu.done() and app_fu.exception() is None:
+                         hashsum = task_record['hashsum']
+                         if not hashsum:
+                             continue
+                         t = {'hash': hashsum, 'exception': None, 'result': app_fu.result()}
+
+                         # We are using pickle here since pickle dumps to a file in 'ab'
+                         # mode behave like an incremental log.
+                         pickle.dump(t, f)
+                         count += 1
+                         logger.debug("Task {} checkpointed".format(task_id))
+
+             self.checkpointed_tasks += count
+
+             if count == 0:
+                 if self.checkpointed_tasks == 0:
+                     logger.warning("No tasks checkpointed so far in this run. Please ensure caching is enabled")
+                 else:
+                     logger.debug("No tasks checkpointed in this pass.")
+             else:
+                 logger.info("Done checkpointing {} tasks".format(count))
+
+             if not task:
+                 self.checkpointable_tasks = []
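As the comment in the hunk notes, opening the checkpoint file in 'ab' mode makes repeated pickle.dump calls behave like an append-only log, one record per checkpointed task. A minimal sketch of reading such a log back, assuming the record layout shown above (parsl's own loader is the existing _load_checkpoints path and may differ in detail):

    import pickle

    def read_checkpoint_log(path):
        """Yield each {'hash', 'exception', 'result'} record from an append-mode pickle log."""
        with open(path, 'rb') as f:
            while True:
                try:
                    yield pickle.load(f)  # one record per historical pickle.dump()
                except EOFError:
                    break  # reached the end of the log
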
parsl/dataflow/states.py CHANGED
@@ -67,10 +67,10 @@ class States(IntEnum):
          return self.__class__.__name__ + "." + self.name
 
 
- FINAL_STATES = [States.exec_done, States.memo_done, States.failed, States.dep_fail]
- """States from which we will never move to another state, because the job has
- either definitively completed or failed."""
-
- FINAL_FAILURE_STATES = [States.failed, States.dep_fail]
+ FINAL_FAILURE_STATES = {States.failed, States.dep_fail}
  """States which are final and which indicate a failure. This must
  be a subset of FINAL_STATES"""
+
+ FINAL_STATES = {States.exec_done, States.memo_done, *FINAL_FAILURE_STATES}
+ """States from which we will never move to another state, because the job has
+ either definitively completed or failed."""
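Both collections are now sets rather than lists, and FINAL_STATES is built from FINAL_FAILURE_STATES, so the subset invariant stated in the docstring holds by construction. A quick illustration (standard set operations, nothing parsl-specific):

    from parsl.dataflow.states import FINAL_FAILURE_STATES, FINAL_STATES, States

    assert FINAL_FAILURE_STATES <= FINAL_STATES          # the documented subset invariant
    assert States.exec_done in FINAL_STATES              # success is final...
    assert States.exec_done not in FINAL_FAILURE_STATES  # ...but not a failure
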
parsl/executors/high_throughput/interchange.py CHANGED
@@ -138,9 +138,6 @@ class Interchange:
 
          self.pending_task_queue: SortedList[Any] = SortedList(key=lambda tup: (tup[0], tup[1]))
 
-         # count of tasks that have been received from the submit side
-         self.task_counter = 0
-
          # count of tasks that have been sent out to worker pools
          self.count = 0
 
@@ -332,15 +329,15 @@ class Interchange:
          msg = self.task_incoming.recv_pyobj()
 
          # Process priority, higher number = lower priority
+         task_id = msg['task_id']
          resource_spec = msg['context'].get('resource_spec', {})
          priority = resource_spec.get('priority', float('inf'))
-         queue_entry = (-priority, -self.task_counter, msg)
+         queue_entry = (-priority, -task_id, msg)
 
-         logger.debug("putting message onto pending_task_queue")
+         logger.debug("Putting task %s onto pending_task_queue", task_id)
 
          self.pending_task_queue.add(queue_entry)
-         self.task_counter += 1
-         logger.debug(f"Fetched {self.task_counter} tasks so far")
+         logger.debug("Put task %s onto pending_task_queue", task_id)
 
      def process_manager_socket_message(
          self,
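This hunk replaces the interchange-local task_counter with the submit-side task_id as the tiebreaker. Entries sort ascending on (-priority, -task_id); assuming tasks are drawn from the tail of the SortedList (which this encoding implies), the lowest priority number is served first and tasks of equal priority are served in task-id (submission) order. A small demonstration of that ordering, with illustrative payloads:

    from sortedcontainers import SortedList  # same structure the interchange uses

    q = SortedList(key=lambda tup: (tup[0], tup[1]))
    q.add((-10, -1, "priority 10, task 1"))  # low priority, submitted first
    q.add((-1, -2, "priority 1, task 2"))    # high priority
    q.add((-1, -3, "priority 1, task 3"))    # high priority, submitted later

    # Popping from the tail yields the most urgent task first,
    # and FIFO order within a priority level:
    assert q.pop() == (-1, -2, "priority 1, task 2")
    assert q.pop() == (-1, -3, "priority 1, task 3")
    assert q.pop() == (-10, -1, "priority 10, task 1")
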
parsl/jobs/strategy.py CHANGED
@@ -185,6 +185,11 @@ class Strategy:
 
          for executor in executors:
              label = executor.label
+
+             if executor.bad_state_is_set:
+                 logger.info(f"Not strategizing for executor {label} because bad state is set")
+                 continue
+
              logger.debug(f"Strategizing for executor {label}")
 
              if self.executors[label]['first']:
@@ -213,12 +218,8 @@
 
              logger.debug(f"Slot ratio calculation: active_slots = {active_slots}, active_tasks = {active_tasks}")
 
-             if hasattr(executor, 'connected_workers'):
-                 logger.debug('Executor {} has {} active tasks, {}/{} running/pending blocks, and {} connected workers'.format(
-                     label, active_tasks, running, pending, executor.connected_workers()))
-             else:
-                 logger.debug('Executor {} has {} active tasks and {}/{} running/pending blocks'.format(
-                     label, active_tasks, running, pending))
+             logger.debug('Executor {} has {} active tasks and {}/{} running/pending blocks'.format(
+                 label, active_tasks, running, pending))
 
              # reset idle timer if executor has active tasks
 
parsl/tests/configs/local_threads_globus.py CHANGED
@@ -1,6 +1,5 @@
  from parsl.config import Config
  from parsl.data_provider.data_manager import default_staging
- from parsl.data_provider.globus import GlobusStaging
  from parsl.executors.threads import ThreadPoolExecutor
 
  # If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
@@ -10,19 +9,24 @@ from parsl.executors.threads import ThreadPoolExecutor
  # (i.e., user_opts['swan']['username'] -> 'your_username')
  from .user_opts import user_opts
 
- storage_access = default_staging + [GlobusStaging(
-     endpoint_uuid=user_opts['globus']['endpoint'],
-     endpoint_path=user_opts['globus']['path']
- )]
 
- config = Config(
-     executors=[
-         ThreadPoolExecutor(
-             label='local_threads_globus',
-             working_dir=user_opts['globus']['path'],
-             storage_access=storage_access
-         )
-     ]
- )
+ def fresh_config():
+     from parsl.data_provider.globus import GlobusStaging
+
+     storage_access = default_staging + [GlobusStaging(
+         endpoint_uuid=user_opts['globus']['endpoint'],
+         endpoint_path=user_opts['globus']['path']
+     )]
+
+     return Config(
+         executors=[
+             ThreadPoolExecutor(
+                 label='local_threads_globus',
+                 working_dir=user_opts['globus']['path'],
+                 storage_access=storage_access
+             )
+         ]
+     )
+
 
  remote_writeable = user_opts['globus']['remote_writeable']
parsl/tests/sites/test_concurrent.py CHANGED
@@ -1,7 +1,7 @@
  """Tests of the interfaces to Python's concurrent library"""
- from pytest import mark, warns
+ from pytest import mark, raises, warns
 
- from parsl import Config, HighThroughputExecutor
+ from parsl import Config, HighThroughputExecutor, load, python_app
  from parsl.concurrent import ParslPoolExecutor
 
 
@@ -9,21 +9,43 @@ def f(x):
      return x + 1
 
 
+ def g(x):
+     return 2 * x
+
+
+ @python_app
+ def is_odd(x):
+     if x % 2 == 1:
+         return 1
+     else:
+         return 0
+
+
  def make_config():
      return Config(
          executors=[
              HighThroughputExecutor(
+                 label='test_executor',
                  address="127.0.0.1",
                  max_workers_per_node=2,
                  heartbeat_period=2,
                  heartbeat_threshold=4,
-                 encrypted=True,
+                 encrypted=False,
              )
          ],
          strategy='none',
      )
 
 
+ @mark.local
+ def test_init_errors():
+     with load(make_config()) as dfk, raises(ValueError, match='Specify only one of config or dfk'):
+         ParslPoolExecutor(config=make_config(), dfk=dfk)
+
+     with raises(ValueError, match='Must specify one of config or dfk'):
+         ParslPoolExecutor()
+
+
  @mark.local
  def test_executor():
      my_config = make_config()
@@ -44,5 +66,31 @@ def test_executor():
      # Make sure only one function was registered
      assert exc.app_count == 1
 
+     with raises(RuntimeError, match='shut down'):
+         exc.submit(f, 1)
+
      with warns(UserWarning):
          ParslPoolExecutor(make_config()).shutdown(False, cancel_futures=True)
+
+
+ @mark.local
+ def test_with_dfk():
+     config = make_config()
+
+     with load(config) as dfk, ParslPoolExecutor(dfk=dfk, executors=['test_executor']) as exc:
+         future = exc.submit(f, 1)
+         assert future.result() == 2
+         assert exc.get_app(f).executors == ['test_executor']
+
+
+ @mark.local
+ def test_chaining():
+     """Make sure the executor functions can be chained together"""
+     config = make_config()
+
+     with ParslPoolExecutor(config) as exc:
+         future_odd = exc.submit(f, 10)
+         assert is_odd(future_odd).result()
+
+         future_even = exc.submit(g, future_odd)
+         assert not is_odd(future_even).result()
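These tests exercise the reworked parsl/concurrent/__init__.py (the +95 -14 entry in the file list above): ParslPoolExecutor now accepts either a config or an already-loaded DataFlowKernel, but not both, and can be pinned to named executors. A hedged usage sketch based only on the tests above (the executor settings are illustrative):

    import parsl
    from parsl import Config, HighThroughputExecutor
    from parsl.concurrent import ParslPoolExecutor

    def increment(x):
        return x + 1

    config = Config(executors=[HighThroughputExecutor(label='htex', address='127.0.0.1')])

    # Share one DataFlowKernel between ordinary parsl apps and a
    # concurrent.futures-style pool pinned to the 'htex' executor:
    with parsl.load(config) as dfk:
        with ParslPoolExecutor(dfk=dfk, executors=['htex']) as pool:
            assert pool.submit(increment, 41).result() == 42
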
parsl/tests/test_htex/test_priority_queue.py CHANGED
@@ -18,7 +18,7 @@ def fake_task(parsl_resource_specification=None):
 
 
  @pytest.mark.local
- def test_priority_queue():
+ def test_priority_queue(try_assert):
      provider = LocalProvider(
          init_blocks=0,
          max_blocks=0,
@@ -30,6 +30,7 @@ def test_priority_queue():
          max_workers_per_node=1,
          manager_selector=RandomManagerSelector(),
          provider=provider,
+         worker_debug=True,  # needed to introspect interchange logs
      )
 
      config = Config(
@@ -50,6 +51,22 @@ def test_priority_queue():
          spec = {'priority': priority}
          futures[(priority, i)] = fake_task(parsl_resource_specification=spec)
 
+     # wait for the interchange to have received all tasks
+     # (which happens asynchronously to the main thread, and is otherwise
+     # a race condition which can cause this test to fail)
+
+     n = len(priorities)
+
+     def interchange_logs_task_count():
+         with open(htex.worker_logdir + "/interchange.log", "r") as f:
+             lines = f.readlines()
+             for line in lines:
+                 if f"Put task {n} onto pending_task_queue" in line:
+                     return True
+         return False
+
+     try_assert(interchange_logs_task_count)
+
      provider.max_blocks = 1
      htex.scale_out_facade(1)  # don't wait for the JSP to catch up
 
parsl/tests/test_python_apps/test_basic.py CHANGED
@@ -14,12 +14,6 @@ def import_square(x):
      return math.pow(x, 2)
 
 
- @python_app
- def custom_exception():
-     from globus_sdk import GlobusError
-     raise GlobusError('foobar')
-
-
  def test_simple(n=2):
      x = double(n)
      assert x.result() == n * 2
@@ -38,11 +32,3 @@ def test_parallel_for(n):
 
      for i in d:
          assert d[i].result() == 2 * i
-
-
- def test_custom_exception():
-     from globus_sdk import GlobusError
-
-     x = custom_exception()
-     with pytest.raises(GlobusError):
-         x.result()
parsl/tests/test_python_apps/test_depfail_propagation.py CHANGED
@@ -1,5 +1,7 @@
+ import parsl
  from parsl import python_app
  from parsl.dataflow.errors import DependencyError
+ from parsl.dataflow.states import States
 
 
  @python_app
@@ -14,6 +16,7 @@ def depends(parent):
 
  def test_depfail_once():
      """Test the simplest dependency failure case"""
+     start_dep_fail_count = parsl.dfk().task_state_counts[States.dep_fail]
      f1 = fails()
      f2 = depends(f1)
 
@@ -25,9 +28,12 @@ def test_depfail_once():
      # in the DependencyError message
      assert ("task " + str(f1.task_record['id'])) in str(f2.exception())
 
+     assert parsl.dfk().task_state_counts[States.dep_fail] == start_dep_fail_count + 1
+
 
  def test_depfail_chain():
      """Test that dependency failures chain"""
+     start_dep_fail_count = parsl.dfk().task_state_counts[States.dep_fail]
      f1 = fails()
      f2 = depends(f1)
      f3 = depends(f2)
@@ -39,11 +45,13 @@ def test_depfail_chain():
      assert isinstance(f3.exception(), DependencyError)
      assert isinstance(f4.exception(), DependencyError)
 
+     assert parsl.dfk().task_state_counts[States.dep_fail] == start_dep_fail_count + 3
+
 
  def test_depfail_branches():
      """Test that dependency failures propagate in the
      presence of multiple downstream tasks."""
-
+     start_dep_fail_count = parsl.dfk().task_state_counts[States.dep_fail]
      f1 = fails()
      f2 = depends(f1)
      f3 = depends(f1)
@@ -52,3 +60,5 @@ def test_depfail_branches():
      assert not isinstance(f1.exception(), DependencyError)
      assert isinstance(f2.exception(), DependencyError)
      assert isinstance(f3.exception(), DependencyError)
+
+     assert parsl.dfk().task_state_counts[States.dep_fail] == start_dep_fail_count + 2
parsl/tests/test_python_apps/test_exception.py ADDED
@@ -0,0 +1,19 @@
+ import pytest
+
+ from parsl.app.app import python_app
+
+
+ class CustomException(Exception):
+     pass
+
+
+ @python_app
+ def custom_exception():
+     from parsl.tests.test_python_apps.test_exception import CustomException
+     raise CustomException('foobar')
+
+
+ def test_custom_exception():
+     x = custom_exception()
+     with pytest.raises(CustomException):
+         x.result()
parsl/tests/test_scaling/test_regression_3696_oscillation.py CHANGED
@@ -51,6 +51,7 @@ def test_htex_strategy_does_not_oscillate(ns):
      executor.outstanding = lambda: n_tasks
      executor.status_facade = statuses
      executor.workers_per_node = n_workers
+     executor.bad_state_is_set = False
 
      provider.parallelism = 1
      provider.init_blocks = 0
parsl/tests/test_staging/test_staging_globus.py CHANGED
@@ -3,9 +3,9 @@ import pytest
  import parsl
  from parsl.app.app import python_app
  from parsl.data_provider.files import File
- from parsl.tests.configs.local_threads_globus import config, remote_writeable
+ from parsl.tests.configs.local_threads_globus import fresh_config, remote_writeable
 
- local_config = config
+ local_config = fresh_config
 
 
  @python_app
parsl/tests/unit/test_globus_compute_executor.py CHANGED
@@ -2,18 +2,21 @@ import random
  from unittest import mock
 
  import pytest
- from globus_compute_sdk import Executor
 
  from parsl.executors import GlobusComputeExecutor
 
 
  @pytest.fixture
  def mock_ex():
-     # Not Parsl's job to test GC's Executor
+     # Not Parsl's job to test GC's Executor, although it
+     # still needs to be importable for these test cases.
+     from globus_compute_sdk import Executor
+
      yield mock.Mock(spec=Executor)
 
 
  @pytest.mark.local
+ @pytest.mark.globus_compute
  def test_gc_executor_mock_spec(mock_ex):
      # a test of tests -- make sure we're using spec= in the mock
      with pytest.raises(AttributeError):
@@ -21,12 +24,14 @@ def test_gc_executor_mock_spec(mock_ex):
 
 
  @pytest.mark.local
+ @pytest.mark.globus_compute
  def test_gc_executor_label_default(mock_ex):
      gce = GlobusComputeExecutor(mock_ex)
      assert gce.label == type(gce).__name__, "Expect reasonable default label"
 
 
  @pytest.mark.local
+ @pytest.mark.globus_compute
  def test_gc_executor_label(mock_ex, randomstring):
      exp_label = randomstring()
      gce = GlobusComputeExecutor(mock_ex, label=exp_label)
@@ -34,6 +39,7 @@ def test_gc_executor_label(mock_ex, randomstring):
 
 
  @pytest.mark.local
+ @pytest.mark.globus_compute
  def test_gc_executor_resets_spec_after_submit(mock_ex, randomstring):
      submit_res = {randomstring(): "some submit res"}
      res = {"some": randomstring(), "spec": randomstring()}
@@ -57,6 +63,7 @@ def test_gc_executor_resets_spec_after_submit(mock_ex, randomstring):
 
 
  @pytest.mark.local
+ @pytest.mark.globus_compute
  def test_gc_executor_resets_uep_after_submit(mock_ex, randomstring):
      uep_conf = randomstring()
      res = {"some": randomstring()}
@@ -79,6 +86,7 @@ def test_gc_executor_resets_uep_after_submit(mock_ex, randomstring):
 
 
  @pytest.mark.local
+ @pytest.mark.globus_compute
  def test_gc_executor_happy_path(mock_ex, randomstring):
      mock_fn = mock.Mock()
      args = tuple(randomstring() for _ in range(random.randint(0, 3)))
@@ -95,6 +103,7 @@ def test_gc_executor_happy_path(mock_ex, randomstring):
 
 
  @pytest.mark.local
+ @pytest.mark.globus_compute
  def test_gc_executor_shuts_down_asynchronously(mock_ex):
      gce = GlobusComputeExecutor(mock_ex)
      gce.shutdown()
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
  Year.Month.Day[alpha/beta/..]
  Alphas will be numbered like this -> 2024.12.10a0
  """
- VERSION = '2025.10.13'
+ VERSION = '2025.10.27'
{parsl-2025.10.13.data → parsl-2025.10.27.data}/scripts/interchange.py CHANGED
@@ -138,9 +138,6 @@ class Interchange:
 
          self.pending_task_queue: SortedList[Any] = SortedList(key=lambda tup: (tup[0], tup[1]))
 
-         # count of tasks that have been received from the submit side
-         self.task_counter = 0
-
          # count of tasks that have been sent out to worker pools
          self.count = 0
 
@@ -332,15 +329,15 @@ class Interchange:
          msg = self.task_incoming.recv_pyobj()
 
          # Process priority, higher number = lower priority
+         task_id = msg['task_id']
          resource_spec = msg['context'].get('resource_spec', {})
          priority = resource_spec.get('priority', float('inf'))
-         queue_entry = (-priority, -self.task_counter, msg)
+         queue_entry = (-priority, -task_id, msg)
 
-         logger.debug("putting message onto pending_task_queue")
+         logger.debug("Putting task %s onto pending_task_queue", task_id)
 
          self.pending_task_queue.add(queue_entry)
-         self.task_counter += 1
-         logger.debug(f"Fetched {self.task_counter} tasks so far")
+         logger.debug("Put task %s onto pending_task_queue", task_id)
 
      def process_manager_socket_message(
          self,
{parsl-2025.10.13.dist-info → parsl-2025.10.27.dist-info}/METADATA CHANGED
@@ -1,9 +1,9 @@
  Metadata-Version: 2.1
  Name: parsl
- Version: 2025.10.13
+ Version: 2025.10.27
  Summary: Simple data dependent workflows in Python
  Home-page: https://github.com/Parsl/parsl
- Download-URL: https://github.com/Parsl/parsl/archive/2025.10.13.tar.gz
+ Download-URL: https://github.com/Parsl/parsl/archive/2025.10.27.tar.gz
  Author: The Parsl Team
  Author-email: parsl@googlegroups.com
  License: Apache 2.0
@@ -19,7 +19,6 @@ License-File: LICENSE
  Requires-Dist: pyzmq>=17.1.2
  Requires-Dist: typeguard!=3.*,<5,>=2.10
  Requires-Dist: typing-extensions<5,>=4.6
- Requires-Dist: globus-sdk
  Requires-Dist: dill
  Requires-Dist: tblib
  Requires-Dist: requests
@@ -55,6 +54,7 @@ Requires-Dist: proxystore; extra == "all"
  Requires-Dist: radical.pilot==1.90; extra == "all"
  Requires-Dist: radical.utils==1.90; extra == "all"
  Requires-Dist: globus-compute-sdk>=2.34.0; extra == "all"
+ Requires-Dist: globus-sdk; extra == "all"
  Provides-Extra: aws
  Requires-Dist: boto3; extra == "aws"
  Provides-Extra: azure
@@ -71,6 +71,8 @@ Requires-Dist: cffi; extra == "flux"
  Requires-Dist: jsonschema; extra == "flux"
  Provides-Extra: globus_compute
  Requires-Dist: globus-compute-sdk>=2.34.0; extra == "globus-compute"
+ Provides-Extra: globus_transfer
+ Requires-Dist: globus-sdk; extra == "globus-transfer"
  Provides-Extra: google_cloud
  Requires-Dist: google-auth; extra == "google-cloud"
  Requires-Dist: google-api-python-client; extra == "google-cloud"
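Net effect of the metadata changes above: globus-sdk is no longer an unconditional dependency of parsl and now installs only via the new globus_transfer extra (or the all extra). Installations that use GlobusStaging will therefore likely need something like pip install 'parsl[globus_transfer]'. The earlier hunks in this diff are consistent with making that dependency optional: the deferred GlobusStaging and globus_compute_sdk imports, the new globus_compute pytest marker, and test_basic.py's exception test moving off GlobusError onto a parsl-local CustomException.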