parsl 2023.5.29__py3-none-any.whl → 2023.6.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. parsl/addresses.py +2 -1
  2. parsl/app/errors.py +6 -20
  3. parsl/benchmark/perf.py +3 -1
  4. parsl/configs/vineex_local.py +26 -0
  5. parsl/data_provider/data_manager.py +2 -1
  6. parsl/data_provider/files.py +1 -1
  7. parsl/data_provider/globus.py +1 -1
  8. parsl/dataflow/memoization.py +1 -1
  9. parsl/executors/taskvine/__init__.py +3 -0
  10. parsl/executors/taskvine/errors.py +22 -0
  11. parsl/executors/taskvine/exec_parsl_function.py +207 -0
  12. parsl/executors/taskvine/executor.py +1055 -0
  13. parsl/executors/workqueue/executor.py +9 -7
  14. parsl/launchers/base.py +17 -0
  15. parsl/launchers/launchers.py +1 -16
  16. parsl/monitoring/monitoring.py +19 -8
  17. parsl/monitoring/visualization/plots/default/workflow_plots.py +32 -29
  18. parsl/providers/cluster_provider.py +2 -2
  19. parsl/providers/condor/condor.py +1 -1
  20. parsl/providers/kubernetes/kube.py +2 -1
  21. parsl/providers/slurm/slurm.py +1 -1
  22. parsl/tests/configs/taskvine_ex.py +11 -0
  23. parsl/tests/conftest.py +6 -6
  24. parsl/tests/scaling_tests/vineex_condor.py +10 -0
  25. parsl/tests/scaling_tests/vineex_local.py +10 -0
  26. parsl/tests/test_bash_apps/test_pipeline.py +2 -2
  27. parsl/tests/test_error_handling/test_retry_handler.py +1 -1
  28. parsl/tests/test_monitoring/test_viz_colouring.py +17 -0
  29. parsl/utils.py +2 -2
  30. parsl/version.py +1 -1
  31. {parsl-2023.5.29.dist-info → parsl-2023.6.12.dist-info}/METADATA +3 -3
  32. {parsl-2023.5.29.dist-info → parsl-2023.6.12.dist-info}/RECORD +45 -36
  33. parsl/tests/configs/workqueue_blocks.py +0 -12
  34. /parsl/tests/{workqueue_tests → scaling_tests}/__init__.py +0 -0
  35. /parsl/tests/{workqueue_tests → scaling_tests}/htex_local.py +0 -0
  36. /parsl/tests/{workqueue_tests → scaling_tests}/local_threads.py +0 -0
  37. /parsl/tests/{workqueue_tests → scaling_tests}/test_scale.py +0 -0
  38. /parsl/tests/{workqueue_tests → scaling_tests}/wqex_condor.py +0 -0
  39. /parsl/tests/{workqueue_tests → scaling_tests}/wqex_local.py +0 -0
  40. {parsl-2023.5.29.data → parsl-2023.6.12.data}/scripts/exec_parsl_function.py +0 -0
  41. {parsl-2023.5.29.data → parsl-2023.6.12.data}/scripts/parsl_coprocess.py +0 -0
  42. {parsl-2023.5.29.data → parsl-2023.6.12.data}/scripts/process_worker_pool.py +0 -0
  43. {parsl-2023.5.29.dist-info → parsl-2023.6.12.dist-info}/LICENSE +0 -0
  44. {parsl-2023.5.29.dist-info → parsl-2023.6.12.dist-info}/WHEEL +0 -0
  45. {parsl-2023.5.29.dist-info → parsl-2023.6.12.dist-info}/entry_points.txt +0 -0
  46. {parsl-2023.5.29.dist-info → parsl-2023.6.12.dist-info}/top_level.txt +0 -0
parsl/executors/workqueue/executor.py CHANGED
@@ -122,8 +122,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
             In this case, environment variables can be used to influence the
             choice of port, documented here:
             https://ccl.cse.nd.edu/software/manuals/api/html/work__queue_8h.html#a21714a10bcdfcf5c3bd44a96f5dcbda6
-
-            Default: 0.
+            Default: WORK_QUEUE_DEFAULT_PORT.

         env: dict{str}
             Dictionary that contains the environmental variables that
@@ -179,7 +178,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
             invocations of an app have similar performance characteristics,
             this will provide a reasonable set of categories automatically.

-        max_retries: Optional[int]
+        max_retries: int
             Set the number of retries that Work Queue will make when a task
             fails. This is distinct from Parsl level retries configured in
             parsl.config.Config. Set to None to allow Work Queue to retry
@@ -234,7 +233,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
                  autolabel: bool = False,
                  autolabel_window: int = 1,
                  autocategory: bool = True,
-                 max_retries: Optional[int] = 1,
+                 max_retries: int = 1,
                  init_command: str = "",
                  worker_options: str = "",
                  full_debug: bool = True,
@@ -261,7 +260,7 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):
         self.storage_access = storage_access
         self.use_cache = use_cache
         self.working_dir = working_dir
-        self.registered_files = set() # type: Set[str]
+        self.registered_files: Set[str] = set()
         self.full_debug = full_debug
         self.source = True if pack else source
         self.pack = pack
@@ -473,7 +472,6 @@ class WorkQueueExecutor(BlockProviderExecutor, putils.RepresentationMixin):

         logger.debug("Creating executor task {} for function {} with args {}".format(executor_task_id, func, args))

-        # Pickle the result into object to pass into message buffer
         function_file = self._path_in_task(executor_task_id, "function")
         result_file = self._path_in_task(executor_task_id, "result")
         map_file = self._path_in_task(executor_task_id, "map")
@@ -860,7 +858,11 @@ def _work_queue_submit_wait(*,
             logger.debug("Sending executor task {} with command: {}".format(task.id, command_str))
             t = wq.Task(command_str)
         else:
-            t = wq.RemoteTask("run_parsl_task", "parsl_coprocess", task.map_file, task.function_file, task.result_file)
+            t = wq.RemoteTask("run_parsl_task",
+                              "parsl_coprocess",
+                              os.path.basename(task.map_file),
+                              os.path.basename(task.function_file),
+                              os.path.basename(task.result_file))
             t.specify_exec_method("direct")
             logger.debug("Sending executor task {} to coprocess".format(task.id))

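The switch to basenames reflects how Work Queue stages input files: each file is placed in the task's sandbox, where the coprocess sees it under its base name rather than its submit-side path. In isolation (the path below is hypothetical):

    import os

    submit_side_path = "/home/user/runinfo/000/task_0000_map"
    # Inside the worker sandbox the staged file is addressed as:
    print(os.path.basename(submit_side_path))  # task_0000_map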
parsl/launchers/base.py ADDED
@@ -0,0 +1,17 @@
+from abc import ABCMeta, abstractmethod
+
+from parsl.utils import RepresentationMixin
+
+
+class Launcher(RepresentationMixin, metaclass=ABCMeta):
+    """Launchers are basically wrappers for user submitted scripts as they
+    are submitted to a specific execution resource.
+    """
+    def __init__(self, debug: bool = True):
+        self.debug = debug
+
+    @abstractmethod
+    def __call__(self, command: str, tasks_per_node: int, nodes_per_block: int) -> str:
+        """ Wraps the command with the Launcher calls.
+        """
+        pass
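With the abstract base class now in its own module, third-party code can subclass Launcher without importing the concrete launchers. A minimal sketch of such a subclass (the EchoLauncher name is hypothetical, not part of this diff):

    from parsl.launchers.base import Launcher


    class EchoLauncher(Launcher):
        """Logs the command via a shell echo, but otherwise runs it unwrapped."""

        def __call__(self, command: str, tasks_per_node: int, nodes_per_block: int) -> str:
            # A real launcher would wrap `command` in srun, mpiexec, etc.
            return "echo launching; {}".format(command)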
parsl/launchers/launchers.py CHANGED
@@ -1,25 +1,10 @@
-from abc import ABCMeta, abstractmethod
 import logging

-from parsl.utils import RepresentationMixin
+from parsl.launchers.base import Launcher

 logger = logging.getLogger(__name__)


-class Launcher(RepresentationMixin, metaclass=ABCMeta):
-    """Launchers are basically wrappers for user submitted scripts as they
-    are submitted to a specific execution resource.
-    """
-    def __init__(self, debug: bool = True):
-        self.debug = debug
-
-    @abstractmethod
-    def __call__(self, command: str, tasks_per_node: int, nodes_per_block: int) -> str:
-        """ Wraps the command with the Launcher calls.
-        """
-        pass
-
-
 class SimpleLauncher(Launcher):
     """ Does no wrapping. Just returns the command as-is
     """
parsl/monitoring/monitoring.py CHANGED
@@ -139,7 +139,7 @@ class MonitoringHub(RepresentationMixin):
         self._dfk_channel = None # type: Any

         if _db_manager_excepts:
-            raise(_db_manager_excepts)
+            raise _db_manager_excepts

         self.client_address = client_address
         self.client_port_range = client_port_range
@@ -170,12 +170,23 @@ class MonitoringHub(RepresentationMixin):
         self.logger.debug("Initializing ZMQ Pipes to client")
         self.monitoring_hub_active = True

-        comm_q = SizedQueue(maxsize=10) # type: Queue[Union[Tuple[int, int], str]]
-        self.exception_q = SizedQueue(maxsize=10) # type: Queue[Tuple[str, str]]
-        self.priority_msgs = SizedQueue() # type: Queue[Tuple[Any, int]]
-        self.resource_msgs = SizedQueue() # type: Queue[AddressedMonitoringMessage]
-        self.node_msgs = SizedQueue() # type: Queue[AddressedMonitoringMessage]
-        self.block_msgs = SizedQueue() # type: Queue[AddressedMonitoringMessage]
+        comm_q: Queue[Union[Tuple[int, int], str]]
+        comm_q = SizedQueue(maxsize=10)
+
+        self.exception_q: Queue[Tuple[str, str]]
+        self.exception_q = SizedQueue(maxsize=10)
+
+        self.priority_msgs: Queue[Tuple[Any, int]]
+        self.priority_msgs = SizedQueue()
+
+        self.resource_msgs: Queue[AddressedMonitoringMessage]
+        self.resource_msgs = SizedQueue()
+
+        self.node_msgs: Queue[AddressedMonitoringMessage]
+        self.node_msgs = SizedQueue()
+
+        self.block_msgs: Queue[AddressedMonitoringMessage]
+        self.block_msgs = SizedQueue()

         self.router_proc = ForkProcess(target=router_starter,
                                        args=(comm_q, self.exception_q, self.priority_msgs, self.node_msgs, self.block_msgs, self.resource_msgs),
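Splitting each annotation from its assignment, rather than using inline "# type:" comments, is a general mypy-friendly pattern when the right-hand side is a factory whose return type is looser than the type the caller wants to declare. A standalone sketch of the same idiom, using the standard library in place of parsl's SizedQueue:

    from __future__ import annotations

    from queue import Queue
    from typing import Tuple

    # Declare the precise element type on its own line...
    exception_q: Queue[Tuple[str, str]]
    # ...then assign; mypy checks the assignment against the declaration.
    exception_q = Queue(maxsize=10)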
@@ -328,7 +339,7 @@ def filesystem_receiver(logdir: str, q: "queue.Queue[AddressedMonitoringMessage]
                 with open(full_path_filename, "rb") as f:
                     message = deserialize(f.read())
                 logger.info(f"Message received is: {message}")
-                assert(isinstance(message, tuple))
+                assert isinstance(message, tuple)
                 q.put(cast(AddressedMonitoringMessage, message))
                 os.remove(full_path_filename)
             except Exception:
parsl/monitoring/visualization/plots/default/workflow_plots.py CHANGED
@@ -9,6 +9,22 @@ import networkx as nx
 from parsl.monitoring.visualization.utils import timestamp_to_int, num_to_timestamp, DB_DATE_FORMAT


+# gantt_colors must assign a color value for every state name defined
+# in parsl/dataflow/states.py
+gantt_colors = {'unsched': 'rgb(240, 240, 240)',
+                'pending': 'rgb(168, 168, 168)',
+                'launched': 'rgb(100, 255, 255)',
+                'running': 'rgb(0, 0, 255)',
+                'running_ended': 'rgb(64, 64, 255)',
+                'joining': 'rgb(128, 128, 255)',
+                'dep_fail': 'rgb(255, 128, 255)',
+                'failed': 'rgb(200, 0, 0)',
+                'exec_done': 'rgb(0, 200, 0)',
+                'memo_done': 'rgb(64, 200, 64)',
+                'fail_retryable': 'rgb(200, 128,128)'
+                }
+
+
 def task_gantt_plot(df_task, df_status, time_completed=None):

     # if the workflow is not recorded as completed, then assume
@@ -47,25 +63,9 @@ def task_gantt_plot(df_task, df_status, time_completed=None):
                            }
     parsl_tasks.extend([last_status_bar])

-    # colours must assign a colour value for every state name defined
-    # in parsl/dataflow/states.py
-
-    colors = {'unsched': 'rgb(240, 240, 240)',
-              'pending': 'rgb(168, 168, 168)',
-              'launched': 'rgb(100, 255, 255)',
-              'running': 'rgb(0, 0, 255)',
-              'running_ended': 'rgb(64, 64, 255)',
-              'joining': 'rgb(128, 128, 255)',
-              'dep_fail': 'rgb(255, 128, 255)',
-              'failed': 'rgb(200, 0, 0)',
-              'exec_done': 'rgb(0, 200, 0)',
-              'memo_done': 'rgb(64, 200, 64)',
-              'fail_retryable': 'rgb(200, 128,128)'
-              }
-
     fig = ff.create_gantt(parsl_tasks,
                           title="",
-                          colors=colors,
+                          colors=gantt_colors,
                           group_tasks=True,
                           show_colorbar=True,
                           index_col='Resource',
@@ -194,6 +194,20 @@ def total_tasks_plot(df_task, df_status, columns=20):
     return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)


+dag_state_colors = {"unsched": (0, 'rgb(240, 240, 240)'),
+                    "pending": (1, 'rgb(168, 168, 168)'),
+                    "launched": (2, 'rgb(100, 255, 255)'),
+                    "running": (3, 'rgb(0, 0, 255)'),
+                    "dep_fail": (4, 'rgb(255, 128, 255)'),
+                    "failed": (5, 'rgb(200, 0, 0)'),
+                    "exec_done": (6, 'rgb(0, 200, 0)'),
+                    "memo_done": (7, 'rgb(64, 200, 64)'),
+                    "fail_retryable": (8, 'rgb(200, 128,128)'),
+                    "joining": (9, 'rgb(128, 128, 255)'),
+                    "running_ended": (10, 'rgb(64, 64, 255)')
+                    }
+
+
 def workflow_dag_plot(df_tasks, group_by_apps=True):
     G = nx.DiGraph(directed=True)
     nodes = df_tasks['task_id'].unique()
@@ -215,18 +229,7 @@ def workflow_dag_plot(df_tasks, group_by_apps=True):
         groups_list = {app: (i, None) for i, app in enumerate(
             df_tasks['task_func_name'].unique())}
     else:
-        groups_list = {"unsched": (0, 'rgb(240, 240, 240)'),
-                       "pending": (1, 'rgb(168, 168, 168)'),
-                       "launched": (2, 'rgb(100, 255, 255)'),
-                       "running": (3, 'rgb(0, 0, 255)'),
-                       "dep_fail": (4, 'rgb(255, 128, 255)'),
-                       "failed": (5, 'rgb(200, 0, 0)'),
-                       "exec_done": (6, 'rgb(0, 200, 0)'),
-                       "memo_done": (7, 'rgb(64, 200, 64)'),
-                       "fail_retryable": (8, 'rgb(200, 128,128)'),
-                       "joining": (9, 'rgb(128, 128, 255)'),
-                       "running_ended": (10, 'rgb(64, 64, 255)')
-                       }
+        groups_list = dag_state_colors

     node_traces = [...] * len(groups_list)

parsl/providers/cluster_provider.py CHANGED
@@ -67,9 +67,9 @@ class ClusterProvider(ExecutionProvider):
         self.walltime = walltime
         self.cmd_timeout = cmd_timeout
         if not callable(self.launcher):
-            raise(BadLauncher(self.launcher,
+            raise BadLauncher(self.launcher,
                   "Launcher for executor: {} is of type: {}. Expects a parsl.launcher.launcher.Launcher or callable".format(
-                  label, type(self.launcher))))
+                  label, type(self.launcher)))

         self.script_dir = None

parsl/providers/condor/condor.py CHANGED
@@ -8,6 +8,7 @@ from parsl.channels import LocalChannel
 from parsl.providers.base import JobState, JobStatus
 from parsl.utils import RepresentationMixin
 from parsl.launchers import SingleNodeLauncher
+from parsl.launchers.base import Launcher
 from parsl.providers.condor.template import template_string
 from parsl.providers.cluster_provider import ClusterProvider
 from parsl.providers.errors import ScaleOutFailed
@@ -16,7 +17,6 @@ logger = logging.getLogger(__name__)

 from typing import Dict, List, Optional
 from parsl.channels.base import Channel
-from parsl.launchers.launchers import Launcher

 # See http://pages.cs.wisc.edu/~adesmet/status.html
 translate_table = {
parsl/providers/kubernetes/kube.py CHANGED
@@ -128,7 +128,8 @@ class KubernetesProvider(ExecutionProvider, RepresentationMixin):
         self.kube_client = client.CoreV1Api()

         # Dictionary that keeps track of jobs, keyed on job_id
-        self.resources = {} # type: Dict[object, Dict[str, Any]]
+        self.resources: Dict[object, Dict[str, Any]]
+        self.resources = {}

     def submit(self, cmd_string, tasks_per_node, job_name="parsl"):
         """ Submit a job
parsl/providers/slurm/slurm.py CHANGED
@@ -10,7 +10,7 @@ from typing import Optional
 from parsl.channels import LocalChannel
 from parsl.channels.base import Channel
 from parsl.launchers import SingleNodeLauncher
-from parsl.launchers.launchers import Launcher
+from parsl.launchers.base import Launcher
 from parsl.providers.cluster_provider import ClusterProvider
 from parsl.providers.base import JobState, JobStatus
 from parsl.providers.slurm.template import template_string
parsl/tests/configs/taskvine_ex.py ADDED
@@ -0,0 +1,11 @@
+from parsl.config import Config
+from parsl.executors.taskvine import TaskVineExecutor
+
+from parsl.data_provider.http import HTTPInTaskStaging
+from parsl.data_provider.ftp import FTPInTaskStaging
+from parsl.data_provider.file_noop import NoOpFileStaging
+
+
+def fresh_config():
+    return Config(executors=[TaskVineExecutor(port=9000,
+                                              storage_access=[FTPInTaskStaging(), HTTPInTaskStaging(), NoOpFileStaging()])])
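A sketch of how a config like this would be exercised (hypothetical driver code; it assumes a TaskVine worker, e.g. one started with vine_worker, can reach port 9000):

    import parsl
    from parsl.tests.configs.taskvine_ex import fresh_config

    parsl.load(fresh_config())

    @parsl.python_app
    def double(x):
        return 2 * x

    # Blocks until a TaskVine worker connects and runs the task.
    assert double(21).result() == 42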
parsl/tests/conftest.py CHANGED
@@ -124,7 +124,7 @@ def load_dfk_session(request, pytestconfig):

     yield

-    if(parsl.dfk() != dfk):
+    if parsl.dfk() != dfk:
         raise RuntimeError("DFK changed unexpectedly during test")
     dfk.cleanup()
     parsl.clear()
@@ -158,16 +158,16 @@ def load_dfk_local_module(request, pytestconfig):
     assert isinstance(c, parsl.Config)
     dfk = parsl.load(c)

-    if(callable(local_setup)):
+    if callable(local_setup):
         local_setup()

     yield

-    if(callable(local_teardown)):
+    if callable(local_teardown):
         local_teardown()

-    if(local_config):
-        if(parsl.dfk() != dfk):
+    if local_config:
+        if parsl.dfk() != dfk:
             raise RuntimeError("DFK changed unexpectedly during test")
         dfk.cleanup()
         parsl.clear()
@@ -271,7 +271,7 @@ def pytest_ignore_collect(path):
         return True
     elif 'manual_tests' in path.strpath:
         return True
-    elif 'workqueue_tests/test_scale' in path.strpath:
+    elif 'scaling_tests/test_scale' in path.strpath:
         return True
     else:
         return False
parsl/tests/scaling_tests/vineex_condor.py ADDED
@@ -0,0 +1,10 @@
+from parsl.config import Config
+from parsl.executors.taskvine import TaskVineExecutor
+from parsl.providers import CondorProvider
+
+config = Config(
+    executors=[TaskVineExecutor(port=50055,
+                                source=True,
+                                provider=CondorProvider(),
+                                )]
+)
parsl/tests/scaling_tests/vineex_local.py ADDED
@@ -0,0 +1,10 @@
+from parsl.config import Config
+from parsl.executors.taskvine import TaskVineExecutor
+from parsl.providers import LocalProvider
+
+config = Config(
+    executors=[TaskVineExecutor(port=50055,
+                                source=True,
+                                provider=LocalProvider(),
+                                )]
+)
parsl/tests/test_bash_apps/test_pipeline.py CHANGED
@@ -52,7 +52,7 @@ def test_increment(depth=5):
     futs = {}
     for i in range(1, depth):
         print("Launching {0} with {1}".format(i, prev))
-        assert(isinstance(prev, DataFuture) or isinstance(prev, File))
+        assert isinstance(prev, DataFuture) or isinstance(prev, File)
         output = File("test{0}.txt".format(i))
         fu = increment(inputs=[prev], # Depend on the future from previous call
                        # Name the file to be created here
@@ -62,7 +62,7 @@ def test_increment(depth=5):
         [prev] = fu.outputs
         futs[i] = prev
         print(prev.filepath)
-        assert(isinstance(prev, DataFuture))
+        assert isinstance(prev, DataFuture)

     for key in futs:
         if key > 0:
parsl/tests/test_error_handling/test_retry_handler.py CHANGED
@@ -61,4 +61,4 @@ def test_retry():
     with pytest.raises(parsl.app.errors.BashExitFailure):
         fu.result()

-    assert(fu.exception().exitcode == 5)
+    assert fu.exception().exitcode == 5
parsl/tests/test_monitoring/test_viz_colouring.py ADDED
@@ -0,0 +1,17 @@
+import pytest
+from parsl.dataflow.states import States
+
+
+@pytest.mark.local
+def test_all_states_colored() -> None:
+    """This checks that the coloring tables in parsl-visualize contain
+    a color for each state defined in the task state enumeration.
+    """
+
+    # imports inside test because viz can't be imported in an environment
+    # with no monitoring installed
+    import parsl.monitoring.visualization.plots.default.workflow_plots as workflow_plots
+
+    for s in States:
+        assert s.name in workflow_plots.gantt_colors
+        assert s.name in workflow_plots.dag_state_colors
parsl/utils.py CHANGED
@@ -55,7 +55,7 @@ def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:

     """

-    if(not os.path.isdir(rundir)):
+    if not os.path.isdir(rundir):
        return []

    dirs = sorted(os.listdir(rundir))
@@ -99,7 +99,7 @@ def get_last_checkpoint(rundir: str = "runinfo") -> List[str]:
    last_runid = dirs[-1]
    last_checkpoint = os.path.abspath(f'{rundir}/{last_runid}/checkpoint')

-    if(not(os.path.isdir(last_checkpoint))):
+    if not os.path.isdir(last_checkpoint):
        return []

    return [last_checkpoint]
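Both helpers return an empty list rather than raising when nothing is found, so their result can be passed straight to checkpoint configuration. A small usage sketch (runinfo is the default shown in the signatures above):

    from parsl.config import Config
    from parsl.utils import get_all_checkpoints, get_last_checkpoint

    # Each call yields [] when runinfo/ is absent, so this is safe on a fresh checkout.
    config = Config(checkpoint_files=get_last_checkpoint("runinfo"))
    print(get_all_checkpoints("runinfo"))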
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
 Year.Month.Day[alpha/beta/..]
 Alphas will be numbered like this -> 2024.12.10a0
 """
-VERSION = '2023.05.29'
+VERSION = '2023.06.12'
{parsl-2023.5.29.dist-info → parsl-2023.6.12.dist-info}/METADATA CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2023.5.29
+Version: 2023.6.12
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2023.05.29.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2023.06.12.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0
@@ -19,7 +19,7 @@ Requires-Python: >=3.7.0
 License-File: LICENSE
 Requires-Dist: pyzmq (>=17.1.2)
 Requires-Dist: typeguard (<3,>=2.10)
-Requires-Dist: typing-extensions
+Requires-Dist: typing-extensions (<5,>=4.6)
 Requires-Dist: types-paramiko
 Requires-Dist: types-requests
 Requires-Dist: types-six