parsl 2024.11.11__py3-none-any.whl → 2024.11.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
parsl/dataflow/dflow.py CHANGED
@@ -111,8 +111,6 @@ class DataFlowKernel:
111
111
  self.monitoring = config.monitoring
112
112
 
113
113
  if self.monitoring:
114
- if self.monitoring.logdir is None:
115
- self.monitoring.logdir = self.run_dir
116
114
  self.monitoring.start(self.run_dir, self.config.run_dir)
117
115
 
118
116
  self.time_began = datetime.datetime.now()
@@ -279,7 +279,7 @@ class Database:
279
279
  class DatabaseManager:
280
280
  def __init__(self,
281
281
  db_url: str = 'sqlite:///runinfo/monitoring.db',
282
- logdir: str = '.',
282
+ run_dir: str = '.',
283
283
  logging_level: int = logging.INFO,
284
284
  batching_interval: float = 1,
285
285
  batching_threshold: float = 99999,
@@ -287,12 +287,12 @@ class DatabaseManager:
287
287
 
288
288
  self.workflow_end = False
289
289
  self.workflow_start_message: Optional[MonitoringMessage] = None
290
- self.logdir = logdir
291
- os.makedirs(self.logdir, exist_ok=True)
290
+ self.run_dir = run_dir
291
+ os.makedirs(self.run_dir, exist_ok=True)
292
292
 
293
293
  logger.propagate = False
294
294
 
295
- set_file_logger("{}/database_manager.log".format(self.logdir), level=logging_level,
295
+ set_file_logger(f"{self.run_dir}/database_manager.log", level=logging_level,
296
296
  format_string="%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] [%(threadName)s %(thread)d] %(message)s",
297
297
  name="database_manager")
298
298
 
@@ -681,7 +681,7 @@ class DatabaseManager:
681
681
  def dbm_starter(exception_q: mpq.Queue,
682
682
  resource_msgs: mpq.Queue,
683
683
  db_url: str,
684
- logdir: str,
684
+ run_dir: str,
685
685
  logging_level: int) -> None:
686
686
  """Start the database manager process
687
687
 
@@ -692,7 +692,7 @@ def dbm_starter(exception_q: mpq.Queue,
692
692
 
693
693
  try:
694
694
  dbm = DatabaseManager(db_url=db_url,
695
- logdir=logdir,
695
+ run_dir=run_dir,
696
696
  logging_level=logging_level)
697
697
  logger.info("Starting dbm in dbm starter")
698
698
  dbm.start(resource_msgs)
@@ -44,7 +44,6 @@ class MonitoringHub(RepresentationMixin):
44
44
  workflow_name: Optional[str] = None,
45
45
  workflow_version: Optional[str] = None,
46
46
  logging_endpoint: Optional[str] = None,
47
- logdir: Optional[str] = None,
48
47
  monitoring_debug: bool = False,
49
48
  resource_monitoring_enabled: bool = True,
50
49
  resource_monitoring_interval: float = 30): # in seconds
@@ -73,8 +72,6 @@ class MonitoringHub(RepresentationMixin):
73
72
  The database connection url for monitoring to log the information.
74
73
  These URLs follow RFC-1738, and can include username, password, hostname, database name.
75
74
  Default: sqlite, in the configured run_dir.
76
- logdir : str
77
- Parsl log directory paths. Logs and temp files go here. Default: '.'
78
75
  monitoring_debug : Bool
79
76
  Enable monitoring debug logging. Default: False
80
77
  resource_monitoring_enabled : boolean
@@ -96,7 +93,6 @@ class MonitoringHub(RepresentationMixin):
96
93
  self.hub_port_range = hub_port_range
97
94
 
98
95
  self.logging_endpoint = logging_endpoint
99
- self.logdir = logdir
100
96
  self.monitoring_debug = monitoring_debug
101
97
 
102
98
  self.workflow_name = workflow_name
@@ -109,13 +105,10 @@ class MonitoringHub(RepresentationMixin):
109
105
 
110
106
  logger.debug("Starting MonitoringHub")
111
107
 
112
- if self.logdir is None:
113
- self.logdir = "."
114
-
115
108
  if self.logging_endpoint is None:
116
109
  self.logging_endpoint = f"sqlite:///{os.fspath(config_run_dir)}/monitoring.db"
117
110
 
118
- os.makedirs(self.logdir, exist_ok=True)
111
+ os.makedirs(dfk_run_dir, exist_ok=True)
119
112
 
120
113
  self.monitoring_hub_active = True
121
114
 
@@ -151,7 +144,7 @@ class MonitoringHub(RepresentationMixin):
151
144
  "hub_address": self.hub_address,
152
145
  "udp_port": self.hub_port,
153
146
  "zmq_port_range": self.hub_port_range,
154
- "logdir": self.logdir,
147
+ "run_dir": dfk_run_dir,
155
148
  "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
156
149
  },
157
150
  name="Monitoring-Router-Process",
@@ -161,7 +154,7 @@ class MonitoringHub(RepresentationMixin):
161
154
 
162
155
  self.dbm_proc = ForkProcess(target=dbm_starter,
163
156
  args=(self.exception_q, self.resource_msgs,),
164
- kwargs={"logdir": self.logdir,
157
+ kwargs={"run_dir": dfk_run_dir,
165
158
  "logging_level": logging.DEBUG if self.monitoring_debug else logging.INFO,
166
159
  "db_url": self.logging_endpoint,
167
160
  },
@@ -172,7 +165,7 @@ class MonitoringHub(RepresentationMixin):
172
165
  logger.info("Started the router process %s and DBM process %s", self.router_proc.pid, self.dbm_proc.pid)
173
166
 
174
167
  self.filesystem_proc = ForkProcess(target=filesystem_receiver,
175
- args=(self.logdir, self.resource_msgs, dfk_run_dir),
168
+ args=(self.resource_msgs, dfk_run_dir),
176
169
  name="Monitoring-Filesystem-Process",
177
170
  daemon=True
178
171
  )
@@ -258,8 +251,8 @@ class MonitoringHub(RepresentationMixin):
258
251
 
259
252
 
260
253
  @wrap_with_logs
261
- def filesystem_receiver(logdir: str, q: Queue[TaggedMonitoringMessage], run_dir: str) -> None:
262
- logger = set_file_logger("{}/monitoring_filesystem_radio.log".format(logdir),
254
+ def filesystem_receiver(q: Queue[TaggedMonitoringMessage], run_dir: str) -> None:
255
+ logger = set_file_logger(f"{run_dir}/monitoring_filesystem_radio.log",
263
256
  name="monitoring_filesystem_radio",
264
257
  level=logging.INFO)
265
258
 
@@ -270,6 +263,8 @@ def filesystem_receiver(logdir: str, q: Queue[TaggedMonitoringMessage], run_dir:
270
263
  new_dir = f"{base_path}/new/"
271
264
  logger.debug("Creating new and tmp paths under %s", base_path)
272
265
 
266
+ target_radio = MultiprocessingQueueRadioSender(q)
267
+
273
268
  os.makedirs(tmp_dir, exist_ok=True)
274
269
  os.makedirs(new_dir, exist_ok=True)
275
270
 
@@ -285,7 +280,7 @@ def filesystem_receiver(logdir: str, q: Queue[TaggedMonitoringMessage], run_dir:
285
280
  message = pickle.load(f)
286
281
  logger.debug("Message received is: %s", message)
287
282
  assert isinstance(message, tuple)
288
- q.put(cast(TaggedMonitoringMessage, message))
283
+ target_radio.send(cast(TaggedMonitoringMessage, message))
289
284
  os.remove(full_path_filename)
290
285
  except Exception:
291
286
  logger.exception("Exception processing %s - probably will be retried next iteration", filename)
@@ -14,6 +14,7 @@ import typeguard
14
14
  import zmq
15
15
 
16
16
  from parsl.log_utils import set_file_logger
17
+ from parsl.monitoring.radios import MultiprocessingQueueRadioSender
17
18
  from parsl.monitoring.types import TaggedMonitoringMessage
18
19
  from parsl.process_loggers import wrap_with_logs
19
20
  from parsl.utils import setproctitle
@@ -30,7 +31,7 @@ class MonitoringRouter:
30
31
  zmq_port_range: Tuple[int, int] = (55050, 56000),
31
32
 
32
33
  monitoring_hub_address: str = "127.0.0.1",
33
- logdir: str = ".",
34
+ run_dir: str = ".",
34
35
  logging_level: int = logging.INFO,
35
36
  atexit_timeout: int = 3, # in seconds
36
37
  resource_msgs: mpq.Queue,
@@ -47,7 +48,7 @@ class MonitoringRouter:
47
48
  zmq_port_range : tuple(int, int)
48
49
  The MonitoringHub picks ports at random from the range which will be used by Hub.
49
50
  Default: (55050, 56000)
50
- logdir : str
51
+ run_dir : str
51
52
  Parsl log directory paths. Logs and temp files go here. Default: '.'
52
53
  logging_level : int
53
54
  Logging level as defined in the logging module. Default: logging.INFO
@@ -55,12 +56,11 @@ class MonitoringRouter:
55
56
  The amount of time in seconds to terminate the hub without receiving any messages, after the last dfk workflow message is received.
56
57
  resource_msgs : multiprocessing.Queue
57
58
  A multiprocessing queue to receive messages to be routed onwards to the database process
58
-
59
59
  exit_event : Event
60
60
  An event that the main Parsl process will set to signal that the monitoring router should shut down.
61
61
  """
62
- os.makedirs(logdir, exist_ok=True)
63
- self.logger = set_file_logger("{}/monitoring_router.log".format(logdir),
62
+ os.makedirs(run_dir, exist_ok=True)
63
+ self.logger = set_file_logger(f"{run_dir}/monitoring_router.log",
64
64
  name="monitoring_router",
65
65
  level=logging_level)
66
66
  self.logger.debug("Monitoring router starting")
@@ -98,7 +98,7 @@ class MonitoringRouter:
98
98
  min_port=zmq_port_range[0],
99
99
  max_port=zmq_port_range[1])
100
100
 
101
- self.resource_msgs = resource_msgs
101
+ self.target_radio = MultiprocessingQueueRadioSender(resource_msgs)
102
102
  self.exit_event = exit_event
103
103
 
104
104
  @wrap_with_logs(target="monitoring_router")
@@ -125,7 +125,7 @@ class MonitoringRouter:
125
125
  data, addr = self.udp_sock.recvfrom(2048)
126
126
  resource_msg = pickle.loads(data)
127
127
  self.logger.debug("Got UDP Message from {}: {}".format(addr, resource_msg))
128
- self.resource_msgs.put(resource_msg)
128
+ self.target_radio.send(resource_msg)
129
129
  except socket.timeout:
130
130
  pass
131
131
 
@@ -136,7 +136,7 @@ class MonitoringRouter:
136
136
  data, addr = self.udp_sock.recvfrom(2048)
137
137
  msg = pickle.loads(data)
138
138
  self.logger.debug("Got UDP Message from {}: {}".format(addr, msg))
139
- self.resource_msgs.put(msg)
139
+ self.target_radio.send(msg)
140
140
  last_msg_received_time = time.time()
141
141
  except socket.timeout:
142
142
  pass
@@ -160,7 +160,7 @@ class MonitoringRouter:
160
160
  assert len(msg) >= 1, "ZMQ Receiver expects tuples of length at least 1, got {}".format(msg)
161
161
  assert len(msg) == 2, "ZMQ Receiver expects message tuples of exactly length 2, got {}".format(msg)
162
162
 
163
- self.resource_msgs.put(msg)
163
+ self.target_radio.send(msg)
164
164
  except zmq.Again:
165
165
  pass
166
166
  except Exception:
@@ -187,14 +187,14 @@ def router_starter(*,
187
187
  udp_port: Optional[int],
188
188
  zmq_port_range: Tuple[int, int],
189
189
 
190
- logdir: str,
190
+ run_dir: str,
191
191
  logging_level: int) -> None:
192
192
  setproctitle("parsl: monitoring router")
193
193
  try:
194
194
  router = MonitoringRouter(hub_address=hub_address,
195
195
  udp_port=udp_port,
196
196
  zmq_port_range=zmq_port_range,
197
- logdir=logdir,
197
+ run_dir=run_dir,
198
198
  logging_level=logging_level,
199
199
  resource_msgs=resource_msgs,
200
200
  exit_event=exit_event)
@@ -70,6 +70,9 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
70
70
  Slurm queue to place job in. If unspecified or ``None``, no queue slurm directive will be specified.
71
71
  constraint : str
72
72
  Slurm job constraint, often used to choose cpu or gpu type. If unspecified or ``None``, no constraint slurm directive will be added.
73
+ clusters : str
74
+ Slurm cluster name, or comma-separated cluster list, used to choose between different clusters in a federated Slurm instance.
75
+ If unspecified or ``None``, no slurm directive for clusters will be added.
73
76
  channel : Channel
74
77
  Channel for accessing this provider.
75
78
  nodes_per_block : int
@@ -116,6 +119,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
116
119
  account: Optional[str] = None,
117
120
  qos: Optional[str] = None,
118
121
  constraint: Optional[str] = None,
122
+ clusters: Optional[str] = None,
119
123
  channel: Channel = LocalChannel(),
120
124
  nodes_per_block: int = 1,
121
125
  cores_per_node: Optional[int] = None,
@@ -152,6 +156,7 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
152
156
  self.account = account
153
157
  self.qos = qos
154
158
  self.constraint = constraint
159
+ self.clusters = clusters
155
160
  self.scheduler_options = scheduler_options + '\n'
156
161
  if exclusive:
157
162
  self.scheduler_options += "#SBATCH --exclusive\n"
@@ -163,6 +168,8 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
163
168
  self.scheduler_options += "#SBATCH --qos={}\n".format(qos)
164
169
  if constraint:
165
170
  self.scheduler_options += "#SBATCH --constraint={}\n".format(constraint)
171
+ if clusters:
172
+ self.scheduler_options += "#SBATCH --clusters={}\n".format(clusters)
166
173
 
167
174
  self.regex_job_id = regex_job_id
168
175
  self.worker_init = worker_init + '\n'
@@ -174,14 +181,22 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
174
181
  logger.debug(f"sacct returned retcode={retcode} stderr={stderr}")
175
182
  if retcode == 0:
176
183
  logger.debug("using sacct to get job status")
184
+ _cmd = "sacct"
185
+ # Add clusters option to sacct if provided
186
+ if self.clusters:
187
+ _cmd += f" --clusters={self.clusters}"
177
188
  # Using state%20 to get enough characters to not truncate output
178
189
  # of the state. Without output can look like "<job_id> CANCELLED+"
179
- self._cmd = "sacct -X --noheader --format=jobid,state%20 --job '{0}'"
190
+ self._cmd = _cmd + " -X --noheader --format=jobid,state%20 --job '{0}'"
180
191
  self._translate_table = sacct_translate_table
181
192
  else:
182
193
  logger.debug(f"sacct failed with retcode={retcode}")
183
194
  logger.debug("falling back to using squeue to get job status")
184
- self._cmd = "squeue --noheader --format='%i %t' --job '{0}'"
195
+ _cmd = "squeue"
196
+ # Add clusters option to squeue if provided
197
+ if self.clusters:
198
+ _cmd += f" --clusters={self.clusters}"
199
+ self._cmd = _cmd + " --noheader --format='%i %t' --job '{0}'"
185
200
  self._translate_table = squeue_translate_table
186
201
 
187
202
  def _status(self):
@@ -344,7 +359,14 @@ class SlurmProvider(ClusterProvider, RepresentationMixin):
344
359
  '''
345
360
 
346
361
  job_id_list = ' '.join(job_ids)
347
- retcode, stdout, stderr = self.execute_wait("scancel {0}".format(job_id_list))
362
+
363
+ # Make the command to cancel jobs
364
+ _cmd = "scancel"
365
+ if self.clusters:
366
+ _cmd += f" --clusters={self.clusters}"
367
+ _cmd += " {0}"
368
+
369
+ retcode, stdout, stderr = self.execute_wait(_cmd.format(job_id_list))
348
370
  rets = None
349
371
  if retcode == 0:
350
372
  for jid in job_ids:
parsl/version.py CHANGED
@@ -3,4 +3,4 @@
3
3
  Year.Month.Day[alpha/beta/..]
4
4
  Alphas will be numbered like this -> 2024.12.10a0
5
5
  """
6
- VERSION = '2024.11.11'
6
+ VERSION = '2024.11.18'
@@ -1,9 +1,9 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: parsl
3
- Version: 2024.11.11
3
+ Version: 2024.11.18
4
4
  Summary: Simple data dependent workflows in Python
5
5
  Home-page: https://github.com/Parsl/parsl
6
- Download-URL: https://github.com/Parsl/parsl/archive/2024.11.11.tar.gz
6
+ Download-URL: https://github.com/Parsl/parsl/archive/2024.11.18.tar.gz
7
7
  Author: The Parsl Team
8
8
  Author-email: parsl@googlegroups.com
9
9
  License: Apache 2.0
@@ -8,7 +8,7 @@ parsl/multiprocessing.py,sha256=MyaEcEq-Qf860u7V98u-PZrPNdtzOZL_NW6EhIJnmfQ,1937
8
8
  parsl/process_loggers.py,sha256=uQ7Gd0W72Jz7rrcYlOMfLsAEhkRltxXJL2MgdduJjEw,1136
9
9
  parsl/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
10
  parsl/utils.py,sha256=rMLKeadEsQ9jGwm4ogqiLIXPS3zOAyfznQJXVkJSY8E,13107
11
- parsl/version.py,sha256=YN54GKP_n8ju-yF93YgA58EFPgsqVrPOc-yanh_BEMg,131
11
+ parsl/version.py,sha256=QWFxa3haTzr3-mjJsN5_Ug8jmK_2jH7q_8R_8J0nGFw,131
12
12
  parsl/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
13
  parsl/app/app.py,sha256=0gbM4AH2OtFOLsv07I5nglpElcwMSOi-FzdZZfrk7So,8532
14
14
  parsl/app/bash.py,sha256=jm2AvePlCT9DZR7H_4ANDWxatp5dN_22FUlT_gWhZ-g,5528
@@ -56,7 +56,7 @@ parsl/data_provider/staging.py,sha256=ZDZuuFg38pjUStegKPcvPsfGp3iMeReMzfU6DSwtJj
56
56
  parsl/data_provider/zip.py,sha256=S4kVuH9lxAegRURYbvIUR7EYYBOccyslaqyCrVWUBhw,4497
57
57
  parsl/dataflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
58
58
  parsl/dataflow/dependency_resolvers.py,sha256=Om8Dgh7a0ZwgXAc6TlhxLSzvxXHDlNNV1aBNiD3JTNY,3325
59
- parsl/dataflow/dflow.py,sha256=80lfWJnk2SPaDIOqDEwqOHx031wu_74SdWBMyiiorgY,65443
59
+ parsl/dataflow/dflow.py,sha256=_EsavFW9vLnWkqTKuATSg-TGv0Cvnu5xnCLWWZEsXeA,65342
60
60
  parsl/dataflow/errors.py,sha256=9SxVhIJY_53FQx8x4OU8UA8nd7lvUbDllH7KfMXpYaY,2177
61
61
  parsl/dataflow/futures.py,sha256=08LuP-HFiHBIZmeKCjlsazw_WpQ5fwevrU2_WbidkYw,6080
62
62
  parsl/dataflow/memoization.py,sha256=l9uw1Bu50GucBF70M5relpGKFkE4dIM9T3R1KrxW0v0,9583
@@ -115,13 +115,13 @@ parsl/launchers/base.py,sha256=CblcvPTJiu-MNLWaRtFe29SZQ0BpTOlaY8CGcHdlHIE,538
115
115
  parsl/launchers/errors.py,sha256=8YMV_CHpBNVa4eXkGE4x5DaFQlZkDCRCHmBktYcY6TA,467
116
116
  parsl/launchers/launchers.py,sha256=cQsNsHuCOL_nQTjPXf0--YsgsDoMoJ77bO1Wt4ncLjs,15134
117
117
  parsl/monitoring/__init__.py,sha256=0ywNz6i0lM1xo_7_BIxhETDGeVd2C_0wwD7qgeaMR4c,83
118
- parsl/monitoring/db_manager.py,sha256=G795Nme9di2AWT7zqFNNyOn8ZJd5i1I2hA6iDSorZD4,33330
118
+ parsl/monitoring/db_manager.py,sha256=D8lrngFGxbFhyWVkF8JZRTbGxRYmd3SY6_zu8KV0FJs,33330
119
119
  parsl/monitoring/errors.py,sha256=D6jpYzEzp0d6FmVKGqhvjAxr4ztZfJX2s-aXemH9bBU,148
120
120
  parsl/monitoring/message_type.py,sha256=Khn88afNxcOIciKiCK4GLnn90I5BlRTiOL3zK-P07yQ,401
121
- parsl/monitoring/monitoring.py,sha256=vSUaYokzWqsUJRikoXRN32LjqtGrB02_J89727s4FmI,12877
121
+ parsl/monitoring/monitoring.py,sha256=8uy-7ua3FyTWfGgxGavCzM9_r56gCJ-KLpUysAqFI5Q,12671
122
122
  parsl/monitoring/radios.py,sha256=l-a7GiWRBR3OaeLeHD_gBo2lMrqpjiQjLNaPTCr29ck,6021
123
123
  parsl/monitoring/remote.py,sha256=WfSqQWYPMx3gT6u4T171ngMPzt8ialR1jRSsrD-4O24,13619
124
- parsl/monitoring/router.py,sha256=5WrJ7YT2SV3T9BHCI8P0KqHm-4Y6NDgZkwmEcISmzGU,9110
124
+ parsl/monitoring/router.py,sha256=VvzzsxLpwSSn0VUZOJtf0uvP9Kcr1znDAR1_MoHdAeU,9208
125
125
  parsl/monitoring/types.py,sha256=oOCrzv-ab-_rv4pb8o58Sdb8G_RGp1aZriRbdf9zBEk,339
126
126
  parsl/monitoring/queries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
127
127
  parsl/monitoring/queries/pandas.py,sha256=0Z2r0rjTKCemf0eaDkF1irvVHn5g7KC5SYETvQPRxwU,2232
@@ -176,7 +176,7 @@ parsl/providers/pbspro/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
176
176
  parsl/providers/pbspro/pbspro.py,sha256=luPUxBA0QMax7tKICsmesESQcOhcGnLi6GUlfGeO5pQ,8598
177
177
  parsl/providers/pbspro/template.py,sha256=y-Dher--t5Eury-c7cAuSZs9FEUXWiruFUI07v81558,315
178
178
  parsl/providers/slurm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
179
- parsl/providers/slurm/slurm.py,sha256=XhLLiPBKHKUPf0KmML4kHdN97JB7bXNwDgGPnQIFSmE,15523
179
+ parsl/providers/slurm/slurm.py,sha256=6tnDB2rLNdnY_FGtmNg6tPSdU9dP5DuWBg4GGEMTPYI,16442
180
180
  parsl/providers/slurm/template.py,sha256=KpgBEFMc1ps-38jdrk13xUGx9TCivu-iF90jgQDdiEQ,315
181
181
  parsl/providers/torque/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
182
182
  parsl/providers/torque/template.py,sha256=4qfc2gmlEhRCAD7erFDOs4prJQ43I8s4E8DSUSVQx3A,358
@@ -448,13 +448,13 @@ parsl/usage_tracking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
448
448
  parsl/usage_tracking/api.py,sha256=iaCY58Dc5J4UM7_dJzEEs871P1p1HdxBMtNGyVdzc9g,1821
449
449
  parsl/usage_tracking/levels.py,sha256=xbfzYEsd55KiZJ-mzNgPebvOH4rRHum04hROzEf41tU,291
450
450
  parsl/usage_tracking/usage.py,sha256=tcoZ2OUjsQVakG8Uu9_HFuEdzpSHyt4JarSRcLGnSMw,8918
451
- parsl-2024.11.11.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
452
- parsl-2024.11.11.data/scripts/interchange.py,sha256=6jsxpVgtruFtE_0nMHAZYVF1gvoALBCkprEbUb_YQgg,30098
453
- parsl-2024.11.11.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
454
- parsl-2024.11.11.data/scripts/process_worker_pool.py,sha256=Qed0dgUa6375UgWm5h196V0FBdeTdW6iowG9RYDNG9Y,42920
455
- parsl-2024.11.11.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
456
- parsl-2024.11.11.dist-info/METADATA,sha256=cuVmmVJIvqai_5seI7Qr108RZ4QxxwK74gRS0XUPUdM,3848
457
- parsl-2024.11.11.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
458
- parsl-2024.11.11.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
459
- parsl-2024.11.11.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
460
- parsl-2024.11.11.dist-info/RECORD,,
451
+ parsl-2024.11.18.data/scripts/exec_parsl_function.py,sha256=RUkJ4JSJAjr7YyRZ58zhMdg8cR5dVV9odUl3AuzNf3k,7802
452
+ parsl-2024.11.18.data/scripts/interchange.py,sha256=6jsxpVgtruFtE_0nMHAZYVF1gvoALBCkprEbUb_YQgg,30098
453
+ parsl-2024.11.18.data/scripts/parsl_coprocess.py,sha256=zrVjEqQvFOHxsLufPi00xzMONagjVwLZbavPM7bbjK4,5722
454
+ parsl-2024.11.18.data/scripts/process_worker_pool.py,sha256=Qed0dgUa6375UgWm5h196V0FBdeTdW6iowG9RYDNG9Y,42920
455
+ parsl-2024.11.18.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
456
+ parsl-2024.11.18.dist-info/METADATA,sha256=xtjfEiUVaXoAAPKqrjCeSEJ6z5eEcfxWjRQgic18iqQ,3848
457
+ parsl-2024.11.18.dist-info/WHEEL,sha256=bFJAMchF8aTQGUgMZzHJyDDMPTO3ToJ7x23SLJa1SVo,92
458
+ parsl-2024.11.18.dist-info/entry_points.txt,sha256=XqnsWDYoEcLbsMcpnYGKLEnSBmaIe1YoM5YsBdJG2tI,176
459
+ parsl-2024.11.18.dist-info/top_level.txt,sha256=PIheYoUFQtF2icLsgOykgU-Cjuwr2Oi6On2jo5RYgRM,6
460
+ parsl-2024.11.18.dist-info/RECORD,,