DIRAC 9.0.0a60__py3-none-any.whl → 9.0.0a61__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
@@ -57,99 +57,6 @@ class AccountingCLI(CLI):
         traceback.print_tb(sys.exc_info()[2])
         print("________________________\n")
 
-    def do_registerType(self, args):
-        """
-        Registers a new accounting type
-        Usage : registerType <typeName>
-        <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
-        should exist and inherit the base type
-        """
-        try:
-            argList = args.split()
-            if argList:
-                typeName = argList[0].strip()
-            else:
-                gLogger.error("No type name specified")
-                return
-            # Try to import the type
-            result = self.objectLoader.loadObject(f"DIRAC.AccountingSystem.Client.Types.{typeName}")
-            if not result["OK"]:
-                return result
-            typeClass = result["Value"]
-
-            gLogger.info(f"Loaded type {typeClass.__name__}")
-            typeDef = typeClass().getDefinition()
-            acClient = DataStoreClient()
-            retVal = acClient.registerType(*typeDef)
-            if retVal["OK"]:
-                gLogger.info("Type registered successfully")
-            else:
-                gLogger.error(f"Error: {retVal['Message']}")
-        except Exception:
-            self.showTraceback()
-
-    def do_resetBucketLength(self, args):
-        """
-        Set the bucket Length. Will trigger a recalculation of buckets. Can take a while.
-        Usage : resetBucketLength <typeName>
-        <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
-        should exist and inherit the base type
-        """
-        try:
-            argList = args.split()
-            if argList:
-                typeName = argList[0].strip()
-            else:
-                gLogger.error("No type name specified")
-                return
-
-            # Try to import the type
-            result = self.objectLoader.loadObject(f"DIRAC.AccountingSystem.Client.Types.{typeName}")
-            if not result["OK"]:
-                return result
-            typeClass = result["Value"]
-            gLogger.info(f"Loaded type {typeClass.__name__}")
-            typeDef = typeClass().getDefinition()
-            acClient = DataStoreClient()
-            retVal = acClient.setBucketsLength(typeDef[0], typeDef[3])
-            if retVal["OK"]:
-                gLogger.info("Type registered successfully")
-            else:
-                gLogger.error(f"Error: {retVal['Message']}")
-        except Exception:
-            self.showTraceback()
-
-    def do_regenerateBuckets(self, args):
-        """
-        Regenerate buckets for type. Can take a while.
-        Usage : regenerateBuckets <typeName>
-        <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
-        should exist and inherit the base type
-        """
-        try:
-            argList = args.split()
-            if argList:
-                typeName = argList[0].strip()
-            else:
-                gLogger.error("No type name specified")
-                return
-
-            # Try to import the type
-            result = self.objectLoader.loadObject(f"DIRAC.AccountingSystem.Client.Types.{typeName}")
-            if not result["OK"]:
-                return result
-            typeClass = result["Value"]
-            gLogger.info(f"Loaded type {typeClass.__name__}")
-            typeDef = typeClass().getDefinition()
-            acClient = DataStoreClient()
-            retVal = acClient.regenerateBuckets(typeDef[0])
-            if retVal["OK"]:
-                gLogger.info("Buckets recalculated!")
-            else:
-                gLogger.error(f"Error: {retVal['Message']}")
-        except Exception:
-            self.showTraceback()
-
     def do_showRegisteredTypes(self, args):
         """
         Get a list of registered types
@@ -170,50 +77,3 @@ class AccountingCLI(CLI):
                 print(" Value fields:\n %s" % "\n ".join(typeList[2]))
         except Exception:
             self.showTraceback()
-
-    def do_deleteType(self, args):
-        """
-        Delete a registered accounting type.
-        Usage : deleteType <typeName>
-        WARN! It will delete all data associated to that type! VERY DANGEROUS!
-        If you screw it, you'll discover a new dimension of pain and doom! :)
-        """
-        try:
-            argList = args.split()
-            if argList:
-                typeName = argList[0].strip()
-            else:
-                gLogger.error("No type name specified")
-                return
-
-            choice = input(
-                f"Are you completely sure you want to delete type {typeName} and all it's data? yes/no [no]: "
-            )
-            choice = choice.lower()
-            if choice not in ("yes", "y"):
-                print("Delete aborted")
-                return
-
-            acClient = DataStoreClient()
-            retVal = acClient.deleteType(typeName)
-            if not retVal["OK"]:
-                gLogger.error(f"Error: {retVal['Message']}")
-                return
-            print("Hope you meant it, because it's done")
-        except Exception:
-            self.showTraceback()
-
-    def do_compactBuckets(self, args):
-        """
-        Compact buckets table
-        Usage : compactBuckets
-        """
-        try:
-            acClient = DataStoreClient()
-            retVal = acClient.compactDB()
-            if not retVal["OK"]:
-                gLogger.error(f"Error: {retVal['Message']}")
-                return
-            gLogger.info("Done")
-        except Exception:
-            self.showTraceback()
@@ -122,19 +122,6 @@ class DataStoreClient(Client):
 
         return S_OK()
 
-    def remove(self, register):
-        """
-        Remove a Register from the Accounting DataStore
-        """
-        if not self.__checkBaseType(register.__class__):
-            return S_ERROR("register is not a valid type (has to inherit from BaseAccountingType")
-        retVal = register.checkValues()
-        if not retVal["OK"]:
-            return retVal
-        if gConfig.getValue("/LocalSite/DisableAccounting", False):
-            return S_OK()
-        return self._getRPC().remove(*register.getValues())
-
 
 def _sendToFailover(rpcStub):
     """Create a ForwardDISET operation for failover"""
@@ -161,13 +161,6 @@ class BaseAccountingType:
             cD[self.fieldsList[iPos]] = self.valuesList[iPos]
         return cD
 
-    def registerToServer(self):
-        """
-        Register type in server
-        """
-        rpcClient = Client(url="Accounting/DataStore")
-        return rpcClient.registerType(*self.getDefinition())
-
     def commit(self):
         """
         Commit register to server
@@ -9,11 +9,6 @@ Services
     Authorization
     {
       Default = authenticated
-      compactDB = ServiceAdministrator
-      deleteType = ServiceAdministrator
-      registerType = ServiceAdministrator
-      setBucketsLength = ServiceAdministrator
-      regenerateBuckets = ServiceAdministrator
     }
   }
  ##END
@@ -14,7 +14,6 @@ import datetime
 from DIRAC import S_ERROR, S_OK
 from DIRAC.AccountingSystem.DB.MultiAccountingDB import MultiAccountingDB
 from DIRAC.ConfigurationSystem.Client import PathFinder
-from DIRAC.Core.Base.Client import Client
 from DIRAC.Core.DISET.RequestHandler import RequestHandler, getServiceOption
 from DIRAC.Core.Utilities import TimeUtilities
 from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
@@ -39,30 +38,6 @@ class DataStoreHandler(RequestHandler):
         gThreadScheduler.addPeriodicTask(60, cls.__acDB.loadPendingRecords)
         return S_OK()
 
-    types_registerType = [str, list, list, list]
-
-    def export_registerType(self, typeName, definitionKeyFields, definitionAccountingFields, bucketsLength):
-        """
-        Register a new type. (Only for all powerful admins)
-        """
-        return self.__acDB.registerType(typeName, definitionKeyFields, definitionAccountingFields, bucketsLength)
-
-    types_setBucketsLength = [str, list]
-
-    def export_setBucketsLength(self, typeName, bucketsLength):
-        """
-        Change the buckets Length. (Only for all powerful admins)
-        """
-        return self.__acDB.changeBucketsLength(typeName, bucketsLength)
-
-    types_regenerateBuckets = [str]
-
-    def export_regenerateBuckets(self, typeName):
-        """
-        Recalculate buckets. (Only for all powerful admins)
-        """
-        return self.__acDB.regenerateBuckets(typeName)
-
     types_getRegisteredTypes = []
 
     def export_getRegisteredTypes(self):
@@ -106,51 +81,4 @@ class DataStoreHandler(RequestHandler):
             records.append((entry[0], startTime, endTime, entry[3]))
         return self.__acDB.insertRecordBundleThroughQueue(records)
 
-    types_compactDB = []
-
-    def export_compactDB(self):
-        """
-        Compact the db by grouping buckets
-        """
-        # if we are running workers (not only one service) we can redirect the request to the master
-        # For more information please read the Administrative guide Accounting part!
-        # ADVICE: If you want to trigger the bucketing, please make sure the bucketing is not running!!!!
-        if self.runBucketing:
-            return self.__acDB.compactBuckets()
-
-        return Client(url="Accounting/DataStoreMaster").compactDB()
-
     types_remove = [str, datetime.datetime, datetime.datetime, list]
-
-    def export_remove(self, typeName, startTime, endTime, valuesList):
-        """
-        Remove a record for a type
-        """
-        startTime = int(TimeUtilities.toEpoch(startTime))
-        endTime = int(TimeUtilities.toEpoch(endTime))
-        return self.__acDB.deleteRecord(typeName, startTime, endTime, valuesList)
-
-    types_removeRegisters = [list]
-
-    def export_removeRegisters(self, entriesList):
-        """
-        Remove a record for a type
-        """
-        expectedTypes = [str, datetime.datetime, datetime.datetime, list]
-        for entry in entriesList:
-            if len(entry) != 4:
-                return S_ERROR("Invalid records")
-            for i, en in enumerate(entry):
-                if not isinstance(en, expectedTypes[i]):
-                    return S_ERROR(f"{i} field in the records should be {expectedTypes[i]}")
-        ok = 0
-        for entry in entriesList:
-            startTime = int(TimeUtilities.toEpoch(entry[1]))
-            endTime = int(TimeUtilities.toEpoch(entry[2]))
-            record = entry[3]
-            result = self.__acDB.deleteRecord(entry[0], startTime, endTime, record)
-            if not result["OK"]:
-                return S_OK(ok)
-            ok += 1
-
-        return S_OK(ok)
@@ -323,7 +323,7 @@ class BaseClient:
                 pass
 
         # We randomize the list, and add at the end the failover URLs (System/FailoverURLs/Component)
-        urlsList = List.randomize(List.fromChar(urls, ",")) + failoverUrls
+        urlsList = List.fromChar(urls, ",") + failoverUrls
         self.__nbOfUrls = len(urlsList)
         self.__nbOfRetry = (
             2 if self.__nbOfUrls > 2 else 3
@@ -445,7 +445,6 @@ and this is thread {cThID}
             return self.__initStatus
         if self.__enableThreadCheck:
             self.__checkThreadID()
-
         gLogger.debug(f"Trying to connect to: {self.serviceURL}")
         try:
             # Calls the transport method of the apropriate protocol.
@@ -100,7 +100,8 @@ def main():
         gLogger.error(result["Message"])
         DIRAC.exit(1)
     if result["Value"][0] != 0:
-        gLogger.error(result["Value"][2])
+        gLogger.error("Apptainer command failed with exit code", result["Value"][0])
+        gLogger.error("Command output:", result["Value"])
         DIRAC.exit(2)
     gLogger.notice(result["Value"][1])
 
@@ -1,7 +1,8 @@
 """
-  This module contains helper methods for accessing operational attributes or parameters of DMS objects
+This module contains helper methods for accessing operational attributes or parameters of DMS objects
 
 """
+
 from collections import defaultdict
 from DIRAC import gConfig, gLogger, S_OK, S_ERROR
 from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
@@ -17,6 +18,9 @@ sLog = gLogger.getSubLogger(__name__)
 def resolveSEGroup(seGroupList, allSEs=None):
     """
     Resolves recursively a (list of) SEs that can be groupSEs
+    For modules, JobWrapper or whatever runs at a given site,
+    prefer using :py:func:`~DIRAC.DataManagementSystem.Utilities.ResolveSE.getDestinationSEList`
+
 
     :param seGroupList: list of SEs to resolve or comma-separated SEs
     :type seGroupList: list or string
@@ -447,26 +447,6 @@ class MonitoringDB(ElasticDB):
             records.append({paramName: getattr(resObj["_source"], paramName) for paramName in paramNames})
         return S_OK(records)
 
-    def getLastDayData(self, typeName, condDict):
-        """
-        It returns the last day data for a given monitoring type.
-
-        :returns: for example
-
-          .. code-block:: python
-
-            {'sort': [{'timestamp': {'order': 'desc'}}],
-             'query': {'bool': {'must': [{'match': {'host': 'dzmathe.cern.ch'}},
-             {'match': {'component': 'Bookkeeping_BookkeepingManager'}}]}}}
-
-        :param str typeName: name of the monitoring type
-        :param dict condDict: conditions for the query
-
-          * key -> name of the field
-          * value -> list of possible values
-        """
-        return self.__getRawData(typeName, condDict)
-
     def getLimitedData(self, typeName, condDict, size=10):
         """
         Returns a list of records for a given selection.
@@ -244,24 +244,6 @@ class MonitoringHandlerMixin:
         reportRequest["generatePlot"] = False
         return reporter.generate(reportRequest)
 
-    types_addMonitoringRecords = [str, list]
-
-    def export_addMonitoringRecords(self, monitoringtype, data):
-        """
-        Bulk insert data directly to the given monitoring type.
-
-        :param str monitoringtype: monitoring type name
-        :param list data: list of documents
-        :returns: S_OK or S_ERROR
-        """
-
-        retVal = self.__db.getIndexName(monitoringtype)
-        if not retVal["OK"]:
-            return retVal
-        prefix = retVal["Value"]
-        gLogger.debug("addMonitoringRecords:", prefix)
-        return self.__db.bulk_index(prefix, data)
-
     types_addRecords = [str, str, list]
 
     def export_addRecords(self, indexname, monitoringType, data):
@@ -290,21 +272,6 @@ class MonitoringHandlerMixin:
         gLogger.debug("delete index:", indexName)
         return self.__db.deleteIndex(indexName)
 
-    types_getLastDayData = [str, dict]
-
-    def export_getLastDayData(self, typeName, condDict):
-        """
-        It returns the data from the last day index. Note: we create daily indexes.
-
-        :param str typeName: name of the monitoring type
-        :param dict condDict: conditions for the query
-
-          * key -> name of the field
-          * value -> list of possible values
-        """
-
-        return self.__db.getLastDayData(typeName, condDict)
-
     types_getLimitedDat = [str, dict, int]
 
     def export_getLimitedData(self, typeName, condDict, size):
@@ -6,6 +6,7 @@
 from __future__ import print_function
 from __future__ import absolute_import
 from __future__ import division
+import json
 import re
 import tempfile
 import subprocess
@@ -25,6 +26,8 @@ STATES_MAP = {
 
 HOLD_REASON_SUBCODE = "55"
 
+STATE_ATTRIBUTES = "ClusterId,ProcId,JobStatus,HoldReasonCode,HoldReasonSubCode,HoldReason"
+
 subTemplate = """
 # Environment
 # -----------
@@ -62,6 +65,7 @@ environment = "DIRAC_PILOT_STAMP=$(stamp) %(environment)s"
 # Requirements
 # ------------
 request_cpus = %(processors)s
+requirements = NumJobStarts == 0
 
 # Exit options
 # ------------
@@ -73,7 +77,8 @@ on_exit_hold = ExitCode =!= 0
 # A subcode of our choice to identify who put the job on hold
 on_exit_hold_subcode = %(holdReasonSubcode)s
 # Jobs are then deleted from the system after N days if they are not idle or running
-periodic_remove = (JobStatus != 1) && (JobStatus != 2) && ((time() - EnteredCurrentStatus) > (%(daysToKeepRemoteLogs)s * 24 * 3600))
+periodic_remove = ((JobStatus == 1) && (NumJobStarts > 0)) || \
+    ((JobStatus != 1) && (JobStatus != 2) && ((time() - EnteredCurrentStatus) > (%(daysToKeepRemoteLogs)s * 24 * 3600))
 
 # Specific options
 # ----------------
@@ -87,63 +92,34 @@ Queue stamp in %(pilotStampList)s
 """
 
 
-def parseCondorStatus(lines, jobID):
+def getCondorStatus(jobMetadata):
     """parse the condor_q or condor_history output for the job status
 
-    :param lines: list of lines from the output of the condor commands, each line is a tuple of jobID, statusID, and holdReasonCode
-    :type lines: python:list
-    :param str jobID: jobID of condor job, e.g.: 123.53
+    :param jobMetadata: dict with job metadata
+    :type jobMetadata: dict[str, str | int]
     :returns: Status as known by DIRAC, and a reason if the job is being held
     """
-    jobID = str(jobID)
-
-    holdReason = ""
-    status = None
-    for line in lines:
-        l = line.strip().split()
-
-        # Make sure the job ID exists
-        if len(l) < 1 or l[0] != jobID:
-            continue
-
-        # Make sure the status is present and is an integer
-        try:
-            status = int(l[1])
-        except (ValueError, IndexError):
-            break
-
-        # Stop here if the status is not held (5): result should be found in STATES_MAP
-        if status != 5:
-            break
-
-        # A job can be held for various reasons,
-        # we need to further investigate with the holdReasonCode & holdReasonSubCode
-        # Details in:
-        # https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html#HoldReasonCode
-
-        # By default, a held (5) job is defined as Aborted in STATES_MAP, but there might be some exceptions
-        status = 3
-        try:
-            holdReasonCode = l[2]
-            holdReasonSubcode = l[3]
-            holdReason = " ".join(l[4:])
-        except IndexError:
-            # This should not happen in theory
-            # Just set the status to unknown such as
-            status = None
-            holdReasonCode = "undefined"
-            holdReasonSubcode = "undefined"
-            break
-
-        # If holdReasonCode is 3 (The PERIODIC_HOLD expression evaluated to True. Or, ON_EXIT_HOLD was true)
-        # And subcode is HOLD_REASON_SUBCODE, then it means the job failed by itself, it needs to be marked as Failed
-        if holdReasonCode == "3" and holdReasonSubcode == HOLD_REASON_SUBCODE:
-            status = 5
-        # If holdReasonCode is 16 (Input files are being spooled), the job should be marked as Waiting
-        elif holdReasonCode == "16":
-            status = 1
-
-    return (STATES_MAP.get(status, "Unknown"), holdReason)
+    if jobMetadata["JobStatus"] != 5:
+        # If the job is not held, we can return the status directly
+        return (STATES_MAP.get(jobMetadata["JobStatus"], "Unknown"), "")
+
+    # A job can be held for various reasons,
+    # we need to further investigate with the holdReasonCode & holdReasonSubCode
+    # Details in:
+    # https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html#HoldReasonCode
+
+    # By default, a held (5) job is defined as Aborted in STATES_MAP, but there might be some exceptions
+    status = 3
+
+    # If holdReasonCode is 3 (The PERIODIC_HOLD expression evaluated to True. Or, ON_EXIT_HOLD was true)
+    # And subcode is HOLD_REASON_SUBCODE, then it means the job failed by itself, it needs to be marked as Failed
+    if jobMetadata["HoldReasonCode"] == 3 and jobMetadata["HoldReasonSubCode"] == HOLD_REASON_SUBCODE:
+        status = 5
+    # If holdReasonCode is 16 (Input files are being spooled), the job should be marked as Waiting
+    elif jobMetadata["HoldReasonCode"] == 16:
+        status = 1
+
+    return (STATES_MAP.get(status, "Unknown"), jobMetadata["HoldReason"])
 
 
 class Condor(object):
@@ -171,8 +147,6 @@ class Condor(object):
         preamble = kwargs.get("Preamble")
 
         jdlFile = tempfile.NamedTemporaryFile(dir=outputDir, suffix=".jdl", mode="wt")
-        scheddOptions = 'requirements = OpSys == "LINUX"\n'
-        scheddOptions += "gentenv = False"
         jdlFile.write(
             subTemplate
@@ -185,7 +159,7 @@ class Condor(object):
                 holdReasonSubcode=HOLD_REASON_SUBCODE,
                 daysToKeepRemoteLogs=1,
                 scheddOptions="",
-                extraString="",
+                extraString=submitOptions,
                 pilotStampList=",".join(stamps),
             )
         )
@@ -193,7 +167,7 @@ class Condor(object):
         jdlFile.flush()
 
         cmd = "%s; " % preamble if preamble else ""
-        cmd += "condor_submit %s %s" % (submitOptions, jdlFile.name)
+        cmd += "condor_submit %s" % jdlFile.name
         sp = subprocess.Popen(
             cmd,
             shell=True,
@@ -283,7 +257,6 @@ class Condor(object):
 
     def getJobStatus(self, **kwargs):
         """Get status of the jobs in the given list"""
-
        resultDict = {}
 
         MANDATORY_PARAMETERS = ["JobIDList"]
@@ -299,15 +272,11 @@ class Condor(object):
             resultDict["Message"] = "Empty job list"
             return resultDict
 
-        user = kwargs.get("User")
-        if not user:
-            user = os.environ.get("USER")
-        if not user:
-            resultDict["Status"] = -1
-            resultDict["Message"] = "No user name"
-            return resultDict
+        # Prepare the command to get the status of the jobs
+        cmdJobs = " ".join(str(jobID) for jobID in jobIDList)
 
-        cmd = "condor_q -submitter %s -af:j JobStatus HoldReasonCode HoldReasonSubCode HoldReason" % user
+        # Get the status of the jobs currently active
+        cmd = "condor_q %s -attributes %s -json" % (cmdJobs, STATE_ATTRIBUTES)
         sp = subprocess.Popen(
             shlex.split(cmd),
             stdout=subprocess.PIPE,
@@ -317,16 +286,15 @@ class Condor(object):
         output, error = sp.communicate()
         status = sp.returncode
 
-        if status != 0:
+        if status != 0 or not output:
             resultDict["Status"] = status
             resultDict["Message"] = error
             return resultDict
 
-        qList = output.strip().split("\n")
+        jobsMetadata = json.loads(output)
 
-        condorHistCall = (
-            "condor_history -af:j JobStatus HoldReasonCode HoldReasonSubCode HoldReason -submitter %s" % user
-        )
+        # Get the status of the jobs in the history
+        condorHistCall = "condor_history %s -attributes %s -json" % (cmdJobs, STATE_ATTRIBUTES)
         sp = subprocess.Popen(
             shlex.split(condorHistCall),
             stdout=subprocess.PIPE,
@@ -335,15 +303,26 @@ class Condor(object):
         )
         output, _ = sp.communicate()
         status = sp.returncode
-        if status == 0:
-            for line in output.split("\n"):
-                qList.append(line)
+
+        if status != 0 or not output:
+            resultDict["Status"] = status
+            resultDict["Message"] = error
+            return resultDict
+
+        jobsMetadata += json.loads(output)
 
         statusDict = {}
-        if len(qList):
-            for job in jobIDList:
-                job = str(job)
-                statusDict[job], _ = parseCondorStatus(qList, job)
+        # Build a set of job IDs found in jobsMetadata
+        foundJobIDs = set()
+        for jobDict in jobsMetadata:
+            jobID = "%s.%s" % (jobDict["ClusterId"], jobDict["ProcId"])
+            statusDict[jobID], _ = getCondorStatus(jobDict)
+            foundJobIDs.add(jobID)
+
+        # For job IDs not found, set status to "Unknown"
+        for jobID in jobIDList:
+            if str(jobID) not in foundJobIDs:
+                statusDict[str(jobID)] = "Unknown"
 
         # Final output
         status = 0
@@ -355,19 +334,30 @@ class Condor(object):
         """Get the overall status of the CE"""
         resultDict = {}
 
-        user = kwargs.get("User")
-        if not user:
-            user = os.environ.get("USER")
-        if not user:
+        cmd = "condor_q -totals -json"
+        sp = subprocess.Popen(
+            shlex.split(cmd),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            universal_newlines=True,
+        )
+        output, error = sp.communicate()
+        status = sp.returncode
+
+        if status != 0 or not output:
             resultDict["Status"] = -1
-            resultDict["Message"] = "No user name"
+            resultDict["Message"] = error
             return resultDict
 
-        waitingJobs = 0
-        runningJobs = 0
+        jresult = json.loads(output)
+        resultDict["Status"] = 0
+        resultDict["Waiting"] = jresult[0]["Idle"]
+        resultDict["Running"] = jresult[0]["Running"]
 
+        # We also need to check the hold jobs, some of them are actually waiting (e.g. for input files)
+        cmd = 'condor_q -json -constraint "JobStatus == 5" -attributes HoldReasonCode'
         sp = subprocess.Popen(
-            shlex.split("condor_q -submitter %s" % user),
+            shlex.split(cmd),
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
             universal_newlines=True,
@@ -376,33 +366,42 @@ class Condor(object):
         status = sp.returncode
 
         if status != 0:
-            if "no record" in output:
-                resultDict["Status"] = 0
-                resultDict["Waiting"] = waitingJobs
-                resultDict["Running"] = runningJobs
-                return resultDict
-            resultDict["Status"] = status
+            resultDict["Status"] = -1
             resultDict["Message"] = error
             return resultDict
 
-        if "no record" in output:
-            resultDict["Status"] = 0
-            resultDict["Waiting"] = waitingJobs
-            resultDict["Running"] = runningJobs
+        # If there are no held jobs, we can return the result
+        if not output:
             return resultDict
 
-        if output:
-            lines = output.split("\n")
-            for line in lines:
-                if not line.strip():
-                    continue
-                if " I " in line:
-                    waitingJobs += 1
-                elif " R " in line:
-                    runningJobs += 1
+        jresult = json.loads(output)
+        for job_metadata in jresult:
+            if job_metadata["HoldReasonCode"] == 16:
+                resultDict["Waiting"] += 1
+
+        return resultDict
+
+    def getJobOutputFiles(self, **kwargs):
+        """Get output file names and templates for the specific CE"""
+        resultDict = {}
+
+        MANDATORY_PARAMETERS = ["JobIDList", "OutputDir", "ErrorDir"]
+        for argument in MANDATORY_PARAMETERS:
+            if argument not in kwargs:
+                resultDict["Status"] = -1
+                resultDict["Message"] = "No %s" % argument
+                return resultDict
+
+        outputDir = kwargs["OutputDir"]
+        errorDir = kwargs["ErrorDir"]
+        jobIDList = kwargs["JobIDList"]
+
+        jobDict = {}
+        for jobID in jobIDList:
+            jobDict[jobID] = {}
+            jobDict[jobID]["Output"] = "%s/%s.out" % (outputDir, jobID)
+            jobDict[jobID]["Error"] = "%s/%s.err" % (errorDir, jobID)
 
-        # Final output
         resultDict["Status"] = 0
-        resultDict["Waiting"] = waitingJobs
-        resultDict["Running"] = runningJobs
+        resultDict["Jobs"] = jobDict
         return resultDict
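
Note on the Condor.py hunks above: parseCondorStatus(lines, jobID), which scanned "-af:j"-formatted text output, is replaced by getCondorStatus(jobMetadata), which classifies a single ClassAd record as returned by `condor_q -json`. A minimal standalone sketch of the resulting mapping follows; the STATES_MAP values are inferred from the updated tests later in this diff and are illustrative, not the module's literal definition:

    # Illustrative sketch of the new status mapping, not the DIRAC module itself.
    # STATES_MAP values are inferred from the updated tests in this diff.
    STATES_MAP = {1: "Waiting", 2: "Running", 3: "Aborted", 4: "Done", 5: "Failed"}
    HOLD_REASON_SUBCODE = "55"

    def get_condor_status(job):
        """Map one condor_q/condor_history JSON record to (DIRAC status, hold reason)."""
        if job["JobStatus"] != 5:
            return STATES_MAP.get(job["JobStatus"], "Unknown"), ""
        # Held (5) jobs default to Aborted unless the hold codes say otherwise
        if job["HoldReasonCode"] == 3 and job["HoldReasonSubCode"] == HOLD_REASON_SUBCODE:
            return "Failed", job["HoldReason"]  # our on_exit_hold subcode: the payload itself failed
        if job["HoldReasonCode"] == 16:
            return "Waiting", job["HoldReason"]  # input files are still being spooled
        return "Aborted", job["HoldReason"]

    assert get_condor_status({"JobStatus": 2}) == ("Running", "")
    assert get_condor_status(
        {"JobStatus": 5, "HoldReasonCode": 16, "HoldReasonSubCode": 57, "HoldReason": "spooling"}
    ) == ("Waiting", "spooling")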
@@ -50,6 +50,7 @@ When using a local condor_schedd look at the HTCondor documentation for enabling
 
 import datetime
 import errno
+import json
 import os
 import subprocess
 import tempfile
@@ -63,7 +64,12 @@ from DIRAC.Core.Utilities.File import mkDir
 from DIRAC.Core.Utilities.List import breakListIntoChunks
 from DIRAC.Core.Utilities.Subprocess import systemCall
 from DIRAC.FrameworkSystem.private.authorization.utils.Tokens import writeToTokenFile
-from DIRAC.Resources.Computing.BatchSystems.Condor import HOLD_REASON_SUBCODE, parseCondorStatus, subTemplate
+from DIRAC.Resources.Computing.BatchSystems.Condor import (
+    HOLD_REASON_SUBCODE,
+    STATE_ATTRIBUTES,
+    getCondorStatus,
+    subTemplate,
+)
 from DIRAC.Resources.Computing.ComputingElement import ComputingElement
 from DIRAC.WorkloadManagementSystem.Client import PilotStatus
 
@@ -400,45 +406,57 @@ class HTCondorCEComputingElement(ComputingElement):
         if isinstance(jobIDList, str):
             jobIDList = [jobIDList]
 
+        self.tokenFile = None
         resultDict = {}
         condorIDs = {}
         # Get all condorIDs so we can just call condor_q and condor_history once
         for jobReference in jobIDList:
             jobReference = jobReference.split(":::")[0]
-            condorIDs[jobReference] = self._jobReferenceToCondorID(jobReference)
-
-        self.tokenFile = None
+            condorIDs[self._jobReferenceToCondorID(jobReference)] = jobReference
 
-        qList = []
-        for _condorIDs in breakListIntoChunks(condorIDs.values(), 100):
-            # This will return a list of 1245.75 3 undefined undefined undefined
+        jobsMetadata = []
+        for _condorIDs in breakListIntoChunks(condorIDs.keys(), 100):
             cmd = ["condor_q"]
             cmd.extend(self.remoteScheddOptions.strip().split(" "))
             cmd.extend(_condorIDs)
-            cmd.extend(["-af:j", "JobStatus", "HoldReasonCode", "HoldReasonSubCode", "HoldReason"])
+            cmd.extend(["-attributes", STATE_ATTRIBUTES])
+            cmd.extend(["-json"])
             result = self._executeCondorCommand(cmd, keepTokenFile=True)
             if not result["OK"]:
                 return result
 
-            qList.extend(result["Value"].split("\n"))
+            if result["Value"]:
+                jobsMetadata.extend(json.loads(result["Value"]))
 
             condorHistCall = ["condor_history"]
             condorHistCall.extend(self.remoteScheddOptions.strip().split(" "))
             condorHistCall.extend(_condorIDs)
-            condorHistCall.extend(["-af:j", "JobStatus", "HoldReasonCode", "HoldReasonSubCode", "HoldReason"])
+            condorHistCall.extend(["-attributes", STATE_ATTRIBUTES])
+            condorHistCall.extend(["-json"])
             result = self._executeCondorCommand(cmd, keepTokenFile=True)
             if not result["OK"]:
                 return result
 
-            qList.extend(result["Value"].split("\n"))
+            if result["Value"]:
+                jobsMetadata.extend(json.loads(result["Value"]))
 
-        for job, jobID in condorIDs.items():
-            jobStatus, reason = parseCondorStatus(qList, jobID)
+        foundJobIDs = set()
+        for jobDict in jobsMetadata:
+            jobStatus, reason = getCondorStatus(jobDict)
+            condorId = f"{jobDict['ClusterId']}.{jobDict['ProcId']}"
+            jobReference = condorIDs.get(condorId)
 
             if jobStatus == PilotStatus.ABORTED:
-                self.log.verbose("Job", f"{jobID} held: {reason}")
+                self.log.verbose("Job", f"{jobReference} held: {reason}")
+
+            resultDict[jobReference] = jobStatus
+            foundJobIDs.add(jobReference)
 
-            resultDict[job] = jobStatus
+        # Check if we have any jobs that were not found in the condor_q or condor_history
+        for jobReference in condorIDs.values():
+            if jobReference not in foundJobIDs:
+                self.log.verbose("Job", f"{jobReference} not found in condor_q or condor_history")
+                resultDict[jobReference] = PilotStatus.UNKNOWN
 
         self.tokenFile = None
 
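Note on the HTCondorCEComputingElement hunks above: job status is now collected by querying condor_q and condor_history with "-attributes ... -json" (in chunks of 100), concatenating the two JSON lists, and keying the records on "ClusterId.ProcId"; jobs reported by neither command fall back to Unknown. A compact sketch of that flow, using a hypothetical helper and only the command-line options that appear in the hunks:

    import json
    import subprocess

    STATE_ATTRIBUTES = "ClusterId,ProcId,JobStatus,HoldReasonCode,HoldReasonSubCode,HoldReason"

    def query_condor_json(command, job_ids):
        # Hypothetical helper: run condor_q or condor_history for the given jobs
        cmd = [command, *map(str, job_ids), "-attributes", STATE_ATTRIBUTES, "-json"]
        proc = subprocess.run(cmd, capture_output=True, text=True, check=True)
        # condor prints nothing at all when no job matches, hence the guard
        return json.loads(proc.stdout) if proc.stdout else []

    def status_records(job_ids):
        records = query_condor_json("condor_q", job_ids) + query_condor_json("condor_history", job_ids)
        found = {f"{r['ClusterId']}.{r['ProcId']}": r for r in records}
        # Jobs missing from both outputs are reported as Unknown by the real code
        return {str(j): found.get(str(j)) for j in job_ids}
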
@@ -2,6 +2,7 @@
 """
 tests for HTCondorCEComputingElement module
 """
+import json
 import uuid
 
 import pytest
@@ -12,18 +13,30 @@ from DIRAC.Resources.Computing.BatchSystems import Condor
 
 MODNAME = "DIRAC.Resources.Computing.HTCondorCEComputingElement"
 
-STATUS_LINES = """
-123.2 5 4 0 undefined
-123.1 3 undefined undefined undefined
-""".strip().split(
-    "\n"
-)
-
-HISTORY_LINES = """
-123.0 4 undefined undefined undefined
-""".strip().split(
-    "\n"
-)
+STATUS_QUEUE = [
+    {
+        "ClusterId": 123,
+        "ProcId": 2,
+        "JobStatus": 5,
+        "HoldReasonCode": 4,
+        "HoldReasonSubCode": 0,
+        "HoldReason": "The credentials for the job are invalid",
+    },
+    {
+        "ClusterId": 123,
+        "ProcId": 1,
+        "JobStatus": 3,
+    },
+]
+
+
+STATUS_HISTORY = [
+    {
+        "ClusterId": 123,
+        "ProcId": 0,
+        "JobStatus": 4,
+    }
+]
 
 
 @pytest.fixture
@@ -32,42 +45,47 @@ def setUp():
 
 
 def test_parseCondorStatus():
-    statusLines = f"""
-    104098.1 1 undefined undefined undefined
-    104098.2 2 undefined undefined undefined
-    104098.3 3 undefined undefined undefined
-    104098.4 4 undefined undefined undefined
-    104098.5 5 16 57 Input data are being spooled
-    104098.6 5 3 {Condor.HOLD_REASON_SUBCODE} Policy
-    104098.7 5 1 0 undefined
-
-    foo bar
-    104096.1 3 16 test test
-    104096.2 3 test
-    104096.3 5 undefined undefined undefined
-    104096.4 7
-    """.strip().split(
-        "\n"
-    )
-    # force there to be an empty line
+    statusOutput = {"ClusterId": 104098, "ProcId": 1, "JobStatus": 1}
+    assert HTCE.getCondorStatus(statusOutput) == ("Waiting", "")
 
-    expectedResults = {
-        "104098.1": "Waiting",
-        "104098.2": "Running",
-        "104098.3": "Aborted",
-        "104098.4": "Done",
-        "104098.5": "Waiting",
-        "104098.6": "Failed",
-        "104098.7": "Aborted",
-        "foo": "Unknown",
-        "104096.1": "Aborted",
-        "104096.2": "Aborted",
-        "104096.3": "Aborted",
-        "104096.4": "Unknown",
-    }
+    statusOutput = {"ClusterId": 104098, "ProcId": 2, "JobStatus": 2}
+    assert HTCE.getCondorStatus(statusOutput) == ("Running", "")
+
+    statusOutput = {"ClusterId": 104098, "ProcId": 3, "JobStatus": 3}
+    assert HTCE.getCondorStatus(statusOutput) == ("Aborted", "")
+
+    statusOutput = {"ClusterId": 104098, "ProcId": 4, "JobStatus": 4}
+    assert HTCE.getCondorStatus(statusOutput) == ("Done", "")
 
-    for jobID, expected in expectedResults.items():
-        assert HTCE.parseCondorStatus(statusLines, jobID)[0] == expected
+    statusOutput = {
+        "ClusterId": 104098,
+        "ProcId": 5,
+        "JobStatus": 5,
+        "HoldReasonCode": 16,
+        "HoldReasonSubCode": 57,
+        "HoldReason": "Input data are being spooled",
+    }
+    assert HTCE.getCondorStatus(statusOutput) == ("Waiting", "Input data are being spooled")
+
+    statusOutput = {
+        "ClusterId": 104098,
+        "ProcId": 6,
+        "JobStatus": 5,
+        "HoldReasonCode": 3,
+        "HoldReasonSubCode": HTCE.HOLD_REASON_SUBCODE,
+        "HoldReason": "Policy",
+    }
+    assert HTCE.getCondorStatus(statusOutput) == ("Failed", "Policy")
+
+    statusOutput = {
+        "ClusterId": 104098,
+        "ProcId": 7,
+        "JobStatus": 5,
+        "HoldReasonCode": 1,
+        "HoldReasonSubCode": 0,
+        "HoldReason": "Aborted by user",
+    }
+    assert HTCE.getCondorStatus(statusOutput) == ("Aborted", "Aborted by user")
 
 
 def test_getJobStatus(mocker):
@@ -75,8 +93,8 @@ def test_getJobStatus(mocker):
     mocker.patch(
         MODNAME + ".systemCall",
         side_effect=[
-            S_OK((0, "\n".join(STATUS_LINES), "")),
-            S_OK((0, "\n".join(HISTORY_LINES), "")),
+            S_OK((0, json.dumps(STATUS_QUEUE), "")),
+            S_OK((0, json.dumps(STATUS_HISTORY), "")),
             S_OK((0, "", "")),
             S_OK((0, "", "")),
         ],
@@ -110,7 +128,7 @@ def test_getJobStatus(mocker):
 def test_getJobStatusBatchSystem(mocker):
     """Test Condor Batch System plugin getJobStatus"""
     patchPopen = mocker.patch("DIRAC.Resources.Computing.BatchSystems.Condor.subprocess.Popen")
-    patchPopen.return_value.communicate.side_effect = [("\n".join(STATUS_LINES), ""), ("\n".join(HISTORY_LINES), "")]
+    patchPopen.return_value.communicate.side_effect = [(json.dumps(STATUS_QUEUE), ""), (json.dumps(STATUS_HISTORY), "")]
     patchPopen.return_value.returncode = 0
 
     ret = Condor.Condor().getJobStatus(JobIDList=["123.0", "123.1", "123.2", "333.3"])
@@ -1,4 +1,4 @@
-""" Base Storage Class provides the base interface for all storage plug-ins
+"""Base Storage Class provides the base interface for all storage plug-ins
 
     exists()
 
@@ -33,6 +33,7 @@ These are the methods for getting information about the Storage:
     getOccupancy()
 
 """
+
 import errno
 import json
 import os
@@ -113,7 +114,8 @@ class StorageBase:
 
     def getParameters(self):
         """Get the parameters with which the storage was instantiated"""
-        parameterDict = dict(self.protocolParameters)
+        parameterDict = dict(self._allProtocolParameters)
+        parameterDict.update(self.protocolParameters)
         parameterDict["StorageName"] = self.name
         parameterDict["PluginName"] = self.pluginName
         parameterDict["URLBase"] = self.getURLBase().get("Value", "")
@@ -10,6 +10,7 @@ and a Watchdog Agent that can monitor its progress.
   :caption: JobWrapper options
 
 """
+
 import contextlib
 import datetime
 import glob
@@ -119,14 +120,16 @@ class JobWrapper:
         self.pilotRef = gConfig.getValue("/LocalSite/PilotReference", "Unknown")
         self.cpuNormalizationFactor = gConfig.getValue("/LocalSite/CPUNormalizationFactor", 0.0)
         self.bufferLimit = gConfig.getValue(self.section + "/BufferLimit", 10485760)
-        self.defaultOutputSE = getDestinationSEList(
-            gConfig.getValue("/Resources/StorageElementGroups/SE-USER", []), self.siteName
-        )
+        try:
+            self.defaultOutputSE = getDestinationSEList("SE-USER", self.siteName)
+        except RuntimeError:
+            self.defaultOutputSE = []
         self.defaultCatalog = gConfig.getValue(self.section + "/DefaultCatalog", [])
         self.masterCatalogOnlyFlag = gConfig.getValue(self.section + "/MasterCatalogOnlyFlag", True)
-        self.defaultFailoverSE = getDestinationSEList(
-            gConfig.getValue("/Resources/StorageElementGroups/Tier1-Failover", []), self.siteName
-        )
+        try:
+            self.defaultFailoverSE = getDestinationSEList("Tier1-Failover", self.siteName)
+        except RuntimeError:
+            self.defaultFailoverSE = []
         self.defaultOutputPath = ""
         self.retryUpload = gConfig.getValue(self.section + "/RetryUpload", False)
         self.dm = DataManager()
@@ -93,10 +93,10 @@ class PilotManagerHandler(RequestHandler):
 
         result = self.pilotAgentsDB.getPilotInfo(pilotReference)
         if not result["OK"]:
-            self.log.error("Failed to get info for pilot", result["Message"])
+            self.log.error("Failed to get info for pilot", f"{pilotReference}: {result['Message']}")
             return S_ERROR("Failed to get info for pilot")
         if not result["Value"]:
-            self.log.warn("The pilot info is empty", pilotReference)
+            self.log.warn("The pilot info is empty for", pilotReference)
             return S_ERROR("Pilot info is empty")
 
         pilotDict = result["Value"][pilotReference]
@@ -105,11 +105,14 @@ class PilotManagerHandler(RequestHandler):
         # classic logs first, by default
         funcs = [self._getPilotOutput, self._getRemotePilotOutput]
         if remote:
+            self.log.info("Trying to retrieve output of pilot", f"{pilotReference} remotely first")
             funcs.reverse()
 
         result = funcs[0](pilotReference, pilotDict)
         if not result["OK"]:
-            self.log.warn("Pilot log retrieval failed (first attempt), remote ?", remote)
+            self.log.warn(
+                "Failed getting output for pilot", f"{pilotReference}. Will try another approach: {result['Message']}"
+            )
             result = funcs[1](pilotReference, pilotDict)
             return result
         else:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: DIRAC
-Version: 9.0.0a60
+Version: 9.0.0a61
 Summary: DIRAC is an interware, meaning a software framework for distributed computing.
 Home-page: https://github.com/DIRACGrid/DIRAC/
 License: GPL-3.0-only
@@ -1,15 +1,15 @@
 DIRAC/__init__.py,sha256=IrCjUEvcKwE2KLYNuxiADIGrtOgSCb8MOoK8QVDsERU,11301
-DIRAC/AccountingSystem/ConfigTemplate.cfg,sha256=QCE21jXKXJIr1gEWMIpzKEczeqOKInfnTNkwYY0QR_c,1033
+DIRAC/AccountingSystem/ConfigTemplate.cfg,sha256=xT_JHJGumwEx_w1aditaxn3zq0r4ehIScX4qYFwjT3o,819
 DIRAC/AccountingSystem/__init__.py,sha256=E88siUatzE7vxErfezaEuZxK4yfHvkKCqqC9toGvqwg,539
 DIRAC/AccountingSystem/Agent/NetworkAgent.py,sha256=qHY-aewzEYS99VsW_Ka-aoap2kqNEAe9qcH3jg25yqI,10999
 DIRAC/AccountingSystem/Agent/__init__.py,sha256=JNmpSKBieczXNZ8srT1yYs_j6gCT8JKhqE_nhcFMel4,45
 DIRAC/AccountingSystem/Agent/test/Test_NetworkAgent.py,sha256=DvWf5QQDhjCI94LT1l9VkOFmR8jK7_2Jpfvn5_ZDMFo,3746
-DIRAC/AccountingSystem/Client/AccountingCLI.py,sha256=RRGCAchP3JbNeD020zS9U7m36VBiDfrV7KmXiejuabk,7542
-DIRAC/AccountingSystem/Client/DataStoreClient.py,sha256=pzQd3o_oUf9AZmiWst3oLARlh9ebgrN-jVwB-pQAOA4,6110
+DIRAC/AccountingSystem/Client/AccountingCLI.py,sha256=ME7UyAdIzznx9NLtjnLe2-uIc9lB_Brnc9qa_zrtFVE,2401
+DIRAC/AccountingSystem/Client/DataStoreClient.py,sha256=C7UofNw3NIecJhg1p2cQuIJ42Vr3TM4tWdhjlTAm4gs,5592
 DIRAC/AccountingSystem/Client/ReportCLI.py,sha256=6BGufbZN_5MUJeiYgdnISfyCbAMyjVX7-mDxZLiedJ4,1907
 DIRAC/AccountingSystem/Client/ReportsClient.py,sha256=smGoZ4pDTj6hjj0cDpQsRmNjbuxX6ukc5dbzNliXn10,3587
 DIRAC/AccountingSystem/Client/__init__.py,sha256=dT6KQbA-0PPotnu-T_ofVcW-HmuRL3Q9t2HRAERkC6E,46
-DIRAC/AccountingSystem/Client/Types/BaseAccountingType.py,sha256=PGNwFKYS6bn5VRazzyRwQLN6gn04VXpKbh2xDRaRekA,6388
+DIRAC/AccountingSystem/Client/Types/BaseAccountingType.py,sha256=NrH2ywkKq7vLs7voMH0XEsL7OOanbMWxeRjT-Kd9BNk,6183
 DIRAC/AccountingSystem/Client/Types/DataOperation.py,sha256=LTklg_czWwjYMZktswD-JtnnyV8VTQ1SC_d9ejIBvRs,1224
 DIRAC/AccountingSystem/Client/Types/Job.py,sha256=oBwd_U2HK0Ej9iEJFUcTmmdyFn1nJWQ_k4ZbAkgC6Dc,2176
 DIRAC/AccountingSystem/Client/Types/Network.py,sha256=xKoiolYkUjG8hpNAzJEdp6luc6TARyGlIa7UYs9QYeA,1165
@@ -23,7 +23,7 @@ DIRAC/AccountingSystem/DB/AccountingDB.sql,sha256=caJDh6ioNQq7uoBvBNW2ZgO5bAKj6q
 DIRAC/AccountingSystem/DB/MultiAccountingDB.py,sha256=IiRBqCnyVhP16-hW0pwSOUXYBVAEjQQPPioK7fzVR0s,3755
 DIRAC/AccountingSystem/DB/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DIRAC/AccountingSystem/DB/test/Test_AccountingDB.py,sha256=vnMd_CrK7zBTieno9U5yMtfE5KwqX_xiETbaiBMCFzw,15849
-DIRAC/AccountingSystem/Service/DataStoreHandler.py,sha256=ci-tr8iUL_VmwjobDvZNsyoCtb6KpQrCbpvnBHXpbZc,6047
+DIRAC/AccountingSystem/Service/DataStoreHandler.py,sha256=e5RjESW16kYaBQ0bhUJuHZULizZzcOTXYg2c_uMxcdk,3338
 DIRAC/AccountingSystem/Service/ReportGeneratorHandler.py,sha256=fksRV5NN0GVkNSmnvjEWQop0TrQ8TiMUmJf8CcIAhRw,8485
 DIRAC/AccountingSystem/Service/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DIRAC/AccountingSystem/private/DBUtils.py,sha256=2yTapP3e5WS67d_yWW0_uTeDUxi62Zr0Ogp3vfooUa8,12869
@@ -125,7 +125,7 @@ DIRAC/Core/DISET/ServiceReactor.py,sha256=JU3cPaVvXVUtbkXm1KPje4T4Ksg1mH728FW_OW
 DIRAC/Core/DISET/ThreadConfig.py,sha256=Q43BnQ8yliFliU65rLxHBV49c4TFzrYq9Ewe5n2vASQ,3086
 DIRAC/Core/DISET/TransferClient.py,sha256=2WDH3Be5NSAY4xWRK0RL2OnKeQLtaPRIJLQeeVYY5V8,7657
 DIRAC/Core/DISET/__init__.py,sha256=ZPbhSIBCK14nC9R5drkX8lfK2YHY7g-WVg0Zuy3uHAc,1085
-DIRAC/Core/DISET/private/BaseClient.py,sha256=qiDMZk2-XL8mFpzSKqyw_qvFwWWWzE5g-7w9lP4gHTo,27875
+DIRAC/Core/DISET/private/BaseClient.py,sha256=CCyTi61cqXGCEavHpwNIjrXSXQK-7FFS3vV48vot0NA,27858
 DIRAC/Core/DISET/private/FileHelper.py,sha256=Tb1UGZX_dnBE-nRljNrHFP2-0XGiC1j3c2-lUgaDo1M,14383
 DIRAC/Core/DISET/private/GatewayService.py,sha256=Doe5NPpv0HH3AQA0HcIw0D95bFbohR3NxO5SuZUagfg,20839
 DIRAC/Core/DISET/private/InnerRPCClient.py,sha256=Q60yR3JZinPlFYDU080ltrderN37oHnAZLmYZfTgC_c,2922
@@ -323,7 +323,7 @@ DIRAC/Core/Workflow/test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 DIRAC/Core/Workflow/test/step_g.py,sha256=hlncZ_tbb74wz0mRxyHTl0NnjRut2ysWLgUfiTvi9yo,21830
 DIRAC/Core/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DIRAC/Core/scripts/dirac_agent.py,sha256=Nx1qlr2YOQobWkO1FtzFyL00tF8TH1HmonDsfbsCDzk,1644
-DIRAC/Core/scripts/dirac_apptainer_exec.py,sha256=2WwpPLUPZWcYRjeLd6SIIXmLVSK3ibbn-RaZXtg5CPA,4197
+DIRAC/Core/scripts/dirac_apptainer_exec.py,sha256=uRPyQ27khQk_d8qMJwA3u9A-eJV8pGytSj0JXgGKptI,4298
 DIRAC/Core/scripts/dirac_cert_convert.py,sha256=3ThPmb1Tg-UgeH-3ZV0ilkZ8-H4RsK8SOszLuuYG-vs,2498
 DIRAC/Core/scripts/dirac_configure.py,sha256=c3ZfQUe_JUJASi3iuvl4qD83kyfWvS_QixISyG833sg,29494
 DIRAC/Core/scripts/dirac_executor.py,sha256=ZZnr8NjjBmmKmQhUnstRklw9HTsP75XRXnkBawDWdnE,1722
@@ -440,7 +440,7 @@ DIRAC/DataManagementSystem/Service/TornadoFileCatalogHandler.py,sha256=Exqqwl83C
 DIRAC/DataManagementSystem/Service/TornadoS3GatewayHandler.py,sha256=zJKf6Z_s0TF1eOqf4Z3ZsRZIcdEOcvT1OvcD7UaM9Wc,491
 DIRAC/DataManagementSystem/Service/__init__.py,sha256=gpMK8hQ30qy4_std-qzmjDuWA9QHVD5ceNJMyZQv44E,54
 DIRAC/DataManagementSystem/Service/test/Test_Service.py,sha256=hhb-nprUiRd2qaAqNeHM1U83xFhPdT41uOgEzlBxzBI,304
-DIRAC/DataManagementSystem/Utilities/DMSHelpers.py,sha256=L0PrpZJCPt1tBya2KayANq5QExIkBgTHmnfLSsUjT3Q,19052
+DIRAC/DataManagementSystem/Utilities/DMSHelpers.py,sha256=UXResyFhzq4PQY-40gdLjav7zQiss0ZSTimVvHJqdOE,19211
 DIRAC/DataManagementSystem/Utilities/ResolveSE.py,sha256=Z_Gamxo31OhBWCBtO_1HkC0sj1wmhifGET9-xQSpqx0,4480
 DIRAC/DataManagementSystem/Utilities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DIRAC/DataManagementSystem/Utilities/test/Test_resolveSE.py,sha256=p9TukPS-adp4WpKFzT0pHvvsGFTRwLmb3ITtKxUNjIY,10846
@@ -696,10 +696,10 @@ DIRAC/MonitoringSystem/Client/Types/RMSMonitoring.py,sha256=DzmMIY22YKazfmfv8kOG
 DIRAC/MonitoringSystem/Client/Types/ServiceMonitoring.py,sha256=IZ_SIJbv8Eoub2DbbjKjSqlj0xNMU6wU9tX4EtOFU40,1491
 DIRAC/MonitoringSystem/Client/Types/WMSHistory.py,sha256=A6MTur_WQCGptfuDW8k2Sl-Utt8jUyUa4HzlS1lwsRA,1358
 DIRAC/MonitoringSystem/Client/Types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-DIRAC/MonitoringSystem/DB/MonitoringDB.py,sha256=fHpaVjbP0N4UYP-ejgiyHxDhXR2_Bc4TPF_BcjH0GDI,20076
+DIRAC/MonitoringSystem/DB/MonitoringDB.py,sha256=XZm32V1QShRBIq4FsONuwGHFSArXSmyc8gWmyoBgk_M,19357
 DIRAC/MonitoringSystem/DB/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DIRAC/MonitoringSystem/DB/test/Test_monitoringdb.py,sha256=Jf4NZgb9V6U4DYk4SCLVoTqxJVBYsj49rIaYB7e6qxw,2788
-DIRAC/MonitoringSystem/Service/MonitoringHandler.py,sha256=LoEk57slU9qJnCKkIpKFDT2RMRmX3Hhv4UnhyvpvdrM,13406
+DIRAC/MonitoringSystem/Service/MonitoringHandler.py,sha256=JGN1MUsxP8swTuIa4qdPvo-O4UoA6TkxjfDZ1sC6L2w,12332
 DIRAC/MonitoringSystem/Service/TornadoMonitoringHandler.py,sha256=06gIHrrLJTgI-vnCHAGsDDjZrRwJaMsa8ESsdWy0Ir8,1550
 DIRAC/MonitoringSystem/Service/WebAppHandler.py,sha256=5RJmLfcSML394USyC0pqn3VdJEVJXKrZ7-Kx2OzeuxI,20542
 DIRAC/MonitoringSystem/Service/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -904,7 +904,7 @@ DIRAC/Resources/Computing/AREXComputingElement.py,sha256=0QmYepoOfcYT6MBdHeG6dY_
 DIRAC/Resources/Computing/CloudComputingElement.py,sha256=N8wfRL6ppozkKFzbCuWuVvudNfAqTox1zNeDZY6lED0,21102
 DIRAC/Resources/Computing/ComputingElement.py,sha256=lAvlx7AOtkftku6sekIaEK4Wp_GlaDHnYeNfxW54zu4,19424
 DIRAC/Resources/Computing/ComputingElementFactory.py,sha256=gaYP8TvvP7OJsL2o6IMTIYYoBn6flt3Ue7f1NX4HmNY,2541
-DIRAC/Resources/Computing/HTCondorCEComputingElement.py,sha256=lMo7kOrYdg-U6LojD5DqkpW8U3Lb921xSRSAj1p2uew,25553
+DIRAC/Resources/Computing/HTCondorCEComputingElement.py,sha256=zEekKBBcbAsUgZVU6H7zOKFaV3AzS549uHnEAK61nMo,26146
 DIRAC/Resources/Computing/InProcessComputingElement.py,sha256=6PTGQrAB7ROuTrO1Eq86fmDduf6IrkVInjSMzXb00eM,4620
 DIRAC/Resources/Computing/LocalComputingElement.py,sha256=om4cH89cZANy6VF2sYeLhmESs5XVPCpEOLFckusdAxw,12533
 DIRAC/Resources/Computing/PoolComputingElement.py,sha256=QzU7R_1lmZ67lhiQseNvW493fgBL8tOkqBZxZMrFsBE,10404
@@ -913,7 +913,7 @@ DIRAC/Resources/Computing/SSHComputingElement.py,sha256=Sy-Ab2Q_honFRt4Oj_Veh7uq
 DIRAC/Resources/Computing/SingularityComputingElement.py,sha256=6bWkSEmPMV85JeKiqn1Q151QaG5rcW61-mnIU_MUj2A,17880
 DIRAC/Resources/Computing/__init__.py,sha256=S0glZLs-Q2srpRLELQPDHSB9CVAcBXrPnQsFi-ZsokM,43
 DIRAC/Resources/Computing/cloudinit.template,sha256=gF4yOILXXWjlrs3bcazLA2Wf2n7QYkebMcaD_kNWK5I,4025
-DIRAC/Resources/Computing/BatchSystems/Condor.py,sha256=tAqIVViV22q6uUFnu7rxBC3qAxDf6BLT0_UFXVPs5HA,12972
+DIRAC/Resources/Computing/BatchSystems/Condor.py,sha256=O9KctL-Pg7rZfT7-RPBtWcw1N8XGuGQOMtBxFeK0-f8,13337
 DIRAC/Resources/Computing/BatchSystems/GE.py,sha256=XkumJZPMgqxJr3qJc5EmLXOzrfKZSKuUPKZjPumTC1U,9170
 DIRAC/Resources/Computing/BatchSystems/Host.py,sha256=NmCIn6e5ZUiXiX1qIke-3G5dEEDXUznASvfIoP8Vx5U,9638
 DIRAC/Resources/Computing/BatchSystems/LSF.py,sha256=ujM4Fd22WdwM3VZgxtxXRhdV-izDkmSDbaS8Cd7YUEs,7218
@@ -942,7 +942,7 @@ DIRAC/Resources/Computing/BatchSystems/test/__init__.py,sha256=47DEQpj8HBSa-_TIm
 DIRAC/Resources/Computing/CloudProviders/OpenNebula.py,sha256=TJWtje-l5OqaXxIAeV7xr8L-WjR-eZPTAI7rVfrh09c,9928
 DIRAC/Resources/Computing/test/Test_AREXComputingElement.py,sha256=00DdLY_PLsGVogdHejIsJTRg73nwSOOS-F11gVDlBmM,2518
 DIRAC/Resources/Computing/test/Test_ComputingElement.py,sha256=lIVHO8ottEU1z0B4X_-V2Uqf6C9bJoDa-Tgh6-DVwYM,1541
-DIRAC/Resources/Computing/test/Test_HTCondorCEComputingElement.py,sha256=As_d4SHKx2vuUY7JM_Uujq_vPzk1s7V6Myun382qGGo,7560
+DIRAC/Resources/Computing/test/Test_HTCondorCEComputingElement.py,sha256=7x8SqbOHfnn-c-fzyc2FK00_cJ25A3ed-oR9rfkwKXw,8193
 DIRAC/Resources/Computing/test/Test_InProcessComputingElement.py,sha256=j_ESF_f7-Aov48rtAZUjZclQvXSlKMs3SIpPu4uTJZ0,2067
 DIRAC/Resources/Computing/test/Test_PoolComputingElement.py,sha256=KPzPRp4Pz0EpO-bDQxTcfSpvgPikvqAYv98SRStiAhQ,11843
 DIRAC/Resources/Computing/test/Test_SSHComputingElement.py,sha256=ahYUZ9hySaEXik3nvUMWJ7eP87xj6u-LMDpX7A1OQd4,2425
@@ -1003,7 +1003,7 @@ DIRAC/Resources/Storage/GFAL2_StorageBase.py,sha256=_DUQjclrxzeICOQpvNNXCLGAJ8L9
 DIRAC/Resources/Storage/GFAL2_XROOTStorage.py,sha256=MlZMV9Eh3sEEOKDkiK9D2VGDHWlpauX5c-A1KnvJLLw,4497
 DIRAC/Resources/Storage/RFIOStorage.py,sha256=KmMEY-17_FMlq2rCY03X4EJAce41Cg5Mg-tbCUwz5LI,42741
 DIRAC/Resources/Storage/S3Storage.py,sha256=5kzgfeH81724odsDoqjDggP9a4yiAD0wJt3eJsgAeT8,28080
-DIRAC/Resources/Storage/StorageBase.py,sha256=N_UquiN8w3R26BoioBMK3NCoNYHCpn7hEgFgcmcOmdA,16847
+DIRAC/Resources/Storage/StorageBase.py,sha256=ElZS4DlWWwM_WWLV61uliMy1EWrL1lpLWK18MGeWrFM,16905
 DIRAC/Resources/Storage/StorageElement.py,sha256=Tt8RoQjAiA0bH4qx7uGZk_dYYUnf5d4D1muFa8Gxq6k,62620
 DIRAC/Resources/Storage/StorageFactory.py,sha256=UlKzMOvhR8SSBpZ4QPp5taeOU_VZTlhv08ggL606Dxo,19435
 DIRAC/Resources/Storage/Utilities.py,sha256=v6PA-Y3ApNObLnsC-POM3f4UJMWcYNWTcqnQ5-T47J4,1048
@@ -1206,7 +1206,7 @@ DIRAC/WorkloadManagementSystem/FutureClient/JobMonitoringClient.py,sha256=3Mjq3h
 DIRAC/WorkloadManagementSystem/FutureClient/JobStateUpdateClient.py,sha256=tb0oz3uewK2laHYu-xwSWGPZ0ejXhfEUf6jV3x-loCM,7033
 DIRAC/WorkloadManagementSystem/FutureClient/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 DIRAC/WorkloadManagementSystem/JobWrapper/JobExecutionCoordinator.py,sha256=Y64YnkrKklOkXnV5wKsgzBONFljVJ0ByFVUMFNkiGAU,2461
-DIRAC/WorkloadManagementSystem/JobWrapper/JobWrapper.py,sha256=2nlcSeQgCbn5yGOytZZx1F07HkvEfwfaJ7w3GnduAq4,74697
+DIRAC/WorkloadManagementSystem/JobWrapper/JobWrapper.py,sha256=wAxQT5NMJdt94X_YHbSN-uCLgcJW6jC6NhbqRGo-Km8,74716
 DIRAC/WorkloadManagementSystem/JobWrapper/JobWrapperOfflineTemplate.py,sha256=wem5VDN9XiC7szAzdsbgHUxpIOQB2Hj36DIVMoV9px8,2490
 DIRAC/WorkloadManagementSystem/JobWrapper/JobWrapperTemplate.py,sha256=4QgcFPMLRaTagP9e_Vvsla8pFH8HdewklHfS-gyS4-g,3313
 DIRAC/WorkloadManagementSystem/JobWrapper/JobWrapperUtilities.py,sha256=5w_4PMnaHhuexChADDvt1L9Ih1PstdUuYWObnlv9Dto,10072
@@ -1228,7 +1228,7 @@ DIRAC/WorkloadManagementSystem/Service/JobPolicy.py,sha256=o88xR3roe_JRB5F53oxb1
 DIRAC/WorkloadManagementSystem/Service/JobStateUpdateHandler.py,sha256=p-dLY4GI4H8I5joOel5ZUf2JBz-BDoYmbn-6BJ_4FQE,9829
 DIRAC/WorkloadManagementSystem/Service/MatcherHandler.py,sha256=L0sdRdoSh2qa7IA5tGIPAhlJoKLB4JecCTezkaQu1tc,4554
 DIRAC/WorkloadManagementSystem/Service/OptimizationMindHandler.py,sha256=7FbhdRkerS2CM0HnIDSUo_OqRyEnjgol9RR7WfVK5Tc,8592
-DIRAC/WorkloadManagementSystem/Service/PilotManagerHandler.py,sha256=r2H_3xp_TKXVvZM7c1JV3trTzdsJqXi5unyU6Tc8uTg,14649
+DIRAC/WorkloadManagementSystem/Service/PilotManagerHandler.py,sha256=XYbWGlL-9wmk2FMkdGKZQgqkDjHbFEARem4mRdOaSN4,14846
 DIRAC/WorkloadManagementSystem/Service/SandboxStoreHandler.py,sha256=yFkIxpZ7QiA2gi_0gUo8DKwPBDn-IRgThPolmUvuYvA,20287
 DIRAC/WorkloadManagementSystem/Service/TornadoJobManagerHandler.py,sha256=8R4St1HhJh-NFbyKWkZz4L9a7ZhwspJDymFGzS0qyV8,528
 DIRAC/WorkloadManagementSystem/Service/TornadoJobMonitoringHandler.py,sha256=53m2Mi_yqo-bkouXKUHidI81KwT_Rk2IuWEDw4Z7Wm0,463
@@ -1297,9 +1297,9 @@ DIRAC/tests/Workflow/Integration/exe-script.py,sha256=B_slYdTocEzqfQLRhwuPiLyYUn
 DIRAC/tests/Workflow/Integration/helloWorld.py,sha256=tBgEHH3ZF7ZiTS57gtmm3DW-Qxgm_57HWHpM-Y8XSws,205
 DIRAC/tests/Workflow/Regression/helloWorld.py,sha256=69eCgFuVSYo-mK3Dj2dw1c6g86sF5FksKCf8V2aGVoM,509
 DIRAC/tests/Workflow/Regression/helloWorld.xml,sha256=xwydIcFTAHIX-YPfQfyxuQ7hzvIO3IhR3UAF7ORgkGg,5310
-dirac-9.0.0a60.dist-info/licenses/LICENSE,sha256=uyr4oV6jmjUeepXZPPjkJRwa5q5MrI7jqJz5sVXNblQ,32452
-dirac-9.0.0a60.dist-info/METADATA,sha256=SNxluakCtKWSHyDq1jPhCq0Y5P8JHLbwbZC31TwpvSo,9957
-dirac-9.0.0a60.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dirac-9.0.0a60.dist-info/entry_points.txt,sha256=rTtMPd5AGCg3zNaKE60PuEVFkSRzQlRG-DC_v9FCylM,16824
-dirac-9.0.0a60.dist-info/top_level.txt,sha256=RISrnN9kb_mPqmVu8_o4jF-DSX8-h6AcgfkO9cgfkHA,6
-dirac-9.0.0a60.dist-info/RECORD,,
+dirac-9.0.0a61.dist-info/licenses/LICENSE,sha256=uyr4oV6jmjUeepXZPPjkJRwa5q5MrI7jqJz5sVXNblQ,32452
+dirac-9.0.0a61.dist-info/METADATA,sha256=H6Ckm6zVriQeSOPVbrewj7nnpfppc3J3uNdGO819_i8,9957
+dirac-9.0.0a61.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dirac-9.0.0a61.dist-info/entry_points.txt,sha256=rTtMPd5AGCg3zNaKE60PuEVFkSRzQlRG-DC_v9FCylM,16824
+dirac-9.0.0a61.dist-info/top_level.txt,sha256=RISrnN9kb_mPqmVu8_o4jF-DSX8-h6AcgfkO9cgfkHA,6
+dirac-9.0.0a61.dist-info/RECORD,,