scipion-pyworkflow 3.7.0__py3-none-any.whl → 3.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. pyworkflow/config.py +26 -1
  2. pyworkflow/constants.py +7 -1
  3. pyworkflow/gui/canvas.py +6 -6
  4. pyworkflow/gui/dialog.py +3 -2
  5. pyworkflow/gui/form.py +12 -2
  6. pyworkflow/gui/graph.py +6 -6
  7. pyworkflow/gui/graph_layout.py +2 -2
  8. pyworkflow/gui/project/base.py +7 -7
  9. pyworkflow/gui/project/project.py +36 -28
  10. pyworkflow/gui/project/steps.py +8 -3
  11. pyworkflow/gui/project/viewdata.py +1 -1
  12. pyworkflow/gui/project/viewprotocols.py +9 -9
  13. pyworkflow/gui/project/viewprotocols_extra.py +3 -3
  14. pyworkflow/gui/widgets.py +2 -2
  15. pyworkflow/mapper/sqlite.py +5 -5
  16. pyworkflow/object.py +12 -4
  17. pyworkflow/plugin.py +8 -3
  18. pyworkflow/project/config.py +4 -1
  19. pyworkflow/project/manager.py +4 -3
  20. pyworkflow/project/project.py +36 -20
  21. pyworkflow/project/scripts/create.py +14 -4
  22. pyworkflow/project/scripts/schedule.py +1 -1
  23. pyworkflow/protocol/constants.py +1 -0
  24. pyworkflow/protocol/executor.py +133 -19
  25. pyworkflow/protocol/params.py +5 -8
  26. pyworkflow/protocol/protocol.py +57 -37
  27. pyworkflow/template.py +1 -1
  28. pyworkflow/utils/graph.py +24 -51
  29. pyworkflow/utils/properties.py +16 -12
  30. pyworkflowtests/protocols.py +3 -3
  31. pyworkflowtests/tests/test_protocol_execution.py +63 -0
  32. {scipion_pyworkflow-3.7.0.dist-info → scipion_pyworkflow-3.9.0.dist-info}/METADATA +12 -12
  33. {scipion_pyworkflow-3.7.0.dist-info → scipion_pyworkflow-3.9.0.dist-info}/RECORD +38 -38
  34. {scipion_pyworkflow-3.7.0.dist-info → scipion_pyworkflow-3.9.0.dist-info}/WHEEL +1 -1
  35. {scipion_pyworkflow-3.7.0.dist-info → scipion_pyworkflow-3.9.0.dist-info}/LICENSE.txt +0 -0
  36. {scipion_pyworkflow-3.7.0.dist-info → scipion_pyworkflow-3.9.0.dist-info}/dependency_links.txt +0 -0
  37. {scipion_pyworkflow-3.7.0.dist-info → scipion_pyworkflow-3.9.0.dist-info}/entry_points.txt +0 -0
  38. {scipion_pyworkflow-3.7.0.dist-info → scipion_pyworkflow-3.9.0.dist-info}/top_level.txt +0 -0
@@ -40,7 +40,7 @@ class ProjectSettings(pwobj.Object):
40
40
  COLOR_MODE_LABELS = 1
41
41
  COLOR_MODE_AGE = 2
42
42
  COLOR_MODE_SIZE = 3
43
- COLOR_MODES = (COLOR_MODE_STATUS, COLOR_MODE_LABELS, COLOR_MODE_AGE, COLOR_MODE_SIZE)
43
+ COLOR_MODES = (COLOR_MODE_STATUS, COLOR_MODE_LABELS, COLOR_MODE_AGE) # This has poor performance many cases, COLOR_MODE_SIZE)
44
44
 
45
45
  def __init__(self, confs={}, **kwargs):
46
46
  super().__init__(**kwargs)
@@ -112,6 +112,9 @@ class ProjectSettings(pwobj.Object):
112
112
  def setColorMode(self, colorMode):
113
113
  """ Set the color mode to use when drawing the graph.
114
114
  """
115
+ # Skip LABELS color mode to avoid double iteration
116
+ if colorMode == self.COLOR_MODE_LABELS:
117
+ colorMode+=1
115
118
  self.colorMode.set(colorMode)
116
119
 
117
120
  def statusColorMode(self):
@@ -97,9 +97,9 @@ class Manager(object):
97
97
  return projList
98
98
 
99
99
  def createProject(self, projectName, runsView=1,
100
- hostsConf=None, protocolsConf=None, location=None):
100
+ hostsConf=None, protocolsConf=None, location=None, comment=None):
101
101
  """Create a new project.
102
- confs dict can contains customs .conf files
102
+ confs dict can contain customs .conf files
103
103
  for: menus, protocols, or hosts
104
104
  """
105
105
  # Clean project name from undesired characters
@@ -118,7 +118,8 @@ class Manager(object):
118
118
  project = Project(pw.Config.getDomain(), projectPath)
119
119
  project.create(runsView=runsView,
120
120
  hostsConf=hostsConf,
121
- protocolsConf=protocolsConf)
121
+ protocolsConf=protocolsConf,
122
+ comment=comment)
122
123
  # If location is not the default one create a symlink on self.PROJECTS directory
123
124
  if projectPath != self.getProjectPath(projectName):
124
125
  # JMRT: Let's create the link to the absolute path, since relative
@@ -106,9 +106,11 @@ class Project(object):
106
106
  self.settings:config.ProjectSettings = None
107
107
  # Host configuration
108
108
  self._hosts = None
109
+
109
110
  # Creation time should be stored in project.sqlite when the project
110
111
  # is created and then loaded with other properties from the database
111
112
  self._creationTime = None
113
+
112
114
  # Time stamp with the last run has been updated
113
115
  self._lastRunTime = None
114
116
 
@@ -145,7 +147,16 @@ class Project(object):
145
147
  """ Return the time when the project was created. """
146
148
  # In project.create method, the first object inserted
147
149
  # in the mapper should be the creation time
148
- return self._creationTime
150
+ return self._creationTime.datetime()
151
+
152
+
153
+ def getComment(self):
154
+ """ Returns the project comment. Stored as CreationTime comment."""
155
+ return self._creationTime.getObjComment()
156
+
157
+ def setComment(self, newComment):
158
+ """ Sets the project comment """
159
+ self._creationTime.setObjComment(newComment)
149
160
 
150
161
  def getSettingsCreationTime(self):
151
162
  return self.settings.getCreationTime()
@@ -154,7 +165,7 @@ class Project(object):
154
165
  """ Returns the time elapsed from the creation to the last
155
166
  execution time. """
156
167
  if self._creationTime and self._lastRunTime:
157
- creationTs = self._creationTime
168
+ creationTs = self.getCreationTime()
158
169
  lastRunTs = self._lastRunTime.datetime()
159
170
  return lastRunTs - creationTs
160
171
  return None
@@ -293,12 +304,16 @@ class Project(object):
293
304
  creationTime = self.mapper.selectBy(name=PROJECT_CREATION_TIME)
294
305
 
295
306
  if creationTime: # CreationTime was found in project.sqlite
296
- self._creationTime = creationTime[0].datetime()
307
+ ctStr = creationTime[0] # This is our String type instance
308
+
309
+ # We store it in mem as dateime
310
+ self._creationTime = ctStr
311
+
297
312
  else:
298
313
  # We should read the creation time from settings.sqlite and
299
314
  # update the CreationTime in the project.sqlite
300
- self._creationTime = self.getSettingsCreationTime()
301
- self._storeCreationTime(self._creationTime)
315
+ self._creationTime = pwobj.String(self.getSettingsCreationTime())
316
+ self._storeCreationTime()
302
317
 
303
318
  # ---- Helper functions to load different pieces of a project
304
319
  def _loadDb(self, dbPath):
@@ -360,7 +375,7 @@ class Project(object):
360
375
  return self.settings.getProtocolView()
361
376
 
362
377
  def create(self, runsView=1, readOnly=False, hostsConf=None,
363
- protocolsConf=None):
378
+ protocolsConf=None, comment=None):
364
379
  """Prepare all required paths and files to create a new project.
365
380
 
366
381
  :param runsView: default view to associate the project with
@@ -376,7 +391,9 @@ class Project(object):
376
391
  # Create db through the mapper
377
392
  self.mapper = self.createMapper(self.dbPath)
378
393
  # Store creation time
379
- self._storeCreationTime(dt.datetime.now())
394
+ self._creationTime = pwobj.String(dt.datetime.now())
395
+ self.setComment(comment)
396
+ self._storeCreationTime()
380
397
  # Load settings from .conf files and write .sqlite
381
398
  self.settings = self.createSettings(runsView=runsView,
382
399
  readOnly=readOnly)
@@ -386,12 +403,11 @@ class Project(object):
386
403
 
387
404
  self._loadHosts(hostsConf)
388
405
 
389
- def _storeCreationTime(self, creationTime):
406
+ def _storeCreationTime(self, new=True):
390
407
  """ Store the creation time in the project db. """
391
408
  # Store creation time
392
- creation = pwobj.String(objName=PROJECT_CREATION_TIME)
393
- creation.set(creationTime)
394
- self.mapper.insert(creation)
409
+ self._creationTime.setName(PROJECT_CREATION_TIME)
410
+ self.mapper.store(self._creationTime)
395
411
  self.mapper.commit()
396
412
 
397
413
  def _cleanData(self):
@@ -820,7 +836,7 @@ class Project(object):
820
836
  for prot in protocols:
821
837
  node = runsGraph.getNode(prot.strId())
822
838
  if node:
823
- childs = [node.run for node in node.getChilds() if
839
+ childs = [node.run for node in node.getChildren() if
824
840
  self.__validDependency(prot, node.run, protocols)]
825
841
  if childs:
826
842
  deps = [' ' + c.getRunName() for c in childs]
@@ -839,7 +855,7 @@ class Project(object):
839
855
  visitedNodes[int(node.getName())] = node
840
856
 
841
857
  def getDescendents(rootNode):
842
- for child in rootNode.getChilds():
858
+ for child in rootNode.getChildren():
843
859
  if int(child.getName()) not in visitedNodes:
844
860
  visitedNodes[int(child.getName())] = child
845
861
  getDescendents(child)
@@ -998,7 +1014,7 @@ class Project(object):
998
1014
  affectedProtocolsActive[protocol.getObjId()] = protocol
999
1015
 
1000
1016
  node = runGraph.getNode(protocol.strId())
1001
- dependencies = [node.run for node in node.getChilds()]
1017
+ dependencies = [node.run for node in node.getChildren()]
1002
1018
  for dep in dependencies:
1003
1019
  if not dep.getObjId() in auxProtList:
1004
1020
  auxProtList.append([dep.getObjId(), level])
@@ -1034,7 +1050,7 @@ class Project(object):
1034
1050
  node = self.getRunsGraph().getNode(protocol.strId())
1035
1051
  deps = []
1036
1052
 
1037
- for node in node.getChilds():
1053
+ for node in node.getChildren():
1038
1054
  for _, inputObj in node.run.iterInputAttributes():
1039
1055
  value = inputObj.get()
1040
1056
  if (value is not None and
@@ -1187,7 +1203,7 @@ class Project(object):
1187
1203
  node = g.getNode(prot.strId())
1188
1204
  newProt = newDict[prot.getObjId()]
1189
1205
 
1190
- for childNode in node.getChilds():
1206
+ for childNode in node.getChildren():
1191
1207
  newChildProt = newDict.get(childNode.run.getObjId(), None)
1192
1208
 
1193
1209
  if newChildProt:
@@ -1246,7 +1262,7 @@ class Project(object):
1246
1262
  protId = prot.getObjId()
1247
1263
  node = g.getNode(prot.strId())
1248
1264
 
1249
- for childNode in node.getChilds():
1265
+ for childNode in node.getChildren():
1250
1266
  childId = childNode.run.getObjId()
1251
1267
  childProt = childNode.run
1252
1268
  if childId in newDict:
@@ -1705,7 +1721,7 @@ class Project(object):
1705
1721
  parentNode.addChild(node)
1706
1722
  if os.environ.get('CHECK_CYCLIC_REDUNDANCY') and self._checkCyclicRedundancy(parentNode, node):
1707
1723
  conflictiveNodes = set()
1708
- for child in node.getChilds():
1724
+ for child in node.getChildren():
1709
1725
  if node in child._parents:
1710
1726
  child._parents.remove(node)
1711
1727
  conflictiveNodes.add(child)
@@ -1714,7 +1730,7 @@ class Project(object):
1714
1730
  child.getLabel() + '(' + child.getName() + ')'))
1715
1731
 
1716
1732
  for conflictNode in conflictiveNodes:
1717
- node._childs.remove(conflictNode)
1733
+ node._children.remove(conflictNode)
1718
1734
 
1719
1735
  return False
1720
1736
  return True
@@ -1747,7 +1763,7 @@ class Project(object):
1747
1763
  def depthFirstSearch(node):
1748
1764
  visitedNodes.add(node)
1749
1765
  recursionStack.add(node)
1750
- for child in node.getChilds():
1766
+ for child in node.getChildren():
1751
1767
  if child not in visitedNodes:
1752
1768
  if depthFirstSearch(child):
1753
1769
  return True
@@ -31,24 +31,28 @@ import os
31
31
  from pyworkflow.project import Manager
32
32
  import pyworkflow.utils as pwutils
33
33
 
34
+ EMPTY_ARG = "-"
35
+
34
36
 
35
37
  def usage(error):
38
+
36
39
  print("""
37
40
  ERROR: %s
38
41
 
39
- Usage: scipion python -m pyworkflow.project.scripts.create NAME [WORKFLOW] [LOCATION]
42
+ Usage: scipion python -m pyworkflow.project.scripts.create NAME [WORKFLOW] [LOCATION] [COMMENT]
40
43
  NAME: project name
41
44
  WORKFLOW: path to a Scipion json workflow
42
45
  LOCATION: where to create it, defaults to scipion default location
46
+ COMMENT: project comment, location is mandatory in this case... for a NULL LOCATION pass %s
43
47
 
44
48
  This script will create a project project, optionally based on a workflow file
45
- """ % error)
49
+ """ % (error, EMPTY_ARG))
46
50
  sys.exit(1)
47
51
 
48
52
 
49
53
  n = len(sys.argv)
50
54
 
51
- if n < 2 or n > 4:
55
+ if n < 2 or n > 5:
52
56
  usage("Incorrect number of input parameters")
53
57
 
54
58
  projName = sys.argv[1]
@@ -56,6 +60,12 @@ projName = sys.argv[1]
56
60
  jsonFile = None if n < 3 else os.path.abspath(sys.argv[2])
57
61
  location = None if n < 4 else sys.argv[3]
58
62
 
63
+ # Location with - is None
64
+ if location == EMPTY_ARG:
65
+ location = None
66
+
67
+ comment = None if n < 5 else sys.argv[4]
68
+
59
69
  # This might not be working anymore for python3.
60
70
  # I'm getting invalid ELF header triggered by matplotlib -->from . import _tkagg
61
71
  # path = pw.join('gui', 'no-tkinter')
@@ -71,7 +81,7 @@ if manager.hasProject(projName):
71
81
  if jsonFile is not None and not os.path.exists(jsonFile):
72
82
  usage("Nonexistent json file: %s" % pwutils.red(jsonFile))
73
83
 
74
- project = manager.createProject(projName, location=location)
84
+ project = manager.createProject(projName, location=location, comment=comment)
75
85
 
76
86
  if jsonFile is not None:
77
87
  protDict = project.loadProtocols(jsonFile)
@@ -92,7 +92,7 @@ roots = runGraph.getRootNodes()
92
92
  # and the graph is lineal
93
93
 
94
94
  for root in roots:
95
- for child in root.getChilds():
95
+ for child in root.getChildren():
96
96
  workflow, _ = project._getSubworkflow(child.run)
97
97
  for prot, level in workflow.values():
98
98
  protLabelName = prot.getObjLabel()
@@ -75,6 +75,7 @@ LEVEL_CHOICES = ('Normal', 'Advanced')
75
75
  # Param names for GPU processing
76
76
  USE_GPU = 'useGpu'
77
77
  GPU_LIST = 'gpuList'
78
+ VOID_GPU = 99
78
79
 
79
80
  # Job management
80
81
  UNKNOWN_JOBID = -1
@@ -69,7 +69,7 @@ class StepExecutor:
69
69
  numberOfMpi, numberOfThreads,
70
70
  self.hostConfig,
71
71
  env=env, cwd=cwd, gpuList=self.getGpuList(), executable=executable)
72
-
72
+
73
73
  def _getRunnable(self, steps, n=1):
74
74
  """ Return the n steps that are 'new' and all its
75
75
  dependencies have been finished, or None if none ready.
@@ -79,11 +79,16 @@ class StepExecutor:
79
79
  for s in steps:
80
80
  if (s.getStatus() == cts.STATUS_NEW and
81
81
  all(steps[i-1].isFinished() for i in s._prerequisites)):
82
- rs.append(s)
83
- if len(rs) == n:
84
- break
82
+
83
+ if self._isStepRunnable(s):
84
+ rs.append(s)
85
+ if len(rs) == n:
86
+ break
85
87
  return rs
86
-
88
+ def _isStepRunnable(self, step):
89
+ """ Should be implemented by inherited classes to test extra conditions """
90
+ return True
91
+
87
92
  def _arePending(self, steps):
88
93
  """ Return True if there are pending steps (either running or waiting)
89
94
  that can be done and thus enable other steps to be executed.
@@ -169,28 +174,133 @@ class ThreadStepExecutor(StepExecutor):
169
174
  # all the threads
170
175
  self.gpuDict = {}
171
176
 
177
+ self._assignGPUperNode()
178
+
179
+ def _assignGPUperNode(self):
180
+ # If we have GPUs
172
181
  if self.gpuList:
173
- nodes = range(nThreads)
182
+
183
+ nThreads = self.numberOfProcs
184
+
185
+ # Nodes: each concurrent steps
186
+ nodes = range(1, nThreads+1)
187
+
188
+ # Number of GPUs
174
189
  nGpu = len(self.gpuList)
175
190
 
191
+ # If more GPUs than threads
176
192
  if nGpu > nThreads:
177
- chunk = int(nGpu / nThreads)
178
- for i, node in enumerate(nodes):
179
- self.gpuDict[node] = list(self.gpuList[i*chunk:(i+1)*chunk])
193
+
194
+ # Get the ratio: 2 GPUs per thread? 3 GPUs per thread?
195
+ # 3 GPU and 2 threads is rounded to 1 (flooring)
196
+ step = int(nGpu / nThreads)
197
+ spare = nGpu % nThreads
198
+ fromPos = 0
199
+ # For each node(concurrent thread)
200
+ for node in nodes:
201
+ # Store the GPUS per thread:
202
+ # GPUs: 0 1 2
203
+ # Threads 2 (step 1)
204
+ # Node 0 : GPU 0 1
205
+ # Node 1 : GPU 2
206
+
207
+ extraGpu = 1 if spare>0 else 0
208
+ toPos = fromPos + step +extraGpu
209
+ gpusForNode = list(self.gpuList[fromPos:toPos])
210
+
211
+ newGpusForNode = self.cleanVoidGPUs(gpusForNode)
212
+ if len(newGpusForNode) == 0:
213
+ logger.info("Gpu slot cancelled: all were null Gpus -> %s" % gpusForNode)
214
+ else:
215
+ logger.info("GPUs %s assigned to node %s" % (newGpusForNode, node))
216
+ self.gpuDict[-node] = newGpusForNode
217
+
218
+ fromPos = toPos
219
+ spare-=1
220
+
180
221
  else:
181
222
  # Expand gpuList repeating until reach nThreads items
182
223
  if nThreads > nGpu:
183
- newList = self.gpuList * (int(nThreads/nGpu)+1)
184
- self.gpuList = newList[:nThreads]
185
-
186
- for node, gpu in zip(nodes, self.gpuList):
187
- self.gpuDict[node] = [gpu]
224
+ logger.warning("GPUs are no longer extended. If you want all GPUs to match threads repeat as many "
225
+ "GPUs as threads.")
226
+ # newList = self.gpuList * (int(nThreads / nGpu) + 1)
227
+ # self.gpuList = newList[:nThreads]
228
+
229
+ for index, gpu in enumerate(self.gpuList):
230
+
231
+ if gpu == cts.VOID_GPU:
232
+ logger.info("Void GPU (%s) found in the list. Skipping the slot." % cts.VOID_GPU)
233
+ else:
234
+ logger.info("GPU slot for gpu %s." % gpu)
235
+ # Any negative number in the key means a free gpu slot. can't be 0!
236
+ self.gpuDict[-index-1] = [gpu]
237
+
238
+ def cleanVoidGPUs(self, gpuList):
239
+ newGPUList=[]
240
+ for gpuid in gpuList:
241
+ if gpuid == cts.VOID_GPU:
242
+ logger.info("Void GPU detected in %s" % gpuList)
243
+ else:
244
+ newGPUList.append(gpuid)
245
+ return newGPUList
188
246
 
189
247
  def getGpuList(self):
190
248
  """ Return the GPU list assigned to current thread
191
249
  or empty list if not using GPUs. """
192
- return self.gpuDict.get(threading.current_thread().thId, [])
193
-
250
+
251
+ # If the node id has assigned gpus?
252
+ nodeId = threading.current_thread().thId
253
+ if nodeId in self.gpuDict:
254
+ gpus = self.gpuDict.get(nodeId)
255
+ logger.info("Reusing GPUs (%s) slot for %s" % (gpus, nodeId))
256
+ return gpus
257
+ else:
258
+
259
+ gpus = self.getFreeGpuSlot(nodeId)
260
+ if gpus is None:
261
+ logger.warning("Step on node %s is requesting GPUs but there isn't any available. Review configuration of threads/GPUs. Returning and empty list." % nodeId)
262
+ return []
263
+ else:
264
+ return gpus
265
+ def getFreeGpuSlot(self, nodeId=None):
266
+ """ Returns a free gpu slot available or None. If node is passed it also reserves it for that node
267
+
268
+ :param node: node to make the reserve of Gpus
269
+ """
270
+ for node in self.gpuDict.keys():
271
+ # This is a free node. Book it
272
+ if node < 0:
273
+ gpus = self.gpuDict[node]
274
+
275
+ if nodeId is not None:
276
+ self.gpuDict.pop(node)
277
+ self.gpuDict[nodeId] = gpus
278
+ logger.info("GPUs %s assigned to thread %s" % (gpus, nodeId))
279
+ else:
280
+ logger.info("Free gpu slot found at %s" % node)
281
+ return gpus
282
+
283
+ return None
284
+ def freeGpusSlot(self, node):
285
+ gpus = self.gpuDict.get(node, None)
286
+
287
+ # Some nodes/threads do not use gpus so may not be booked and not in the dictionary
288
+ if gpus is not None:
289
+ self.gpuDict.pop(node)
290
+ self.gpuDict[-node-1] = gpus
291
+ logger.info("GPUs %s freed from thread %s" % (gpus, node))
292
+ else:
293
+ logger.debug("node %s not found in GPU slots" % node)
294
+
295
+ def _isStepRunnable(self, step):
296
+ """ Overwrite this method to check GPUs availability"""
297
+
298
+ if self.gpuList and step.needsGPU() and self.getFreeGpuSlot() is None:
299
+ logger.info("Can't run step %s. Needs gpus and there are no free gpu slots" % step)
300
+ return False
301
+
302
+ return True
303
+
194
304
  def runSteps(self, steps,
195
305
  stepStartedCallback,
196
306
  stepFinishedCallback,
@@ -213,7 +323,9 @@ class ThreadStepExecutor(StepExecutor):
213
323
  sharedLock = threading.Lock()
214
324
 
215
325
  runningSteps = {} # currently running step in each node ({node: step})
216
- freeNodes = list(range(self.numberOfProcs)) # available nodes to send jobs
326
+ freeNodes = list(range(1, self.numberOfProcs+1)) # available nodes to send jobs
327
+ logger.info("Execution threads: %s" % freeNodes)
328
+ logger.info("Running steps using %s threads. 1 thread is used for this main proccess." % self.numberOfProcs)
217
329
 
218
330
  while True:
219
331
  # See which of the runningSteps are not really running anymore.
@@ -225,6 +337,7 @@ class ThreadStepExecutor(StepExecutor):
225
337
  for node in nodesFinished:
226
338
  step = runningSteps.pop(node) # remove entry from runningSteps
227
339
  freeNodes.append(node) # the node is available now
340
+ self.freeGpusSlot(node)
228
341
  # Notify steps termination and check if we should continue
229
342
  doContinue = stepFinishedCallback(step)
230
343
  if not doContinue:
@@ -245,8 +358,9 @@ class ThreadStepExecutor(StepExecutor):
245
358
  anyLaunched = True
246
359
  step.setRunning()
247
360
  stepStartedCallback(step)
248
- node = freeNodes.pop() # take an available node
361
+ node = freeNodes.pop(0) # take an available node
249
362
  runningSteps[node] = step
363
+ logger.debug("Running step %s on node %s" % (step, node))
250
364
  t = StepThread(node, step, sharedLock)
251
365
  # won't keep process up if main thread ends
252
366
  t.daemon = True
@@ -255,7 +369,7 @@ class ThreadStepExecutor(StepExecutor):
255
369
 
256
370
  if not anyLaunched:
257
371
  if anyPending: # nothing running
258
- time.sleep(0.5)
372
+ time.sleep(3)
259
373
  else:
260
374
  break # yeah, we are done, either failed or finished :)
261
375
 
@@ -345,18 +345,15 @@ class Form(object):
345
345
  self.addParam('hostName', StringParam, default="localhost",
346
346
  label='Execution host',
347
347
  help='Select in which of the available do you want to launch this protocol.')
348
+
349
+ # NOTE: help messages for these parameters is defined at HELP_MPI_THREADS and used in form.py.
350
+
348
351
  if threads > 0:
349
352
  self.addParam('numberOfThreads', IntParam, default=threads,
350
- label='Threads',
351
- help='This option provides shared-memory parallelization on multi-core machines.'
352
- 'It does not require any additional software.')
353
+ label='Threads')
353
354
  if mpi > 0:
354
355
  self.addParam('numberOfMpi', IntParam, default=mpi,
355
- label='MPI processes',
356
- help='This option provides the number of independent processes spawned'
357
- 'in parallel by <mpirun> command in a cluster, usually through'
358
- 'a queue system. This will require that you have compile this software '
359
- 'with <mpi> support.')
356
+ label='MPI processes')
360
357
  if jobsize > 0:
361
358
  self.addParam('mpiJobSize', IntParam, default=jobsize,
362
359
  label='MPI job size', condition="numberOfMpi>1",