scipion-pyworkflow 3.8.0__py3-none-any.whl → 3.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyworkflow/config.py +8 -2
- pyworkflow/constants.py +6 -1
- pyworkflow/gui/canvas.py +6 -6
- pyworkflow/gui/dialog.py +3 -2
- pyworkflow/gui/form.py +12 -2
- pyworkflow/gui/graph.py +6 -6
- pyworkflow/gui/graph_layout.py +2 -2
- pyworkflow/gui/project/base.py +7 -7
- pyworkflow/gui/project/project.py +36 -28
- pyworkflow/gui/project/steps.py +8 -3
- pyworkflow/gui/project/viewdata.py +1 -1
- pyworkflow/gui/project/viewprotocols.py +9 -9
- pyworkflow/gui/project/viewprotocols_extra.py +3 -3
- pyworkflow/gui/widgets.py +2 -2
- pyworkflow/mapper/sqlite.py +5 -5
- pyworkflow/object.py +1 -1
- pyworkflow/plugin.py +1 -0
- pyworkflow/project/config.py +4 -1
- pyworkflow/project/manager.py +4 -3
- pyworkflow/project/project.py +37 -21
- pyworkflow/project/scripts/create.py +14 -4
- pyworkflow/project/scripts/schedule.py +1 -1
- pyworkflow/protocol/constants.py +1 -0
- pyworkflow/protocol/executor.py +133 -19
- pyworkflow/protocol/params.py +5 -8
- pyworkflow/protocol/protocol.py +36 -36
- pyworkflow/template.py +1 -1
- pyworkflow/utils/graph.py +24 -51
- pyworkflow/utils/properties.py +16 -12
- pyworkflowtests/protocols.py +3 -3
- pyworkflowtests/tests/test_protocol_execution.py +63 -0
- {scipion_pyworkflow-3.8.0.dist-info → scipion_pyworkflow-3.9.1.dist-info}/METADATA +1 -1
- {scipion_pyworkflow-3.8.0.dist-info → scipion_pyworkflow-3.9.1.dist-info}/RECORD +38 -38
- {scipion_pyworkflow-3.8.0.dist-info → scipion_pyworkflow-3.9.1.dist-info}/WHEEL +1 -1
- {scipion_pyworkflow-3.8.0.dist-info → scipion_pyworkflow-3.9.1.dist-info}/LICENSE.txt +0 -0
- {scipion_pyworkflow-3.8.0.dist-info → scipion_pyworkflow-3.9.1.dist-info}/dependency_links.txt +0 -0
- {scipion_pyworkflow-3.8.0.dist-info → scipion_pyworkflow-3.9.1.dist-info}/entry_points.txt +0 -0
- {scipion_pyworkflow-3.8.0.dist-info → scipion_pyworkflow-3.9.1.dist-info}/top_level.txt +0 -0
pyworkflow/project/project.py
CHANGED
@@ -106,9 +106,11 @@ class Project(object):
         self.settings:config.ProjectSettings = None
         # Host configuration
         self._hosts = None
+
         # Creation time should be stored in project.sqlite when the project
         # is created and then loaded with other properties from the database
         self._creationTime = None
+
         # Time stamp with the last run has been updated
         self._lastRunTime = None
@@ -145,7 +147,16 @@ class Project(object):
         """ Return the time when the project was created. """
         # In project.create method, the first object inserted
         # in the mapper should be the creation time
-        return self._creationTime
+        return self._creationTime.datetime()
+
+
+    def getComment(self):
+        """ Returns the project comment. Stored as CreationTime comment."""
+        return self._creationTime.getObjComment()
+
+    def setComment(self, newComment):
+        """ Sets the project comment """
+        self._creationTime.setObjComment(newComment)

     def getSettingsCreationTime(self):
         return self.settings.getCreationTime()
@@ -154,7 +165,7 @@ class Project(object):
         """ Returns the time elapsed from the creation to the last
        execution time. """
         if self._creationTime and self._lastRunTime:
-            creationTs = self.
+            creationTs = self.getCreationTime()
             lastRunTs = self._lastRunTime.datetime()
             return lastRunTs - creationTs
         return None
@@ -293,12 +304,16 @@ class Project(object):
         creationTime = self.mapper.selectBy(name=PROJECT_CREATION_TIME)

         if creationTime:  # CreationTime was found in project.sqlite
-
+            ctStr = creationTime[0]  # This is our String type instance
+
+            # We store it in mem as dateime
+            self._creationTime = ctStr
+
         else:
             # We should read the creation time from settings.sqlite and
             # update the CreationTime in the project.sqlite
-            self._creationTime = self.getSettingsCreationTime()
-            self._storeCreationTime(
+            self._creationTime = pwobj.String(self.getSettingsCreationTime())
+            self._storeCreationTime()

     # ---- Helper functions to load different pieces of a project
     def _loadDb(self, dbPath):
@@ -360,7 +375,7 @@ class Project(object):
         return self.settings.getProtocolView()

     def create(self, runsView=1, readOnly=False, hostsConf=None,
-               protocolsConf=None):
+               protocolsConf=None, comment=None):
         """Prepare all required paths and files to create a new project.

         :param runsView: default view to associate the project with
@@ -376,7 +391,9 @@ class Project(object):
         # Create db through the mapper
         self.mapper = self.createMapper(self.dbPath)
         # Store creation time
-        self.
+        self._creationTime = pwobj.String(dt.datetime.now())
+        self.setComment(comment)
+        self._storeCreationTime()
         # Load settings from .conf files and write .sqlite
         self.settings = self.createSettings(runsView=runsView,
                                             readOnly=readOnly)
@@ -386,12 +403,11 @@ class Project(object):

         self._loadHosts(hostsConf)

-    def _storeCreationTime(self,
+    def _storeCreationTime(self, new=True):
         """ Store the creation time in the project db. """
         # Store creation time
-
-
-        self.mapper.insert(creation)
+        self._creationTime.setName(PROJECT_CREATION_TIME)
+        self.mapper.store(self._creationTime)
         self.mapper.commit()

     def _cleanData(self):
@@ -820,7 +836,7 @@ class Project(object):
         for prot in protocols:
             node = runsGraph.getNode(prot.strId())
             if node:
-                childs = [node.run for node in node.
+                childs = [node.run for node in node.getChildren() if
                           self.__validDependency(prot, node.run, protocols)]
                 if childs:
                     deps = [' ' + c.getRunName() for c in childs]
@@ -839,7 +855,7 @@ class Project(object):
            visitedNodes[int(node.getName())] = node

        def getDescendents(rootNode):
-            for child in rootNode.
+            for child in rootNode.getChildren():
                if int(child.getName()) not in visitedNodes:
                    visitedNodes[int(child.getName())] = child
                    getDescendents(child)
@@ -998,7 +1014,7 @@ class Project(object):
                affectedProtocolsActive[protocol.getObjId()] = protocol

            node = runGraph.getNode(protocol.strId())
-            dependencies = [node.run for node in node.
+            dependencies = [node.run for node in node.getChildren()]
            for dep in dependencies:
                if not dep.getObjId() in auxProtList:
                    auxProtList.append([dep.getObjId(), level])
@@ -1034,7 +1050,7 @@ class Project(object):
        node = self.getRunsGraph().getNode(protocol.strId())
        deps = []

-        for node in node.
+        for node in node.getChildren():
            for _, inputObj in node.run.iterInputAttributes():
                value = inputObj.get()
                if (value is not None and
@@ -1187,7 +1203,7 @@ class Project(object):
            node = g.getNode(prot.strId())
            newProt = newDict[prot.getObjId()]

-            for childNode in node.
+            for childNode in node.getChildren():
                newChildProt = newDict.get(childNode.run.getObjId(), None)

                if newChildProt:
@@ -1246,7 +1262,7 @@ class Project(object):
            protId = prot.getObjId()
            node = g.getNode(prot.strId())

-            for childNode in node.
+            for childNode in node.getChildren():
                childId = childNode.run.getObjId()
                childProt = childNode.run
                if childId in newDict:
@@ -1705,7 +1721,7 @@ class Project(object):
                parentNode.addChild(node)
                if os.environ.get('CHECK_CYCLIC_REDUNDANCY') and self._checkCyclicRedundancy(parentNode, node):
                    conflictiveNodes = set()
-                    for child in node.
+                    for child in node.getChildren():
                        if node in child._parents:
                            child._parents.remove(node)
                            conflictiveNodes.add(child)
@@ -1714,7 +1730,7 @@ class Project(object):
                                       child.getLabel() + '(' + child.getName() + ')'))

                    for conflictNode in conflictiveNodes:
-                        node.
+                        node._children.remove(conflictNode)

                    return False
                return True
@@ -1747,7 +1763,7 @@ class Project(object):
        def depthFirstSearch(node):
            visitedNodes.add(node)
            recursionStack.add(node)
-            for child in node.
+            for child in node.getChildren():
                if child not in visitedNodes:
                    if depthFirstSearch(child):
                        return True
@@ -1918,7 +1934,7 @@ class Project(object):

            if n is not None:
                # Iterate recursively all descendants
-                for node in n.
+                for node in n.iterChildren():
                    connection[node.pointer.getUniqueId()] = True
                    # Add also
                    connection[node.pointer.get().strId()] = True
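The new comment API stores the project comment as the object comment of the CreationTime entry in project.sqlite, and getCreationTime() now returns a plain datetime instead of the stored String wrapper. A minimal usage sketch (project name and comment text are illustrative; the comment keyword of Manager.createProject is the one forwarded to Project.create in the scripts/create.py change below):

    from pyworkflow.project import Manager

    manager = Manager()
    # 'comment' ends up in Project.create(), which calls setComment()
    project = manager.createProject("demo_project", comment="test run on GPU node")

    print(project.getComment())       # -> "test run on GPU node"
    project.setComment("re-run with new settings")
    print(project.getCreationTime())  # datetime.datetime, no longer a String object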
pyworkflow/project/scripts/create.py
CHANGED
@@ -31,24 +31,28 @@ import os
 from pyworkflow.project import Manager
 import pyworkflow.utils as pwutils

+EMPTY_ARG = "-"
+

 def usage(error):
+
     print("""
     ERROR: %s

-    Usage: scipion python -m pyworkflow.project.scripts.create NAME [WORKFLOW] [LOCATION]
+    Usage: scipion python -m pyworkflow.project.scripts.create NAME [WORKFLOW] [LOCATION] [COMMENT]
        NAME: project name
        WORKFLOW: path to a Scipion json workflow
        LOCATION: where to create it, defaults to scipion default location
+       COMMENT: project comment, location is mandatory in this case... for a NULL LOCATION pass %s

    This script will create a project project, optionally based on a workflow file
-    """ % error)
+    """ % (error, EMPTY_ARG))
     sys.exit(1)


 n = len(sys.argv)

-if n < 2 or n >
+if n < 2 or n > 5:
     usage("Incorrect number of input parameters")

 projName = sys.argv[1]
@@ -56,6 +60,12 @@ projName = sys.argv[1]
 jsonFile = None if n < 3 else os.path.abspath(sys.argv[2])
 location = None if n < 4 else sys.argv[3]

+# Location with - is None
+if location == EMPTY_ARG:
+    location = None
+
+comment = None if n < 5 else sys.argv[4]
+
 # This might not be working anymore for python3.
 # I'm getting invalid ELF header triggered by matplotlib -->from . import _tkagg
 # path = pw.join('gui', 'no-tkinter')
@@ -71,7 +81,7 @@ if manager.hasProject(projName):
 if jsonFile is not None and not os.path.exists(jsonFile):
     usage("Nonexistent json file: %s" % pwutils.red(jsonFile))

-project = manager.createProject(projName, location=location)
+project = manager.createProject(projName, location=location, comment=comment)

 if jsonFile is not None:
     protDict = project.loadProtocols(jsonFile)
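For example, assuming the Scipion launcher is available, a call such as `scipion python -m pyworkflow.project.scripts.create MyProject /tmp/workflow.json - "first benchmark run"` (project name, workflow path and comment are illustrative) creates the project in the default location: the EMPTY_ARG placeholder "-" fills the LOCATION slot, which becomes mandatory once a COMMENT is passed, and the last argument is stored as the project comment.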
pyworkflow/project/scripts/schedule.py
CHANGED
@@ -92,7 +92,7 @@ roots = runGraph.getRootNodes()
 # and the graph is lineal

 for root in roots:
-    for child in root.
+    for child in root.getChildren():
         workflow, _ = project._getSubworkflow(child.run)
         for prot, level in workflow.values():
             protLabelName = prot.getObjLabel()
pyworkflow/protocol/constants.py
CHANGED
pyworkflow/protocol/executor.py
CHANGED
@@ -69,7 +69,7 @@ class StepExecutor:
                        numberOfMpi, numberOfThreads,
                        self.hostConfig,
                        env=env, cwd=cwd, gpuList=self.getGpuList(), executable=executable)
-
+
    def _getRunnable(self, steps, n=1):
        """ Return the n steps that are 'new' and all its
        dependencies have been finished, or None if none ready.
@@ -79,11 +79,16 @@ class StepExecutor:
        for s in steps:
            if (s.getStatus() == cts.STATUS_NEW and
                    all(steps[i-1].isFinished() for i in s._prerequisites)):
-
-                if
-
+
+                if self._isStepRunnable(s):
+                    rs.append(s)
+                    if len(rs) == n:
+                        break
        return rs
-
+    def _isStepRunnable(self, step):
+        """ Should be implemented by inherited classes to test extra conditions """
+        return True
+
    def _arePending(self, steps):
        """ Return True if there are pending steps (either running or waiting)
        that can be done and thus enable other steps to be executed.
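The new _isStepRunnable() hook gives subclasses a veto over otherwise runnable steps; the ThreadStepExecutor changes below use it to hold back GPU steps while no GPU slot is free. A sketch of a hypothetical custom executor built on the same hook (the class and its disk-space condition are illustrative, not part of the package):

    import shutil

    from pyworkflow.protocol.executor import StepExecutor

    class DiskAwareExecutor(StepExecutor):
        MIN_FREE_GB = 10  # illustrative threshold

        def _isStepRunnable(self, step):
            # Only launch a step when enough scratch space is available.
            freeGb = shutil.disk_usage("/tmp").free / 1024 ** 3
            return freeGb > self.MIN_FREE_GB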
@@ -169,28 +174,133 @@ class ThreadStepExecutor(StepExecutor):
        # all the threads
        self.gpuDict = {}

+        self._assignGPUperNode()
+
+    def _assignGPUperNode(self):
+        # If we have GPUs
        if self.gpuList:
-
+
+            nThreads = self.numberOfProcs
+
+            # Nodes: each concurrent steps
+            nodes = range(1, nThreads+1)
+
+            # Number of GPUs
            nGpu = len(self.gpuList)

+            # If more GPUs than threads
            if nGpu > nThreads:
-
-
-
+
+                # Get the ratio: 2 GPUs per thread? 3 GPUs per thread?
+                # 3 GPU and 2 threads is rounded to 1 (flooring)
+                step = int(nGpu / nThreads)
+                spare = nGpu % nThreads
+                fromPos = 0
+                # For each node(concurrent thread)
+                for node in nodes:
+                    # Store the GPUS per thread:
+                    # GPUs: 0 1 2
+                    # Threads 2 (step 1)
+                    # Node 0 : GPU 0 1
+                    # Node 1 : GPU 2
+
+                    extraGpu = 1 if spare>0 else 0
+                    toPos = fromPos + step +extraGpu
+                    gpusForNode = list(self.gpuList[fromPos:toPos])
+
+                    newGpusForNode = self.cleanVoidGPUs(gpusForNode)
+                    if len(newGpusForNode) == 0:
+                        logger.info("Gpu slot cancelled: all were null Gpus -> %s" % gpusForNode)
+                    else:
+                        logger.info("GPUs %s assigned to node %s" % (newGpusForNode, node))
+                        self.gpuDict[-node] = newGpusForNode
+
+                    fromPos = toPos
+                    spare-=1
+
            else:
                # Expand gpuList repeating until reach nThreads items
                if nThreads > nGpu:
-
-
-
-
-
+                    logger.warning("GPUs are no longer extended. If you want all GPUs to match threads repeat as many "
+                                   "GPUs as threads.")
+                    # newList = self.gpuList * (int(nThreads / nGpu) + 1)
+                    # self.gpuList = newList[:nThreads]
+
+                for index, gpu in enumerate(self.gpuList):
+
+                    if gpu == cts.VOID_GPU:
+                        logger.info("Void GPU (%s) found in the list. Skipping the slot." % cts.VOID_GPU)
+                    else:
+                        logger.info("GPU slot for gpu %s." % gpu)
+                        # Any negative number in the key means a free gpu slot. can't be 0!
+                        self.gpuDict[-index-1] = [gpu]
+
+    def cleanVoidGPUs(self, gpuList):
+        newGPUList=[]
+        for gpuid in gpuList:
+            if gpuid == cts.VOID_GPU:
+                logger.info("Void GPU detected in %s" % gpuList)
+            else:
+                newGPUList.append(gpuid)
+        return newGPUList

    def getGpuList(self):
        """ Return the GPU list assigned to current thread
        or empty list if not using GPUs. """
-
-
+
+        # If the node id has assigned gpus?
+        nodeId = threading.current_thread().thId
+        if nodeId in self.gpuDict:
+            gpus = self.gpuDict.get(nodeId)
+            logger.info("Reusing GPUs (%s) slot for %s" % (gpus, nodeId))
+            return gpus
+        else:
+
+            gpus = self.getFreeGpuSlot(nodeId)
+            if gpus is None:
+                logger.warning("Step on node %s is requesting GPUs but there isn't any available. Review configuration of threads/GPUs. Returning and empty list." % nodeId)
+                return []
+            else:
+                return gpus
+    def getFreeGpuSlot(self, nodeId=None):
+        """ Returns a free gpu slot available or None. If node is passed it also reserves it for that node
+
+        :param node: node to make the reserve of Gpus
+        """
+        for node in self.gpuDict.keys():
+            # This is a free node. Book it
+            if node < 0:
+                gpus = self.gpuDict[node]
+
+                if nodeId is not None:
+                    self.gpuDict.pop(node)
+                    self.gpuDict[nodeId] = gpus
+                    logger.info("GPUs %s assigned to thread %s" % (gpus, nodeId))
+                else:
+                    logger.info("Free gpu slot found at %s" % node)
+                return gpus
+
+        return None
+    def freeGpusSlot(self, node):
+        gpus = self.gpuDict.get(node, None)
+
+        # Some nodes/threads do not use gpus so may not be booked and not in the dictionary
+        if gpus is not None:
+            self.gpuDict.pop(node)
+            self.gpuDict[-node-1] = gpus
+            logger.info("GPUs %s freed from thread %s" % (gpus, node))
+        else:
+            logger.debug("node %s not found in GPU slots" % node)
+
+    def _isStepRunnable(self, step):
+        """ Overwrite this method to check GPUs availability"""
+
+        if self.gpuList and step.needsGPU() and self.getFreeGpuSlot() is None:
+            logger.info("Can't run step %s. Needs gpus and there are no free gpu slots" % step)
+            return False
+
+        return True
+
    def runSteps(self, steps,
                 stepStartedCallback,
                 stepFinishedCallback,
@@ -213,7 +323,9 @@ class ThreadStepExecutor(StepExecutor):
        sharedLock = threading.Lock()

        runningSteps = {}  # currently running step in each node ({node: step})
-        freeNodes = list(range(self.numberOfProcs))  # available nodes to send jobs
+        freeNodes = list(range(1, self.numberOfProcs+1))  # available nodes to send jobs
+        logger.info("Execution threads: %s" % freeNodes)
+        logger.info("Running steps using %s threads. 1 thread is used for this main proccess." % self.numberOfProcs)

        while True:
            # See which of the runningSteps are not really running anymore.
@@ -225,6 +337,7 @@ class ThreadStepExecutor(StepExecutor):
            for node in nodesFinished:
                step = runningSteps.pop(node)  # remove entry from runningSteps
                freeNodes.append(node)  # the node is available now
+                self.freeGpusSlot(node)
                # Notify steps termination and check if we should continue
                doContinue = stepFinishedCallback(step)
                if not doContinue:
@@ -245,8 +358,9 @@ class ThreadStepExecutor(StepExecutor):
                    anyLaunched = True
                    step.setRunning()
                    stepStartedCallback(step)
-                    node = freeNodes.pop()  # take an available node
+                    node = freeNodes.pop(0)  # take an available node
                    runningSteps[node] = step
+                    logger.debug("Running step %s on node %s" % (step, node))
                    t = StepThread(node, step, sharedLock)
                    # won't keep process up if main thread ends
                    t.daemon = True
@@ -255,7 +369,7 @@ class ThreadStepExecutor(StepExecutor):

            if not anyLaunched:
                if anyPending:  # nothing running
-                    time.sleep(
+                    time.sleep(3)
                else:
                    break  # yeah, we are done, either failed or finished :)

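The slot bookkeeping in _assignGPUperNode() is easier to see outside the diff: GPUs are split across the worker threads ("nodes" 1..numberOfProcs), the first nGpu % nThreads nodes receive one extra GPU, and a negative dictionary key marks a free slot (getFreeGpuSlot() re-keys it to the thread id when booked, freeGpusSlot() releases it back under a negative key). A standalone re-statement of that distribution rule for the nGpu > nThreads case (illustrative helper, void-GPU filtering omitted):

    def assign_gpus(gpuList, nThreads):
        nGpu = len(gpuList)
        step = nGpu // nThreads           # base GPUs per node (floored ratio)
        spare = nGpu % nThreads           # remainder handed out one by one
        gpuDict, fromPos = {}, 0
        for node in range(1, nThreads + 1):
            toPos = fromPos + step + (1 if spare > 0 else 0)
            gpusForNode = gpuList[fromPos:toPos]
            if gpusForNode:
                gpuDict[-node] = gpusForNode   # negative key = free slot
            fromPos, spare = toPos, spare - 1
        return gpuDict

    # 5 GPUs over 2 worker threads -> {-1: [0, 1, 2], -2: [3, 4]}
    print(assign_gpus([0, 1, 2, 3, 4], 2))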
pyworkflow/protocol/params.py
CHANGED
@@ -345,18 +345,15 @@ class Form(object):
        self.addParam('hostName', StringParam, default="localhost",
                      label='Execution host',
                      help='Select in which of the available do you want to launch this protocol.')
+
+        # NOTE: help messages for these parameters is defined at HELP_MPI_THREADS and used in form.py.
+
        if threads > 0:
            self.addParam('numberOfThreads', IntParam, default=threads,
-                          label='Threads'
-                          help='This option provides shared-memory parallelization on multi-core machines.'
-                               'It does not require any additional software.')
+                          label='Threads')
        if mpi > 0:
            self.addParam('numberOfMpi', IntParam, default=mpi,
-                          label='MPI processes'
-                          help='This option provides the number of independent processes spawned'
-                               'in parallel by <mpirun> command in a cluster, usually through'
-                               'a queue system. This will require that you have compile this software '
-                               'with <mpi> support.')
+                          label='MPI processes')
        if jobsize > 0:
            self.addParam('mpiJobSize', IntParam, default=jobsize,
                          label='MPI job size', condition="numberOfMpi>1",
pyworkflow/protocol/protocol.py
CHANGED
@@ -25,8 +25,7 @@
 This modules contains classes required for the workflow
 execution and tracking like: Step and Protocol
 """
-import
-import sys, os
+import os
 import json
 import threading
 import time
@@ -36,7 +35,7 @@ import pyworkflow as pw
 from pyworkflow.exceptions import ValidationException, PyworkflowException
 from pyworkflow.object import *
 import pyworkflow.utils as pwutils
-from pyworkflow.utils.log import
+from pyworkflow.utils.log import getExtraLogInfo, STATUS, setDefaultLoggingContext
 from .executor import StepExecutor, ThreadStepExecutor, QueueStepExecutor
 from .constants import *
 from .params import Form
@@ -50,21 +49,24 @@ logger = logging.getLogger(__name__)

 class Step(Object):
     """ Basic execution unit.
-    It should
+    It should define its Input, Output
     and define a run method.
     """

-    def __init__(self, **kwargs):
-
+    def __init__(self, interactive=False, needsGPU=True, **kwargs):
+        super().__init__()
         self._prerequisites = CsvList()  # which steps needs to be done first
         self.status = String()
         self.initTime = String()
         self.endTime = String()
         self._error = String()
-        self.interactive = Boolean(
+        self.interactive = Boolean(interactive)
         self._resultFiles = String()
+        self._needsGPU = Boolean(needsGPU)
         self._index = None

+    def needsGPU(self) -> bool:
+        return self._needsGPU.get()
     def getIndex(self):
         return self._index

@@ -214,7 +216,7 @@ class Step(Object):
            self.status.set(status)

        except PyworkflowException as e:
-
+            logger.info(pwutils.redStr(str(e)))
            self.setFailed(str(e))
        except Exception as e:
            self.setFailed(str(e))
@@ -230,7 +232,7 @@ class FunctionStep(Step):
    This class will ease the insertion of Protocol function steps
    through the function _insertFunctionStep"""

-    def __init__(self, func=None, funcName=None, *funcArgs,
+    def __init__(self, func=None, funcName=None, *funcArgs, wait=False, interactive=False, needsGPU=True):
        """
        Params:
            func: the function that will be executed.
@@ -238,13 +240,12 @@ class FunctionStep(Step):
            *funcArgs: argument list passed to the function (serialized and stored)
            **kwargs: extra parameters.
        """
-
+        super().__init__(interactive=interactive, needsGPU=needsGPU)
        self._func = func  # Function should be set before run
        self._args = funcArgs
        self.funcName = String(funcName)
        self.argsStr = String(json.dumps(funcArgs, default=lambda x: None))
-
-        if kwargs.get('wait', False):
+        if wait:
            self.setStatus(STATUS_WAITING)

    def _runFunc(self):
@@ -280,7 +281,7 @@ class FunctionStep(Step):
        return not self.__eq__(other)

    def __str__(self):
-        return self.funcName.get()
+        return "%s - %s" % (self._objId ,self.funcName.get())


class RunJobStep(FunctionStep):
@@ -1016,12 +1017,11 @@ class Protocol(Step):
        """
        pass

-    def __insertStep(self, step,
+    def __insertStep(self, step, prerequisites=None):
        """ Insert a new step in the list.

        :param prerequisites: a single integer or a list with the steps index that need to be done
        previous to the current one."""
-        prerequisites = kwargs.get('prerequisites', None)

        if prerequisites is None:
            if len(self._steps):
@@ -1117,7 +1117,7 @@ class Protocol(Step):
        """
        return self._getPath(os.path.basename(path))

-    def _insertFunctionStep(self, func, *funcArgs,
+    def _insertFunctionStep(self, func, *funcArgs, prerequisites=None, wait=False, interactive=False, needsGPU=True):
        """
        Params:
            func: the function itself or, optionally, the name (string) of the function to be run in the Step.
@@ -1135,24 +1135,24 @@ class Protocol(Step):
        if not callable(func):
            raise Exception("Protocol._insertFunctionStep: '%s' is not callable"
                            % func)
-        step = FunctionStep(func, func.__name__, *funcArgs,
-
-        return self.__insertStep(step,
-
-    def _insertRunJobStep(self, progName, progArguments, resultFiles=[],
-
-
-
-
-
-
-    def _insertCopyFileStep(self, sourceFile, targetFile, **kwargs):
-
-
-
-
-
+        step = FunctionStep(func, func.__name__, *funcArgs, wait=wait, interactive=interactive, needsGPU=needsGPU)
+
+        return self.__insertStep(step,prerequisites)
+
+    # def _insertRunJobStep(self, progName, progArguments, resultFiles=[],
+    #                       **kwargs):
+    #     """ Insert an Step that will simple call runJob function
+    #     **args: see __insertStep
+    #     """
+    #     return self._insertFunctionStep('runJob', progName, progArguments,
+    #                                     **kwargs)
+    #
+    # def _insertCopyFileStep(self, sourceFile, targetFile, **kwargs):
+    #     """ Shortcut function to insert a step for copying a file to a destiny. """
+    #     step = FunctionStep(pwutils.copyFile, 'copyFile', sourceFile,
+    #                         targetFile,
+    #                         **kwargs)
+    #     return self.__insertStep(step, **kwargs)

    def _enterDir(self, path):
        """ Enter into a new directory path and store the current path.
@@ -1216,7 +1216,7 @@ class Protocol(Step):
        protocol that allow some kind of continue (such as ctf estimation).
        """
        for step in self.loadSteps():
-            self.__insertStep(step)
+            self.__insertStep(step, )

    def __findStartingStep(self):
        """ From a previous run, compare self._steps and self._prevSteps
@@ -2538,7 +2538,7 @@ class ProtStreamingBase(Protocol):
        self.stepsExecutionMode = STEPS_PARALLEL
    def _insertAllSteps(self):
        # Insert the step that generates the steps
-        self._insertFunctionStep(self.resumableStepGeneratorStep, str(datetime.now()))
+        self._insertFunctionStep(self.resumableStepGeneratorStep, str(datetime.now()), needsGPU=False)

    def resumableStepGeneratorStep(self, ts):
        """ This allow to resume protocols. ts is the time stamp so this stap is alway different form previous exceution"""
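As a usage illustration of the reworked step API: keyword arguments are now explicit instead of being collected through **kwargs, and a step can declare needsGPU so that ThreadStepExecutor only schedules it when a GPU slot is free. A hypothetical protocol sketch (class and step names are illustrative; the keywords mirror the new _insertFunctionStep signature):

    from pyworkflow.protocol import Protocol

    class MyGpuProtocol(Protocol):
        def _insertAllSteps(self):
            # CPU-only preparation step: never competes for a GPU slot.
            convId = self._insertFunctionStep(self.convertStep, needsGPU=False)
            # GPU step: held back by _isStepRunnable() until a slot is free,
            # and only runnable after convertStep via prerequisites.
            self._insertFunctionStep(self.processStep, prerequisites=[convId],
                                     wait=False, needsGPU=True)

        def convertStep(self):
            pass

        def processStep(self):
            pass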
pyworkflow/template.py
CHANGED
@@ -10,7 +10,7 @@ import logging
 logger = logging.getLogger(__name__)

 class Template:
-    def __init__(self, source, name, description):
+    def __init__(self, source, name, description=""):
         self.source = source
         # Tidy up templates names: removing .json.template and .json (when passed as parameter)
         self.name = name