scipion-pyworkflow 3.10.6__py3-none-any.whl → 3.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. pyworkflow/config.py +131 -67
  2. pyworkflow/constants.py +2 -1
  3. pyworkflow/gui/browser.py +39 -5
  4. pyworkflow/gui/dialog.py +2 -0
  5. pyworkflow/gui/form.py +141 -52
  6. pyworkflow/gui/gui.py +8 -8
  7. pyworkflow/gui/project/project.py +6 -7
  8. pyworkflow/gui/project/searchprotocol.py +91 -7
  9. pyworkflow/gui/project/viewdata.py +1 -1
  10. pyworkflow/gui/project/viewprotocols.py +45 -22
  11. pyworkflow/gui/project/viewprotocols_extra.py +9 -6
  12. pyworkflow/gui/widgets.py +2 -2
  13. pyworkflow/mapper/sqlite.py +4 -4
  14. pyworkflow/plugin.py +93 -44
  15. pyworkflow/project/project.py +158 -70
  16. pyworkflow/project/usage.py +165 -0
  17. pyworkflow/protocol/executor.py +30 -18
  18. pyworkflow/protocol/hosts.py +9 -6
  19. pyworkflow/protocol/launch.py +15 -8
  20. pyworkflow/protocol/params.py +59 -19
  21. pyworkflow/protocol/protocol.py +124 -58
  22. pyworkflow/resources/showj/arrowDown.png +0 -0
  23. pyworkflow/resources/showj/arrowUp.png +0 -0
  24. pyworkflow/resources/showj/background_section.png +0 -0
  25. pyworkflow/resources/showj/colRowModeOff.png +0 -0
  26. pyworkflow/resources/showj/colRowModeOn.png +0 -0
  27. pyworkflow/resources/showj/delete.png +0 -0
  28. pyworkflow/resources/showj/doc_icon.png +0 -0
  29. pyworkflow/resources/showj/download_icon.png +0 -0
  30. pyworkflow/resources/showj/enabled_gallery.png +0 -0
  31. pyworkflow/resources/showj/galleryViewOff.png +0 -0
  32. pyworkflow/resources/showj/galleryViewOn.png +0 -0
  33. pyworkflow/resources/showj/goto.png +0 -0
  34. pyworkflow/resources/showj/menu.png +0 -0
  35. pyworkflow/resources/showj/separator.png +0 -0
  36. pyworkflow/resources/showj/tableViewOff.png +0 -0
  37. pyworkflow/resources/showj/tableViewOn.png +0 -0
  38. pyworkflow/resources/showj/ui-bg_glass_75_e6e6e6_1x400.png +0 -0
  39. pyworkflow/resources/showj/ui-bg_glass_95_fef1ec_1x400.png +0 -0
  40. pyworkflow/resources/showj/ui-bg_highlight-soft_75_cccccc_1x100.png +0 -0
  41. pyworkflow/resources/showj/volumeOff.png +0 -0
  42. pyworkflow/resources/showj/volumeOn.png +0 -0
  43. pyworkflow/utils/log.py +15 -6
  44. pyworkflow/utils/properties.py +78 -92
  45. pyworkflow/utils/utils.py +3 -2
  46. pyworkflow/viewer.py +23 -1
  47. pyworkflow/webservices/config.py +0 -3
  48. pyworkflow/webservices/notifier.py +24 -34
  49. pyworkflowtests/protocols.py +1 -3
  50. pyworkflowtests/tests/test_protocol_execution.py +4 -0
  51. {scipion_pyworkflow-3.10.6.dist-info → scipion_pyworkflow-3.11.1.dist-info}/METADATA +13 -27
  52. {scipion_pyworkflow-3.10.6.dist-info → scipion_pyworkflow-3.11.1.dist-info}/RECORD +56 -35
  53. {scipion_pyworkflow-3.10.6.dist-info → scipion_pyworkflow-3.11.1.dist-info}/WHEEL +1 -1
  54. scipion_pyworkflow-3.10.6.dist-info/dependency_links.txt +0 -1
  55. {scipion_pyworkflow-3.10.6.dist-info → scipion_pyworkflow-3.11.1.dist-info}/entry_points.txt +0 -0
  56. {scipion_pyworkflow-3.10.6.dist-info → scipion_pyworkflow-3.11.1.dist-info}/licenses/LICENSE.txt +0 -0
  57. {scipion_pyworkflow-3.10.6.dist-info → scipion_pyworkflow-3.11.1.dist-info}/top_level.txt +0 -0

pyworkflow/protocol/launch.py
@@ -144,14 +144,17 @@ def _launchLocal(protocol, wait, stdin=None, stdout=None, stderr=None):
         submitDict = dict(hostConfig.getQueuesDefault())
         submitDict.update(protocol.getSubmitDict())
         submitDict['JOB_COMMAND'] = command
-        jobId = _submit(hostConfig, submitDict)
+        jobId, error = _submit(hostConfig, submitDict)
         if jobId is None or jobId == UNKNOWN_JOBID:
-            protocol.setStatus(STATUS_FAILED)
+            protocol.setFailed("There was a problem submitting this protocol to the queue engine: %s" % error)
+
         else:
+            logger.info("Protocol %s sent to queue. Got JOB ID %s" %(protocol.getRunName(), jobId))
             protocol.setJobId(jobId)
             protocol.setPid(0) # we go through the queue, so we rely on the jobId
     else:  # If not, retrieve and set the process ID (both for normal execution or when using the queue for steps)
         pId = _run(command, wait, stdin, stdout, stderr)
+        logger.info("Protocol %s executed in a subproccess. Got PID %s." % (protocol.getRunName(), pId))
         protocol.setPid(pId)

@@ -213,18 +216,22 @@ def _submit(hostConfig, submitDict, cwd=None, env=None):
     gcmd = greenStr(command)
     logger.info("** Submitting to queue: '%s'" % gcmd)

-    p = Popen(command, shell=True, stdout=PIPE, cwd=cwd, env=env)
-    out = p.communicate()[0]
+    p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd, env=env)
+    out = p.communicate()
     # Try to parse the result of qsub, searching for a number (jobId)
     # Review this, seems to exclusive to torque batch system
-    s = re.search(r'(\d+)', str(out))
+    firstLine = out[0]
+    s = re.search(r'(\d+)', str(firstLine))
     if p.returncode == 0 and s:
         job = int(s.group(0))
         logger.info("Launched job with id %s" % job)
-        return job
+        return job, None
     else:
-        logger.info("Couldn't submit to queue for reason: %s " % redStr(out.decode()))
-        return UNKNOWN_JOBID
+        # Call communicate again to get "late" messages.
+        out = p.communicate()
+        errors = [line.decode() for line in out]
+        logger.info("Couldn't submit to queue for reason: %s " % "\n".join(errors))
+        return UNKNOWN_JOBID, errors

 def _checkJobStatus(hostConfig, jobid):
     """

pyworkflow/protocol/params.py
@@ -29,6 +29,8 @@ import collections
 from pyworkflow.object import *
 from .constants import *

+BIN_THREADS_PARAM = 'binThreads'
+PARALLELIZATION = 'Parallelization'

 class FormElement(Object):
     """Base for any element on the form"""
@@ -206,11 +208,13 @@ class Form(object):
     def getClass(self):
         return type(self)

-    def addSection(self, label='', **kwargs):
+    def addSection(self, label='', updateSection=True, **kwargs):
         """Add a new section"""
-        self.lastSection = Section(self, label=label, **kwargs)
-        self._sectionList.append(self.lastSection)
-        return self.lastSection
+        newSection = Section(self, label=label, **kwargs)
+        if updateSection:
+            self.lastSection = newSection
+        self._sectionList.append(newSection)
+        return newSection

     def getSection(self, label):
         """ get section by label from _sectionList"""
@@ -219,6 +223,9 @@ class Form(object):
                 return s
         return

+    def hasSection(self, label):
+        return self.getSection(label) is not None
+
     def addGroup(self, *args, **kwargs):
         return self.lastSection.addGroup(*args, **kwargs)

@@ -232,7 +239,11 @@ class Form(object):

     def addParam(self, *args, **kwargs):
         """Add a new param to last section"""
-        return self.lastSection.addParam(*args, **kwargs)
+        if args[0] == BIN_THREADS_PARAM:
+            section = self.getParallelSection(updateSection=False)
+        else:
+            section = self.lastSection
+        return section.addParam(*args, **kwargs)

     # Adhoc method for specific params
     def addBooleanParam(self, name, label, help, default=True, **kwargs):
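
A hedged sketch of the routing above, assuming a hypothetical protocol whose form is supplied by the framework: a param whose name is exactly 'binThreads' is redirected to the Parallelization section, while any other param still goes to the last section added.

    def _defineParams(self, form):
        form.addSection(label='Input')                    # becomes lastSection
        form.addParam('inputSet', PointerParam,
                      label='Input set')                  # added to 'Input'
        form.addParam('binThreads', IntParam, default=1,
                      label='Threads')                    # redirected to 'Parallelization'
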
@@ -331,35 +342,64 @@ class Form(object):
                            'of this particular run and start from the beginning. This option'
                            'should be used carefully.'
                       )
+
+    def getParallelSection(self, updateSection=True):
+        section = self.getSection(PARALLELIZATION)
+        return section if section else self.addSection(label=PARALLELIZATION, updateSection=updateSection)

-    def addParallelSection(self, threads=1, mpi=8, condition="",
-                           hours=72, jobsize=0):
+    def addParallelSection(self, threads=1, mpi=8, binThreads=0, binThreadsHelp=None):

        """ Adds the parallelization section to the form
        pass threads=0 to disable threads parameter and mpi=0 to disable mpi params

        :param threads: default value for of threads, defaults to 1
-        :param mpi: default value for mpi, defaults to 8"""
-
-        self.addSection(label='Parallelization')
+        :param mpi: default value for mpi, defaults to 8
+        :param binThreads: Threads to pass as an argument to the program
+        """
+        self.addSection(label=PARALLELIZATION)
        self.addParam('hostName', StringParam, default="localhost",
                      label='Execution host',
                      help='Select in which of the available do you want to launch this protocol.')

-        # NOTE: help messages for these parameters is defined at HELP_MPI_THREADS and used in form.py.
+        # WARNING. THis is confusing but is described here. For legacy reasons it is not obvious how to disentangle this
+        # threads ahs 2 meanings:
+        # 1.- threads for the binary when execution mode is serial
+        # 2.- threads for Scipion when execution mode is parallel
+        # In this case (#2), there could be a binThreads which are the binary threads as in #1 case
+
+        binLabel = "Threads"
+        binHelpMsg = ("*Threads*:\nThis refers to different execution threads in the same process that "
+                      "can share memory. They run in the same computer. This value is an argument"
+                      " passed to the program integrated")
+        binHelpMsg = binThreadsHelp if binThreadsHelp else binHelpMsg

        if threads > 0:
+
+            label= "Scipion threads"
+            helpMsg= ("*Scipion threads*:\n threads created by Scipion to run the steps."
+                      " 1 thread is always used by the master/main process. Then extra threads will allow"
+                      " this protocol to run several steps at the same time, taking always into account "
+                      "restrictions to previous steps and 'theoretical GPU availability'")
+
+            if self._protocol.modeSerial():
+                label = binLabel
+                helpMsg = binHelpMsg
+
+
            self.addParam('numberOfThreads', IntParam, default=threads,
-                          label='Threads')
+                          label=label, help=helpMsg)
        if mpi > 0:
+            mpiHelp=("*MPI*:\nThis is a number of independent processes"
+                     " that communicate through message passing "
+                     "over the network (or the same computer).\n")
            self.addParam('numberOfMpi', IntParam, default=mpi,
-                          label='MPI processes')
-        if jobsize > 0:
-            self.addParam('mpiJobSize', IntParam, default=jobsize,
-                          label='MPI job size', condition="numberOfMpi>1",
-                          help='Minimum size of jobs in mpi processes.'
-                               'Set to 1 for large images (e.g. 500x500)'
-                               'and to 10 for small images (e.g. 100x100)')
+                          label='MPIs', help=mpiHelp)
+        if binThreads:
+            if self._protocol.modeParallel():
+                self.addParam(BIN_THREADS_PARAM, IntParam, default=binThreads,
+                              label=binLabel, help=binHelpMsg)
+            else:
+                logger.warning("binThreads can't be used when stepsExecutionMode is not STEPS_PARALLEL. Use threads instead.")


 class StringParam(Param):
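
For context, a hedged sketch of how a plugin protocol might call the new signature; the protocol class and its STEPS_PARALLEL setup are assumptions for the example, not taken from this diff.

    class ProtExample(Protocol):                   # hypothetical protocol
        stepsExecutionMode = STEPS_PARALLEL        # required for binThreads to apply

        def _defineParams(self, form):
            form.addSection(label='Input')
            # 3 Scipion threads to run steps concurrently, 2 MPI processes and
            # 4 threads passed as an argument to the integrated binary:
            form.addParallelSection(threads=3, mpi=2, binThreads=4)
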

pyworkflow/protocol/protocol.py
@@ -27,6 +27,7 @@ execution and tracking like: Step and Protocol
 """
 import os
 import json
+import sys
 import threading
 import time
 from datetime import datetime
@@ -342,13 +343,13 @@ class Protocol(Step):

     class MyOutput(enum.Enum):
         outputMicrographs = SetOfMicrographs
-        outputMicrographDW = SetOfMicrographs
+        outputMicrographDW = SetOfMovies

     When defining outputs you can, optionally, use this enum like:
     self._defineOutputs(**{MyOutput.outputMicrographs.name, setOfMics})
     It will help to keep output names consistently

-    Alternative an inline dictionary will work:
+    Alternative an inline dictionary will work (this is mandatory in case two or more outputs are of the same type):
     _possibleOutputs = {"outputMicrographs" : SetOfMicrographs}

     For a more fine detailed/dynamic output based on parameters, you can overwrite the getter:
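
A small, self-contained illustration of the two declaration styles the docstring mentions; the Set classes below are stand-ins for the real output classes. The dictionary form is the one that works when two outputs share a type, because duplicate values in a Python Enum collapse into aliases and the second name would be lost.

    import enum

    class SetOfMicrographs: pass     # stand-ins for real output classes
    class SetOfMovies: pass

    class MyOutput(enum.Enum):
        outputMicrographs = SetOfMicrographs     # one member per distinct type
        outputMicrographDW = SetOfMovies

    # Dictionary form, mandatory when two or more outputs are of the same type:
    _possibleOutputs = {"outputMicrographs": SetOfMicrographs,
                        "outputMicrographsDW": SetOfMicrographs}
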
@@ -396,7 +397,7 @@ class Protocol(Step):
         self._log = logger
         self._buffer = '' # text buffer for reading log files
         # Project to which the protocol belongs
-        self.__project = kwargs.get('project', None)
+        self._project = kwargs.get('project', None)
         # Filename templates dict that will be used by _getFileName
         self.__filenamesDict = {}

@@ -405,12 +406,13 @@ class Protocol(Step):
         self.lastUpdateTimeStamp = String()

         # For non-parallel protocols mpi=1 and threads=1
+        # MPIs
         self.allowMpi = hasattr(self, 'numberOfMpi')
         if not self.allowMpi:
             self.numberOfMpi = Integer(1)

+        # Threads
         self.allowThreads = hasattr(self, 'numberOfThreads')
-
         if not self.allowThreads:
             self.numberOfThreads = Integer(1)

@@ -458,6 +460,38 @@ class Protocol(Step):
         # than one time, thus avoiding deadlock situation. This fixed the concurrency problems we had before.
         self.forceSchedule = Boolean(False)

+
+    def getMPIs(self):
+        """ Returns the value of MPIs (integer)"""
+        return self.numberOfMpi.get()
+
+    def getScipionThreads(self):
+        """ Returns the number of Scipion threads. Not the threads that are argument for programs but those that will
+        run steps in parallel. This assumes cls.stepsExecutionMode = STEP_PARALLEL. See Param.addParallelSection"""
+        return self.numberOfThreads.get()
+
+    def getBinThreads(self):
+        """ Returns the number of binary threads. An integer to pass as an argument for the binary program integrated.
+        See Param.addParallelSection"""
+
+        if self.modeSerial():
+            return self.numberOfThreads.get()
+        else:
+            return self.binThreads.get()
+
+    def getTotalThreads(self):
+        """ Returns the total number of threads the protocol will need. This may be necessary when clusters require this value"""
+        if self.modeSerial():
+            # This will be the main thread + the binary threads * mpi ?
+            return 1 + self.getTotalBinThreads()
+        else:
+            # One main thread (included in Scipion threads) plus TotalBinThread time processing steps (Scipion threads -1)
+            return 1 + ((self.getScipionThreads()-1)* self.getTotalBinThreads())
+
+    def getTotalBinThreads(self):
+        """ Returns the total number to cores the binary will use: threads * mpis"""
+        return self.getBinThreads() * self.getMPIs()
+
     def _storeAttributes(self, attrList, attrDict):
         """ Store all attributes in attrDict as
         attributes of self, also store the key in attrList.
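
A worked example of the accounting introduced above, assuming parallel (STEPS_PARALLEL) mode with 3 Scipion threads, 2 binary threads and 4 MPI processes:

    scipionThreads = 3      # numberOfThreads in parallel mode
    binThreads = 2          # binThreads param
    mpis = 4                # numberOfMpi

    totalBinThreads = binThreads * mpis                        # getTotalBinThreads() -> 8
    totalThreads = 1 + (scipionThreads - 1) * totalBinThreads  # getTotalThreads() -> 17
    # one main thread plus two step-processing threads, each able to drive a
    # binary run that uses 8 cores
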
@@ -494,6 +528,7 @@ class Protocol(Step):
         """Close all output set"""
         for outputName, output in self.iterOutputAttributes():
             if isinstance(output, Set) and output.isStreamOpen():
+                logger.info("Closing %s output" % outputName)
                 self.__tryUpdateOutputSet(outputName, output, state=Set.STREAM_CLOSED)

     def _updateOutputSet(self, outputName, outputSet,
@@ -549,10 +584,10 @@ class Protocol(Step):
         return self._hasExpert

     def getProject(self):
-        return self.__project
+        return self._project

     def setProject(self, project):
-        self.__project = project
+        self._project = project

     @staticmethod
     def hasDefinition(cls):
@@ -745,7 +780,7 @@ class Protocol(Step):
             protocol = self.getProject().getRunsGraph(refresh=True).getNode(str(output.getObjParentId())).run
         else:
             # This is a problem, since protocols coming from
-            # Pointers do not have the __project set.
+            # Pointers do not have the _project set.
             # We do not have a clear way to get the protocol if
             # we do not have the project object associated
             # This case implies Direct Pointers to Sets
@@ -1081,7 +1116,8 @@ class Protocol(Step):
     def _finalizeStep(self, status, msg=None):
         """ Closes the step and setting up the protocol process id """
         super()._finalizeStep(status, msg)
-        self._pid.set(None)
+        self._closeOutputSet()
+        self._pid.set(0)

     def _updateSteps(self, updater, where="1"):
         """Set the status of all steps
@@ -1152,14 +1188,17 @@ class Protocol(Step):

     def _insertRunJobStep(self, progName, progArguments, resultFiles=[],
                           **kwargs):
-        """ Insert a Step that will simply call runJob function
+        """ Insert an Step that will simple call runJob function
         **args: see __insertStep
         """
-        return self._insertFunctionStep('runJob', progName, progArguments, **kwargs)
+        return self._insertFunctionStep('runJob', progName, progArguments,
+                                        **kwargs)

     def _insertCopyFileStep(self, sourceFile, targetFile, **kwargs):
         """ Shortcut function to insert a step for copying a file to a destiny. """
-        step = FunctionStep(pwutils.copyFile, 'copyFile', sourceFile, targetFile, **kwargs)
+        step = FunctionStep(pwutils.copyFile, 'copyFile', sourceFile,
+                            targetFile,
+                            **kwargs)
         return self.__insertStep(step, **kwargs)

     def _enterDir(self, path):
@@ -1226,15 +1265,16 @@ class Protocol(Step):
         for step in self.loadSteps():
             self.__insertStep(step, )

-    def __findStartingStep(self):
+    def __updateDoneSteps(self):
         """ From a previous run, compare self._steps and self._prevSteps
-        to find which steps we need to start at, skipping successful done
+        to find which steps we need to execute, skipping successful done
         and not changed steps. Steps that needs to be done, will be deleted
         from the previous run storage.
         """
+        doneSteps = 0
         if self.runMode == MODE_RESTART:
             self._prevSteps = []
-            return 0
+            return doneSteps

         self._prevSteps = self.loadSteps()

@@ -1248,21 +1288,24 @@ class Protocol(Step):
             if (not oldStep.isFinished() or newStep != oldStep
                     or not oldStep._postconditions()):
                 if pw.Config.debugOn():
-                    self.info("Starting at step %d" % i)
-                    self.info(" Old step: %s, args: %s"
-                              % (oldStep.funcName, oldStep.argsStr))
-                    self.info(" New step: %s, args: %s"
-                              % (newStep.funcName, newStep.argsStr))
-                    self.info(" not oldStep.isFinished(): %s"
-                              % (not oldStep.isFinished()))
-                    self.info(" newStep != oldStep: %s"
-                              % (newStep != oldStep))
-                    self.info(" not oldStep._postconditions(): %s"
-                              % (not oldStep._postconditions()))
-                return i
-            newStep.copy(oldStep)
-
-        return n
+                    self.info("Rerunning step %d" % i)
+                    if not oldStep.isFinished():
+                        self.info(" Old step: %s, args: %s was not finished"
+                                  % (oldStep.funcName, oldStep.argsStr))
+                    elif newStep != oldStep:
+                        self.info(" New step: %s, args: %s is different"
+                                  % (newStep.funcName, newStep.argsStr))
+                    elif not oldStep._postconditions():
+                        self.info(" Old step: %s, args: %s postconditions were not met"
+                                  % (oldStep.funcName, oldStep.argsStr))
+
+            else:
+                doneSteps += 1
+                # If the step has not changed and is properly finished, it is copied to the new steps so it is not
+                # executed again
+                newStep.copy(oldStep)
+
+        return doneSteps

     def _storeSteps(self):
         """ Store the new steps list that can be retrieved
@@ -1335,9 +1378,9 @@ class Protocol(Step):
     def _stepsCheck(self):
         pass

-    def _runSteps(self, startIndex):
+    def _runSteps(self, doneSteps):
         """ Run all steps defined in self._steps. """
-        self._stepsDone.set(startIndex)
+        self._stepsDone.set(doneSteps)
         self._numberOfSteps.set(len(self._steps))
         self.setRunning()
         # Keep the original value to set in sub-protocols
@@ -1346,8 +1389,9 @@ class Protocol(Step):
         self.runMode.set(MODE_RESUME)
         self._store()

-        if startIndex == len(self._steps):
+        if doneSteps == len(self._steps):
             self.lastStatus = STATUS_FINISHED
+            self.setFinished()
             self.info("All steps seem to be FINISHED, nothing to be done.")
         else:
             self.lastStatus = self.status.get()
@@ -1357,8 +1401,9 @@ class Protocol(Step):
                                  self._stepsCheck,
                                  self._stepsCheckSecs)

-        print("*** Last status is %s " % self.lastStatus)
-        self.setStatus(self.lastStatus)
+        logger.info("*** Last status is %s " % self.lastStatus)
+        self.setStatus(self.lastStatus)
+        self.cleanExecutionAttributes(includeSteps=False)
         self._store(self.status)

     def __deleteOutputs(self):
@@ -1516,11 +1561,11 @@ class Protocol(Step):

         self._insertAllSteps() # Define steps for execute later
         # Find at which step we need to start
-        startIndex = self.__findStartingStep()
-        self.info(" Starting at step: %d" % (startIndex + 1))
+        doneSteps = self.__updateDoneSteps()
+        # self.info(" Starting at step: %d" % (startIndex + 1))
         self._storeSteps()
         self.info(" Running steps ")
-        self._runSteps(startIndex)
+        self._runSteps(doneSteps)

     def _getEnviron(self):
         """ This function should return an environ variable
@@ -1574,34 +1619,44 @@ class Protocol(Step):
             package = self.getClassPackage()
             if hasattr(package, "__version__"):
                 self.info('plugin v: %s%s' %(package.__version__, ' (devel)' if plugin.inDevelMode() else '(production)'))
-            self.info('plugin binary v: %s' % plugin.getActiveVersion())
+            try:
+                self.info('plugin binary v: %s' % plugin.getActiveVersion())
+            except Exception as e:
+                logger.error("Coudn't get the active version of the binary. This may be cause by a variable in the config"
+                             " file with a missing - in it and the protocol to fail.", exc_info=e)
             self.info('currentDir: %s' % os.getcwd())
             self.info('workingDir: %s' % self.workingDir)
             self.info('runMode: %s' % MODE_CHOICES[self.runMode.get()])
+
+            if self.modeSerial():
+                self.info("Serial execution")
+            else:
+                self.info("Scipion threads: %d" % self.getScipionThreads())
+
             try:
-                self.info(' MPI: %d' % self.numberOfMpi)
-                self.info(' threads: %d' % self.numberOfThreads)
+                self.info('binary MPI: %d' % self.numberOfMpi)
+                self.info('binary Threads: %d' % self.getBinThreads())
             except Exception as e:
                 self.info(' * Cannot get information about MPI/threads (%s)' % e)
-        # Something went wrong ans at this point status is launched. We mark it as failed.
+        # Something went wrong and at this point status is launched. We mark it as failed.
         except Exception as e:
-            print(e)
+            logger.error("Couldn't start the protocol." , exc_info=e)
             self.setFailed(str(e))
-            self._store(self.status, self.getError())
+            # self._store(self.status, self.getError())
             self._endRun()
             return

         Step.run(self)
-        if self.isFailed():
-            self._store()
+        # if self.isFailed():
+        #     self._store()
         self._endRun()

     def _endRun(self):
         """ Print some ending message and close some files. """
-        # self._store()
-        self._store(self.summaryVar)
-        self._store(self.methodsVar)
-        self._store(self.endTime)
+        self._store() # Store all protocol attributes
+        # self._store(self.summaryVar)
+        # self._store(self.methodsVar)
+        # self._store(self.endTime)

         if pwutils.envVarOn(pw.SCIPION_DEBUG_NOCLEAN):
             self.warning('Not cleaning temp folder since '
@@ -1637,6 +1692,7 @@ class Protocol(Step):
         """ Return the steps.sqlite file under logs directory. """
         return self._getLogsPath('steps.sqlite')

+
     def _addChunk(self, txt, fmt=None):
         """
         Add text txt to self._buffer, with format fmt.
@@ -1737,7 +1793,7 @@ class Protocol(Step):
             executor = StepExecutor(self.getHostConfig())

         self._stepsExecutor = executor
-        self._stepsExecutor.setProtocol(self) # executor needs the protocol to store the jobs Ids submitted to a queue
+        self._stepsExecutor.setProtocol(self)  # executor needs the protocol to store the jobs Ids submitted to a queue

     def getFiles(self):
         resultFiles = set()
@@ -1919,17 +1975,19 @@ class Protocol(Step):
         queueName, queueParams = self.getQueueParams()
         hc = self.getHostConfig()

+        scipion_project = "SCIPION_PROJECT" if self.getProject() is None else self.getProject().getShortName()
+
         d = {'JOB_NAME': self.strId(),
              'JOB_QUEUE': queueName,
-             'JOB_NODES': self.numberOfMpi.get(),
-             'JOB_THREADS': self.numberOfThreads.get(),
-             'JOB_CORES': self.numberOfMpi.get() * self.numberOfThreads.get(),
+             'JOB_NODES': max([1,self.numberOfMpi.get()]),
+             'JOB_THREADS': max([1,self.numberOfThreads.get()]),
+             'JOB_CORES': max([1,self.numberOfMpi.get() * self.numberOfThreads.get()]),
              'JOB_HOURS': 72,
              'GPU_COUNT': len(self.getGpuList()),
              QUEUE_FOR_JOBS: 'N',
-             'SCIPION_PROJECT': "SCIPION_PROJECT", # self.getProject().getShortName(),
-             'SCIPION_PROTOCOL': self.getRunName(),
-             PLUGIN_MODULE_VAR: self.getPlugin().getName()
+             PLUGIN_MODULE_VAR: self.getPlugin().getName(),
+             'SCIPION_PROJECT': scipion_project,
+             'SCIPION_PROTOCOL': self.getRunName()
             }

         # Criteria in HostConfig.load to load or not QUEUE variables
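
For illustration only, how the dictionary built above could be expanded into a submission script with Python mapping-style placeholders; the template text is a made-up SLURM-like example, not Scipion's default host configuration.

    submitDict = {
        'JOB_NAME': 'ProtExample.1234',
        'JOB_QUEUE': 'gpu',
        'JOB_NODES': max([1, 4]),        # MPIs, never below 1
        'JOB_THREADS': max([1, 8]),      # threads, never below 1
        'JOB_CORES': max([1, 4 * 8]),
        'JOB_HOURS': 72,
    }
    template = ("#SBATCH --job-name=%(JOB_NAME)s\n"
                "#SBATCH --partition=%(JOB_QUEUE)s\n"
                "#SBATCH --ntasks=%(JOB_NODES)s\n"
                "#SBATCH --cpus-per-task=%(JOB_THREADS)s\n"
                "#SBATCH --time=%(JOB_HOURS)s:00:00\n")
    print(template % submitDict)
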
@@ -2401,11 +2459,12 @@ class Protocol(Step):

         return self._size

-    def cleanExecutionAttributes(self):
+    def cleanExecutionAttributes(self, includeSteps=True):
         """ Clean all the executions attributes """
         self.setPid(0)
         self._jobId.clear()
-        self._stepsDone.set(0)
+        if includeSteps:
+            self._stepsDone.set(0)

 class LegacyProtocol(Protocol):
     """ Special subclass of Protocol to be used when a protocol class
@@ -2443,6 +2502,13 @@ def runProtocolMain(projectPath, protDbPath, protId):

     setDefaultLoggingContext(protId, protocol.getProject().getShortName())

+    if isinstance(protocol,LegacyProtocol):
+        logger.error(f"There is a problem loading the protocol {protId} ({protocol}) at {pwutils.getHostName()} "
+                     f"Installations of the execution differs from the visualization installation. "
+                     f"This is probably because you are running this protocol in a cluster node which installation is not "
+                     f"compatible with the head node or you have a plugin available on the Main GUI process (check launching directory) but "
+                     f"not properly installed as a plugin in Scipion. Please verify installation.")
+        sys.exit()
     hostConfig = protocol.getHostConfig()
     gpuList = protocol.getGpuList()
