PaIRS-UniNa 0.2.0-cp310-cp310-win_amd64.whl → 0.2.8-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PaIRS_UniNa/Calibration_Tab.py +39 -23
- PaIRS_UniNa/Changes.txt +127 -8
- PaIRS_UniNa/Custom_Top.py +12 -9
- PaIRS_UniNa/Explorer.py +534 -165
- PaIRS_UniNa/FolderLoop.py +562 -0
- PaIRS_UniNa/Input_Tab.py +261 -100
- PaIRS_UniNa/Input_Tab_CalVi.py +24 -25
- PaIRS_UniNa/Input_Tab_tools.py +967 -405
- PaIRS_UniNa/Output_Tab.py +324 -130
- PaIRS_UniNa/PaIRS_PIV.py +58 -1
- PaIRS_UniNa/PaIRS_pypacks.py +1415 -1039
- PaIRS_UniNa/Process_Tab.py +15 -19
- PaIRS_UniNa/Process_Tab_Disp.py +12 -5
- PaIRS_UniNa/Saving_tools.py +27 -13
- PaIRS_UniNa/TabTools.py +176 -59
- PaIRS_UniNa/Vis_Tab.py +664 -256
- PaIRS_UniNa/Vis_Tab_CalVi.py +114 -45
- PaIRS_UniNa/Whatsnew.py +58 -9
- PaIRS_UniNa/_PaIRS_PIV.pyd +0 -0
- PaIRS_UniNa/__init__.py +4 -3
- PaIRS_UniNa/addwidgets_ps.py +150 -63
- PaIRS_UniNa/calib.py +6 -3
- PaIRS_UniNa/calibView.py +19 -6
- PaIRS_UniNa/gPaIRS.py +1397 -543
- PaIRS_UniNa/icons/align_all.png +0 -0
- PaIRS_UniNa/icons/announcement.png +0 -0
- PaIRS_UniNa/icons/bugfix.png +0 -0
- PaIRS_UniNa/icons/change_folder.png +0 -0
- PaIRS_UniNa/icons/change_folder_off.png +0 -0
- PaIRS_UniNa/icons/close_all.png +0 -0
- PaIRS_UniNa/icons/copy_process_off.png +0 -0
- PaIRS_UniNa/icons/flaticon_PaIRS_beta.png +0 -0
- PaIRS_UniNa/icons/flaticon_PaIRS_download_warning.png +0 -0
- PaIRS_UniNa/icons/folder_loop_cleanup.png +0 -0
- PaIRS_UniNa/icons/folder_loop_cleanup_off.png +0 -0
- PaIRS_UniNa/icons/linked.png +0 -0
- PaIRS_UniNa/icons/logo_opaco.png +0 -0
- PaIRS_UniNa/icons/open_image.png +0 -0
- PaIRS_UniNa/icons/open_new_window.png +0 -0
- PaIRS_UniNa/icons/open_result.png +0 -0
- PaIRS_UniNa/icons/process_loop.png +0 -0
- PaIRS_UniNa/icons/pylog.png +0 -0
- PaIRS_UniNa/icons/python_warning.png +0 -0
- PaIRS_UniNa/icons/queue.png +0 -0
- PaIRS_UniNa/icons/restore_undo.png +0 -0
- PaIRS_UniNa/icons/save_and_stop.png +0 -0
- PaIRS_UniNa/icons/scale_all.png +0 -0
- PaIRS_UniNa/icons/scale_down.png +0 -0
- PaIRS_UniNa/icons/scale_up.png +0 -0
- PaIRS_UniNa/icons/scan_path_loop.png +0 -0
- PaIRS_UniNa/icons/scan_path_loop_off.png +0 -0
- PaIRS_UniNa/icons/show_all.png +0 -0
- PaIRS_UniNa/icons/star.png +0 -0
- PaIRS_UniNa/icons/step_inheritance.png +0 -0
- PaIRS_UniNa/icons/subMIN_on.png +0 -0
- PaIRS_UniNa/icons/unedited.png +0 -0
- PaIRS_UniNa/icons/uninitialized.png +0 -0
- PaIRS_UniNa/icons/window.png +0 -0
- PaIRS_UniNa/listLib.py +3 -2
- PaIRS_UniNa/parForMulti.py +7 -5
- PaIRS_UniNa/parForWorkers.py +174 -140
- PaIRS_UniNa/pivParFor.py +9 -11
- PaIRS_UniNa/preProcParFor.py +4 -4
- PaIRS_UniNa/procTools.py +120 -44
- PaIRS_UniNa/rqrdpckgs.txt +9 -0
- PaIRS_UniNa/stereoPivParFor.py +16 -22
- PaIRS_UniNa/tabSplitter.py +17 -7
- PaIRS_UniNa/ui_Input_Tab.py +56 -57
- PaIRS_UniNa/ui_Input_Tab_CalVi.py +71 -64
- PaIRS_UniNa/ui_Log_Tab.py +8 -6
- PaIRS_UniNa/ui_Output_Tab.py +180 -4
- PaIRS_UniNa/ui_Process_Tab.py +19 -19
- PaIRS_UniNa/ui_Process_Tab_Disp.py +181 -10
- PaIRS_UniNa/ui_Vis_Tab.py +332 -196
- PaIRS_UniNa/ui_Whatsnew.py +13 -13
- PaIRS_UniNa/ui_gPairs.py +140 -71
- PaIRS_UniNa/ui_infoPaIRS.py +156 -31
- PaIRS_UniNa/whatsnew.txt +6 -4
- {PaIRS_UniNa-0.2.0.dist-info → pairs_unina-0.2.8.dist-info}/METADATA +62 -26
- {PaIRS_UniNa-0.2.0.dist-info → pairs_unina-0.2.8.dist-info}/RECORD +82 -54
- {PaIRS_UniNa-0.2.0.dist-info → pairs_unina-0.2.8.dist-info}/WHEEL +1 -1
- PaIRS_UniNa/icons/order.png +0 -0
- PaIRS_UniNa/icons/order_reverse.png +0 -0
- PaIRS_UniNa/icons/run_piv.png +0 -0
- PaIRS_UniNa/stereo.py +0 -685
- PaIRS_UniNa/ui_infoCalVi.py +0 -428
- PaIRS_UniNa-0.2.0.dist-info/LICENSE +0 -19
- {PaIRS_UniNa-0.2.0.dist-info → pairs_unina-0.2.8.dist-info}/top_level.txt +0 -0
PaIRS_UniNa/parForWorkers.py
CHANGED
@@ -26,7 +26,7 @@ class WorkerSignals(QObject):
     kill = Signal(int)
 
 class ParForWorker(QRunnable):
-    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,
+    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,numMaxProcs:int,pfPool:ParForPool,parForMul:ParForMul,nameWorker:str,mainFun:Callable):
         #super(MIN_ParFor_Worker,self).__init__(data,indWorker,indProc,pfPool=ParForPool,parForMul=ParForMul)
         super().__init__()
         self.pfPool=pfPool
@@ -35,7 +35,7 @@ class ParForWorker(QRunnable):
         self.data=data.duplicate() #OPTIMIZE TA GP controllare se le modifiche fatte nel workers interferiscono con quelle fatte in progress_proc ed eventualmente evitare l'aggiornamento in resetProc e in store_proc
         self.indWorker = indWorker
         self.indProc = indProc
-        self.
+        self.numMaxProcs=numMaxProcs
         self.signals=WorkerSignals()
         self.isKilled = False
         self.isStoreCompleted = False
@@ -48,7 +48,7 @@ class ParForWorker(QRunnable):
         if Flag_DEBUG_PARPOOL: debugpy.debug_this_thread()
         try:
             #pr(f'ParForWorker.run self.isKilled={self.isKilled} self.indWorker={self.indWorker} self.indProc={self.indProc} ')
-            self.parForMul.numUsedCores=self.
+            self.parForMul.numUsedCores=self.numMaxProcs
             while self.indWorker!=self.indProc:# and not self.isKilled:
                 timesleep(SleepTime_Workers)
                 if self.isKilled:
@@ -87,8 +87,8 @@ class ParForWorker(QRunnable):
         self.isStoreCompleted=True
 
 class MIN_ParFor_Worker(ParForWorker):
-    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,
-        super().__init__(data,indWorker,indProc,
+    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,numMaxProcs:int,pfPool:ParForPool,parForMul:ParForMul):
+        super().__init__(data,indWorker,indProc,numMaxProcs,pfPool,parForMul,nameWorker='calcMin_Worker',mainFun=self.calcmin)
 
     def calcmin(self):
         stringaErr=''
@@ -98,10 +98,10 @@ class MIN_ParFor_Worker(ParForWorker):
 
         #pp=ParForMul()
         #pp.sleepTime=ParFor_sleepTime #time between calls of callBack
-        #pp.numCoresParPool=
+        #pp.numCoresParPool=numMaxProcs
 
         self.data.compMin.restoreMin()
-        args=(self.data,self.
+        args=(self.data,self.numMaxProcs)
         kwargs={}
         numCallBackTotOk=self.data.numFinalized #su quelli non finalized ci ripassiamo quindi inizialmente il num di callback ok = num di finalized
 
@@ -110,8 +110,10 @@ class MIN_ParFor_Worker(ParForWorker):
 
         myCallBack=lambda a,b,c,d,e,f: callBackMin(a,b,c,d,e,f,self.signals.progress)
         #for ii,f in enumerate(self.data.list_pim): pr(f'{ii}-{hex(f)} ',end='')
-        pri.Process.blue(f'Init calcmin Contab={self.data.compMin.contab} numCallBackTotOk={numCallBackTotOk}
+        pri.Process.blue(f'Init calcmin Contab={self.data.compMin.contab} numCallBackTotOk={numCallBackTotOk} numMaxProcs={self.numMaxProcs}')
         self.signals.initialized.emit()
+
+        self.parForMul.numUsedCores=self.numMaxProcs
         #TBD TA all the exceptions should be managed inside parForExtPool therefore the try should be useless just in case I check
         try:
 
@@ -122,6 +124,7 @@ class MIN_ParFor_Worker(ParForWorker):
         except Exception as e:
             PrintTA().printEvidenced('Calcmin exception raised.\nThis should never happen.')
             raise (e)
+
         if flagError:
             self.signals.finished.emit(self.data,printException('calcmin',flagMessage=True,exception=self.parForMul.exception))
             return
@@ -172,13 +175,15 @@ class MIN_ParFor_Worker(ParForWorker):
         self.signals.finished.emit(self.data,stringaErr)
 
 class PIV_ParFor_Worker(ParForWorker):
-    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,
-        super().__init__(data,indWorker,indProc,
+    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,numMaxProcs:int,pfPool:ParForPool,parForMul:ParForMul):
+        super().__init__(data,indWorker,indProc,numMaxProcs,pfPool,parForMul,nameWorker='PIV_Worker',mainFun=self.runPIVParFor)
 
-    def runPIVParFor(self)
+    def runPIVParFor(self):#unified function for piv and Stereo
         stringaErr=''
         global FlagStopWorkers
-
+
+        outString='runPIVParFor' if self.data.Step==StepTypes.piv else 'runStereoPIVParFor'
+        pri.Time.cyan(3,outString)
         FlagStopWorkers[0]=0
         # TODEL
 
@@ -190,126 +195,52 @@ class PIV_ParFor_Worker(ParForWorker):
         self.data.mediaPIV.restoreSum()
 
         #args=(self.data,)
-
-
-
-
-
-
-
-
-
-
+        '''
+        if self.pfPool:#normal condition
+            #todo GP we should print the number of piv cores and processes in the log
+            numPivOmpCores,numUsedProcs=optimalPivCores(self.numMaxProcs,self.data.nimg,penCore=0.95)
+            self.data.numUsedProcs=numUsedProcs # self.data.numUsedProcs is used for output should be the number of parfor threads used
+            self.data.numPivOmpCores=numPivOmpCores
+        else:# serial par for used only for debugging the c library
+            self.data.numUsedProcs=numUsedProcs=1 # self.data.numUsedProcs is used for output should be the number of parfor threads used
+            self.data.numPivOmpCores=numPivOmpCores=self.numMaxProcs
+            self.data.numUsedProcs=numUsedProcs=1 # self.data.numUsedProcs is used for output should be the number of parfor threads used
+            self.data.numPivOmpCores=numPivOmpCores=2#self.numMaxProcs '''
+        numPivOmpCores=self.data.numPivOmpCores
+        numUsedProcs=self.data.numUsedProcs
+        args=(self.data,numUsedProcs)
+        #kwargs={'finalPIVPIppo': self.data.nimg}#unused just for example
         kwargs={}
         numCallBackTotOk=self.data.numFinalized #su quelli non finalized ci ripassiamo quindi inizialmente il num di callback ok = num di finalized
 
 
         nImg=range(self.data.nimg)
         myCallBack=lambda a,b,c,d,e,f: callBackPIV(a,b,c,d,e,f,self.signals.progress)
-        pri.Process.blue(f'
+        pri.Process.blue(f'{outString} mediaPIV cont={self.data.mediaPIV.cont} self.numCallBackTotOk={self.numCallBackTotOk} self.data.nimg={self.data.nimg} numProc={numUsedProcs} numPivProc={numPivOmpCores}')
 
         self.signals.initialized.emit()
         #TBD TA all the exceptions should be managed inside parForExtPool therefore the try should be useless just in case I check
-
+
+
+        if self.pfPool:
+            self.parForMul.numUsedCores=numUsedProcs
+            parPool=self.pfPool.parPool
+            parForFun= self.parForMul.parForExtPool
+        else:
+            parPool=None
+            parForFun= self.parForMul.simpleFor
         try:
             if self.data.Step==StepTypes.piv:
-                (me,flagOut,VarOut,flagError)=parForFun(
+                (me,flagOut,VarOut,flagError)=parForFun(parPool,procPIV,nImg,initTask=initPIV,finalTask=finalPIV, wrapUp=saveAndMean, callBack=myCallBack,*args,**kwargs)
             elif self.data.Step==StepTypes.spiv:
-                (me,flagOut,VarOut,flagError)=parForFun(
+                (me,flagOut,VarOut,flagError)=parForFun(parPool,procStereoPIV,nImg,initTask=initStereoPIV,finalTask=finalStereoPIV, wrapUp=saveAndMean, callBack=myCallBack,*args,**kwargs)
 
         except Exception as e:
-            PrintTA().printEvidenced('
+            PrintTA().printEvidenced(f'{outString} exception raised\nThis should never happen ')
             raise (e)
-        if flagError:
-            self.signals.finished.emit(self.data,printException('calcmin',flagMessage=True,exception=self.parForMul.exception))
-            return
-
-        try:
-            if me.cont:
-                me:MediaPIV
-                me.calcMedia()
-                nameFields=me.namesPIV.avgVelFields
-                Var=[getattr(me,f) for f in nameFields ]#me.x,me.y,me.u,me.v,me.up,me.vp,me.uvp,me.sn,me.Info]
-                nameVar=me.namesPIV.avgVel
-                saveResults(self.data,-1,Var,nameVar)
-        except:
-            stringaErr+=printException('calcmin',flagMessage=True,exception=self.parForMul.exception)+'\n'
-        numCallBackTotOk+=sum(1 if x&FLAG_CALLBACK_INTERNAL else 0 for x in flagOut)
-
-        # Tbd
-        '''
-        if flagDebugMem:
-            pri.Time.cyan(0,'Save results')
-            pr(f"Number of garbage element not collected before {gc.get_count()}",end='')
-            gc.collect()
-            pr(f" after {gc.get_count()}")
-            pr(f"********************** End Fun Main -> {(memoryUsagePsutil()-m1)/ float(2 ** 20)}MByte")
-            pr(*gc.garbage)
-        '''
-
-
-
-        #initTime=time()
-        self.data.flagParForCompleted=True
-        while self.numCallBackTotOk!=numCallBackTotOk :
-            pri.Process.blue (f'Error runPIVParFor self.numCallBackTotOk={self.numCallBackTotOk} numCallBackTotOk={numCallBackTotOk} numProc={nProcOpt} numPivProc={nPivOpt}')
-            timesleep(SleepTime_Workers)
-
-        if me.cont:
-            pri.Time.cyan(f'u={me.u[5][4]} v={me.v[5][4]} up={me.up[5][4]} vp={me.vp[5][4]} uvp={me.uvp[5][4]} sn={me.sn[5][4]} Info={me.Info[5][4]}')
-
-        #self.numFinalized=sum(1 if f&FLAG_FINALIZED[0] else 0 for f in flagOut)
-        numProcOrErrTot=sum(1 if f else 0 for f in flagOut)
-
-        #for ii,f in enumerate(flagOut): pr(f'{ii}-{hex(f)} ',end='')
-        pri.Process.blue (f'Fine runPIVParFor ************** numCallBackTotOk={numCallBackTotOk} numProcOrErrTot={numProcOrErrTot} numFinalized={self.data.numFinalized}')
-
-        self.data.mediaPIV=me
-        self.data.FlagFinished=self.data.nimg==numProcOrErrTot
-        self.signals.finished.emit(self.data,stringaErr)
-
-class StereoPIV_ParFor_Worker(ParForWorker):
-    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,numUsedThreadsPIV:int,pfPool:ParForPool,parForMul:ParForMul):
-        super().__init__(data,indWorker,indProc,numUsedThreadsPIV,pfPool,parForMul,nameWorker='SPIV_Worker',mainFun=self.runStereoPIVParFor)
-
-    def runStereoPIVParFor(self):
-        stringaErr=''
-        global FlagStopWorkers
-        pri.Time.cyan(3,'runStereoPIVParFor')
-        FlagStopWorkers[0]=0
-        # TODEL
-
-        flagDebugMem=False
-        if flagDebugMem:# TODEL?
-            m1=memoryUsagePsutil()
 
-        filename_preproc=self.data.filename_proc[StepTypes.min]# todo serve?
-
-        self.data.mediaPIV.restoreSum()#todo modificare in mediaStereoPIV
-
-        #args=(self.data,)
-        args=(self.data,self.numUsedThreadsPIV)
-        #kwargs={'finalPIVPIppo': self.data.nimg}#unused just for example
-        kwargs={}
-        numCallBackTotOk=self.data.numFinalized #su quelli non finalized ci ripassiamo quindi inizialmente il num di callback ok = num di finalized
-
-
-        nImg=range(self.data.nimg)
-        myCallBack=lambda a,b,c,d,e,f: callBackPIV(a,b,c,d,e,f,self.signals.progress)
-        pri.Process.blue(f'runStereoPIVParFor mediaPIV cont={self.data.mediaPIV.cont} self.numCallBackTotOk={self.numCallBackTotOk} self.data.nimg={self.data.nimg}')
-
-        self.signals.initialized.emit()
-        #TBD TA all the exceptions should be managed inside parForExtPool therefore the try should be useless just in case I check
-        try:
-            if self.pfPool:
-                (me,flagOut,VarOut,flagError)=self.parForMul.parForExtPool(self.pfPool.parPool,procStereoPIV,nImg,initTask=initStereoPIV,finalTask=finalStereoPIV, wrapUp=saveAndMean, callBack=myCallBack,*args,**kwargs)
-            else:
-                (me,flagOut,VarOut,flagError)=self.parForMul.simpleFor(procStereoPIV,nImg,initTask=initStereoPIV,finalTask=finalStereoPIV, wrapUp=saveAndMean, callBack=myCallBack,*args,**kwargs)
-        except Exception as e:
-            PrintTA().printEvidenced('runStereoPIVParFor exception raised\nThis should never happen ')
-            raise (e)
         if flagError:
-            self.signals.finished.emit(self.data,printException(
+            self.signals.finished.emit(self.data,printException(outString,flagMessage=True,exception=self.parForMul.exception))
             return
 
         try:
@@ -321,7 +252,7 @@ class StereoPIV_ParFor_Worker(ParForWorker):
                 nameVar=me.namesPIV.avgVel
                 saveResults(self.data,-1,Var,nameVar)
         except:
-            stringaErr+=printException(
+            stringaErr+=printException(outString,flagMessage=True,exception=self.parForMul.exception)+'\n'
         numCallBackTotOk+=sum(1 if x&FLAG_CALLBACK_INTERNAL else 0 for x in flagOut)
 
         # Tbd
@@ -340,17 +271,17 @@ class StereoPIV_ParFor_Worker(ParForWorker):
         #initTime=time()
         self.data.flagParForCompleted=True
         while self.numCallBackTotOk!=numCallBackTotOk :
-            pri.Process.blue (f'Error
+            pri.Process.blue (f'Error {outString} self.numCallBackTotOk={self.numCallBackTotOk} numCallBackTotOk={numCallBackTotOk} numUsedProcs={numUsedProcs} numPivOmpCores={numPivOmpCores}')
             timesleep(SleepTime_Workers)
 
-        if me.cont:
-
+        #if me.cont:
+        # pri.Time.cyan(f'u={me.u[5][4]} v={me.v[5][4]} up={me.up[5][4]} vp={me.vp[5][4]} uvp={me.uvp[5][4]} sn={me.sn[5][4]} Info={me.Info[5][4]}')
 
         #self.numFinalized=sum(1 if f&FLAG_FINALIZED[0] else 0 for f in flagOut)
         numProcOrErrTot=sum(1 if f else 0 for f in flagOut)
 
         #for ii,f in enumerate(flagOut): pr(f'{ii}-{hex(f)} ',end='')
-        pri.Process.blue (f'Fine
+        pri.Process.blue (f'Fine {outString} ************** numCallBackTotOk={numCallBackTotOk} numProcOrErrTot={numProcOrErrTot} numFinalized={self.data.numFinalized}')
 
         self.data.mediaPIV=me
         self.data.FlagFinished=self.data.nimg==numProcOrErrTot
@@ -358,8 +289,8 @@ class StereoPIV_ParFor_Worker(ParForWorker):
 
 
 class StereoDisparity_ParFor_Worker(ParForWorker):
-    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,
-        super().__init__(data,indWorker,indProc,
+    def __init__(self,data:dataTreePar,indWorker:int,indProc:int,numMaxProcs:int,pfPool:ParForPool,parForMul:ParForMul):
+        super().__init__(data,indWorker,indProc,numMaxProcs,pfPool,parForMul,nameWorker='Disp_Worker',mainFun=self.runDisparity)
 
     def runDisparity(self):
         ''' main proc function called for all the images one time per processor
@@ -384,6 +315,11 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
         self.disp=data2Disp(data)
         spivIn=self.disp.SPIVIn
         dP=self.disp.dataProc
+        dAC=self.disp.dispAvCo
+        ve=self.disp.vect
+        ve.PianoLaser[0]=np.float32(data.OUT_dict['zconst'])
+        ve.PianoLaser[1]=np.float32(data.OUT_dict['xterm'])
+        ve.PianoLaser[2]=np.float32(data.OUT_dict['yterm'])
 
         stringaErr=''
         if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
@@ -398,6 +334,7 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
         else:
             FlagInitError=False
 
+        """
         if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
         if not FlagInitError:
             self.imgs=[]
@@ -430,10 +367,28 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
                 else:
                     self.imgs.append(ic)
                 #self.imgs.append(np.ascontiguousarray(da[spivIn.RigaPart:spivIn.RigaPart+dP.ImgH,spivIn.ColPart:spivIn.ColPart+dP.ImgW],dtype= np.uint16))
+        """
+
+        imList=data.list_Image_Files
+        indim=np.ix_(np.arange(spivIn.RigaPart,spivIn.RigaPart+dP.ImgH),np.arange(spivIn.ColPart,spivIn.ColPart+dP.ImgW))
+        if data.FlagMIN:
+            data.compMin.Imin=[[np.zeros(1) for _ in range(data.nframe)] for _ in range(data.ncam)]
+            for c,Iminc in enumerate(data.Imin):
+                for k,filename in enumerate(Iminc):
+                    try:
+                        data.compMin.Imin[c][k]=np.ascontiguousarray(np.array(Image.open(filename),dtype=float)[indim],dtype= np.uint16)
+                    except Exception as inst:
+                        FlagInitError=True
+                        stringaErr=f'\n!!!!!!!!!! Error while reading background image {filename}:\n{inst}\n'
+                        pri.Error.red(stringaErr)
+                        flagOut|=FLAG_READ_ERR[kConst]
+                        break
+
         if not FlagInitError:
             if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
             try:
                 self.disp.initAllocDisp()
+                self.disp.dispAvCo.NImg=2*len(imList[0][0]) if spivIn.FlagImgTau==0 else len(imList[0][0])
            except Exception as inst: #ValueError as exc:
                FlagInitError=True
                stringaErr=f'\n!!!!!!!!!! Error during disparity process initialization:\n{inst}\n'
@@ -441,10 +396,8 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
                 flagOut|=FLAG_READ_ERR[kConst]
 
         #print(f'Esempio risoluzione utilizzata {self.disp.dataProc.RisxRadd} {1/self.disp.dataProc.RisxRadd}')
-
-
-        sleepTimeWorkers=0.2 #for multithreading and other stuff
-        if FlagInitError: flagOut|=FLAG_FINALIZED[kConst]|FLAG_CALLBACK_INTERNAL
+        #sleepTimeWorkers=0.2 #for multithreading and other stuff
+        #if FlagInitError: flagOut|=FLAG_FINALIZED[kConst]|FLAG_CALLBACK_INTERNAL
         flagOutIter=FLAG_READ[kConst]
         for it in range(spivIn.Niter):
             if FlagInitError:
@@ -453,7 +406,7 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
                 else: stampa=''
                 data.list_print[it]=stampa
                 data.list_pim[it]=flagOut
-                self.signals.progress.emit(procID,
+                self.signals.progress.emit(procID,-1,flagOut,Var,stringaErr)
                 continue
             if data.list_pim[it]&FLAG_FINALIZED[kConst]:
                 flagOut=FLAG_READ[kConst]|FLAG_PROC[kConst]|FLAG_FINALIZED[kConst] # It has been already processed. Exit without calling the callback core
@@ -467,13 +420,46 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
             ind=it
             if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
             try:
-
+                errorType='evaluating viewing angles'
                 self.disp.evaldXdY()
                 if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
-                for
-                self.
+                for p in range(len(imList[0][0])):
+                    if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
+                    ic=[]
+                    #print(f'reading {p}')
+                    for cam in range(len(imList[0])):
+                        da=db=None
+                        if spivIn.FlagImgTau in (0,1):
+                            if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
+                            nomeImg=data.inpPath+imList[cam][0][p]
+                            errorType=f'reading the image {nomeImg}'
+                            da=np.ascontiguousarray(np.array(Image.open(nomeImg),dtype=float)[indim],dtype= np.uint16)
+                            if data.FlagMIN:
+                                if data.FlagTR and p%2:
+                                    da=da-data.compMin.Imin[cam][1]
+                                else:
+                                    da=da-data.compMin.Imin[cam][0]
+                                da=np.ascontiguousarray(da,dtype= np.uint16)
+                        if spivIn.FlagImgTau in (0,2):
+                            if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
+                            nomeImg=data.inpPath+imList[cam][1][p]
+                            errorType=f'reading the image {nomeImg}'
+                            db=np.ascontiguousarray(np.array(Image.open(nomeImg),dtype=float)[indim],dtype= np.uint16)
+                            if data.FlagMIN:
+                                if data.FlagTR and p%2:
+                                    db=db-data.compMin.Imin[cam][0]
+                                else:
+                                    db=db-data.compMin.Imin[cam][1]
+                                db=np.ascontiguousarray(db,dtype= np.uint16)
+
+                        ic.append([da,db])
+
+                    errorType=f'dewarping and computing disparity ({imList[0][0][p]},...,{imList[1][1][p]})'
+                    self.disp.deWarpAndCalcCC(ic)
                 #while self.disp.flagWorking==2:# and not self.isKilled: sleep (sleepTimeWorkers)
                 if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
+
+                errorType=f'computing the laser plane constants'
                 self.disp.calcDisparity()
                 if self.isKilled: return self.stopDisparity(it,flagOut,flagOutIter)
                 #while self.disp.flagWorking==2:# and not self.isKilled: sleep (sleepTimeWorkers)
@@ -482,13 +468,16 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
                 #dum=str(inst.__cause__).split('\n')[3] #solved
                 dum=str(inst.args[0])
                 #varOut[1]+=f"\n!!!!!!!!!! Error while processing the above image pair: {dum}"
-                errorPrint=f"!!!!!!!!!! Error while
+                errorPrint=f"!!!!!!!!!! Error while {errorType}:\n{dum}\n"
                 stampa+=errorPrint
                 stringaErr+=stampa
                 pri.Error.red(stringaErr)
             else:
                 flagOut|=FLAG_PROC[kConst]# per completezza aggiungo anche processato
-                stampa+=f" Laser plane eq. :
+                stampa+=f" Laser plane eq. : z (mm) = {ve.PianoLaser[0]:.4g} + {ve.PianoLaser[1]:.4g} * x + {ve.PianoLaser[2]:.4g} * y\n Residual calib. err. ort. = {dAC.dOrtMean:.4g} pixels\n Residual calib. err. par. = {dAC.dParMean:.4g} pixels\n Estimated laser thick. = {dAC.DeltaZ:.4g} pixels (approx. {dAC.DeltaZ * dP.RisxRadd / abs(dAC.ta0Mean - dAC.ta1Mean):.4g} mm)\n Outliers % = {dAC.percOutlier*100:g} \n"
+                data.OUT_dict['zconst']=float(ve.PianoLaser[0])
+                data.OUT_dict['xterm']=float(ve.PianoLaser[1])
+                data.OUT_dict['yterm']=float(ve.PianoLaser[2])
                 try:
                     nameVar=data.namesPIV.instVel
                     Var=[getattr(self.disp.vect,f) for f in nameVar ]
@@ -496,6 +485,10 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
                     # saveResults(data,-1,Var,nameVar)
                     #else:
                     saveResults(data,f'it{it+1}',Var,nameVar)
+                    CC_rot=self.create_CC_image(self.disp.vect.CCrot)
+                    nameFileOut=data.resF(f'it{it+1}',string='dispMap')
+                    self.save_CC_image(CC_rot,nameFileOut)
+                    pass
                 except Exception as inst:
                     errorPrint=f"\n!!!!!!!!!! Error while saving the results:\n{str(inst)}\n"
                     stampa+=errorPrint
@@ -513,15 +506,15 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
                 FlagFinalized=False
             else:
                 data.res=1./self.disp.dataProc.RisxRadd
-                data.laserConst=[const for const in ve.PianoLaser]
+                data.laserConst=[float(const) for const in ve.PianoLaser]
             if FlagFinalized:
                 flagOut|=FLAG_FINALIZED[kConst]|FLAG_CALLBACK_INTERNAL
-
-
-
-
-
-
+
+            data.list_print[it]=stampa
+            data.list_pim[it]=flagOut
+            self.signals.progress.emit(procID,ind,flagOut,Var,stampa)
+            if stringaErr: break
+            timesleep(3)
 
         for j in range(it+1,spivIn.Niter):
             data.list_print[j]=''
@@ -556,4 +549,45 @@ class StereoDisparity_ParFor_Worker(ParForWorker):
             data.list_print[j]=''
             data.list_pim[j]=flagOut if j==it else flagOutIter
             self.signals.progress.emit(procID,ind,flagOutIter,[],'')
-        self.signals.finished.emit(data,'
+        self.signals.finished.emit(data,'')
+
+
+    def create_CC_image(self,CC):
+        # Get the dimensions of the input matrix
+        n, m, h, w = CC.shape
+
+        # Normalize the matrix values to the range [0, 65535]
+        CC_min = CC.min()
+        CC_max = CC.max()
+        CC_mean = 0.5*(CC_min+CC_max)
+        CC_mean_normalized = (CC_mean + 1.0)/2.0 * 65535
+
+        CC_normalized = (CC + 1.0)/2.0 * 65535
+        CC_normalized = CC_normalized.astype(np.uint16) # Convert to 16-bit integers
+
+        # Initialize the final image grid with the correct size
+        h=2*self.disp.dispAvCo.N_NormEpi+1
+        grid_height = h * n
+        grid_width = w * m
+        CC_rot = np.zeros((grid_height, grid_width), dtype=np.uint16)
+
+        # Populate the grid with submatrices
+        try:
+            for i in range(n):
+                for j in range(m):
+                    # Copy the submatrix (w x h) into the appropriate position in the grid
+                    CC_rot[i*h:(i+1)*h, j*w:(j+1)*w] = CC_normalized[i, j, :h, :]
+                    CC_rot[i*h, j*w:(j+1)*w] = CC_mean_normalized
+                    CC_rot[i*h:(i+1)*h, j*w] = CC_mean_normalized
+                    CC_rot[(i+1)*h-1, j*w:(j+1)*w] = CC_mean_normalized
+                    CC_rot[i*h:(i+1)*h, (j+1)*w-1] = CC_mean_normalized
+        except:
+            pri.Error.red(f'Error while generating disparity map image:\n{traceback.format_exc()}\n\n')
+            pass
+
+        return CC_rot
+
+    def save_CC_image(self,CC,file_path):
+        image = Image.fromarray(CC)
+        image.save(file_path)
+        return
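The new create_CC_image/save_CC_image helpers added above turn the stack of per-window correlation maps into a single 16-bit preview image: correlation values are mapped from [-1, 1] onto [0, 65535], each (h, w) map is placed into an n-by-m grid, and the tile borders are drawn at the normalized mid-value of the data range before the grid is written out with PIL. The following is a minimal standalone sketch of that normalization and tiling logic; the function name tile_correlation_maps and the synthetic input array are illustrative only and are not part of the PaIRS_UniNa API.

import numpy as np
from PIL import Image

def tile_correlation_maps(CC: np.ndarray) -> np.ndarray:
    """Tile an (n, m, h, w) stack of correlation maps into one 16-bit grid image."""
    n, m, h, w = CC.shape
    # Map correlation values from [-1, 1] onto the full uint16 range.
    CC16 = ((CC + 1.0) / 2.0 * 65535).astype(np.uint16)
    # Border grey level: normalized midpoint between the global min and max.
    border = np.uint16((CC.min() + CC.max() + 2.0) / 4.0 * 65535)
    grid = np.zeros((h * n, w * m), dtype=np.uint16)
    for i in range(n):
        for j in range(m):
            grid[i*h:(i+1)*h, j*w:(j+1)*w] = CC16[i, j]
            grid[i*h, j*w:(j+1)*w] = border          # top edge of the tile
            grid[(i+1)*h-1, j*w:(j+1)*w] = border    # bottom edge
            grid[i*h:(i+1)*h, j*w] = border          # left edge
            grid[i*h:(i+1)*h, (j+1)*w-1] = border    # right edge
    return grid

# Example: a 3x4 grid of 17x17 synthetic correlation maps saved as a 16-bit PNG.
maps = np.random.uniform(-1.0, 1.0, size=(3, 4, 17, 17))
Image.fromarray(tile_correlation_maps(maps)).save("dispMap_preview.png")

Writing the result with Image.fromarray(...).save(...) mirrors save_CC_image, which simply wraps the uint16 array in a PIL image and saves it to the requested path.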
PaIRS_UniNa/pivParFor.py
CHANGED
@@ -38,16 +38,14 @@ def initPIV(eventFerma,iImg,procId,data:dataTreePar,*args,**kwargs):
     #PIV.SetVect([v.astype(np.intc) for v in data.PRO.Vect])
     if data.FlagMIN:
         data.compMin.Imin=[np.zeros(1),np.zeros(1)]
-        filename
-        if filename:
+        for k,filename in enumerate(data.Imin[0]):
             try:
-
-                data_min:dataTreePar = pickle.load(file)
-                data.copyfromfields(data_min,['compMin'])
+                data.compMin.Imin[k]=np.ascontiguousarray(Image.open(filename))
             except Exception as inst:
-
-
-
+                errorMessage=f'Error while opening historical minimum background image file: {filename}. PIV process will not be executed!'
+                pri.Error.red(errorMessage+f"\n\nError details:\n{inst}\n")
+                raise (errorMessage)
+                #raise (inst)
 
 
     PIV.Media=MediaPIV()#data.mediaPIV
@@ -59,7 +57,7 @@ def exitNoLog(flagOut,varOut):
     ''' exit without printing the log '''
     varOut[0]=-1
     return (flagOut,varOut)
-def procPIV(i,procId ,PIV,data:dataTreePar,
+def procPIV(i,procId ,PIV,data:dataTreePar,numUsedProcs,*args,**kwargs):
     ''' main proc function called for all the images one time per processor
     k=0 always
     In output flagOut and varOut[0] can be:
@@ -80,7 +78,7 @@ def procPIV(i,procId ,PIV,data:dataTreePar,numUsedThreadsPIV,*args,**kwargs):
     where FLAG_FINALIZED_OR_ERR = [ p|e for (p,e) in zip(FLAG_FINALIZED,FLAG_READ_ERR)]
     numProcOrErrTot=sum(1 if (f&FLAG_FINALIZED_OR_ERR[0])or(not f&FLAG_PROC[0]) else 0 for f in flagOut)
     to delete images
-    pa='C
+    pa='C:\\desk\\dl\\apairs\\jetcross\\'
     no='zR2jet0_0004a'
     I =imread([pa no '.png']);
     I=I*0+1;
@@ -182,7 +180,7 @@ def procPIV(i,procId ,PIV,data:dataTreePar,numUsedThreadsPIV,*args,**kwargs):
 
     # to del ma il resto dove va
     #varOut=[i,stampa,[]] if flagOut&FLAG_FINALIZING_PROC_OK[0] else [-1,stampa,[]]
-    if (not procId%
+    if (not procId%numUsedProcs) and flagOut&FLAG_FINALIZED[0]: # copiare l'img nella coda è un operazione onerosa. TA ha deciso che si copia solo quando serve
         #prLock(f'procMIN Main proc i={i}')
         varOut[2]=campoVel#VarOut=[i,stampa,Var]
 
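In the initPIV changes above, the pre-computed minimum (background) images are no longer recovered by unpickling a previously saved dataTreePar; each file listed in data.Imin[0] is opened directly with PIL and stored as a C-contiguous array, and a read failure now aborts the PIV run with an explicit error message. A minimal sketch of that loading pattern is shown below; load_background_images and the dummy file name are hypothetical and only illustrate the Image.open / np.ascontiguousarray combination used in the diff.

import numpy as np
from PIL import Image

def load_background_images(paths):
    """Read pre-computed minimum-background images into contiguous arrays."""
    imin = []
    for filename in paths:
        try:
            # np.ascontiguousarray accepts the PIL image directly and returns
            # C-contiguous memory, which compiled processing kernels typically expect.
            imin.append(np.ascontiguousarray(Image.open(filename)))
        except Exception as exc:
            raise RuntimeError(
                f"Error while opening historical minimum background image file: {filename}"
            ) from exc
    return imin

# Hypothetical usage: write a dummy 16-bit background image, then load it back.
Image.fromarray(np.zeros((8, 8), dtype=np.uint16)).save("min_frame_a.png")
backgrounds = load_background_images(["min_frame_a.png"])
print(backgrounds[0].shape, backgrounds[0].dtype)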
PaIRS_UniNa/preProcParFor.py
CHANGED
@@ -19,7 +19,7 @@ def initMIN(eventFerma,iImg,procId,data:dataTreePar,*args,**kwargs):
     #if procId==2: prTimeLock(f"fine initMIN procId={procId} ")
     return (flagOut,VarOut,compMin)
 
-def procMIN(i,procId,compMin:CompMin,data:dataTreePar,
+def procMIN(i,procId,compMin:CompMin,data:dataTreePar,numMaxProcs,*args,**kwargs):
     ''' main proc function called for all the images one time per processor
     k=0 or 1 for the first and second image
     In output flagOut and varOut[0] can be:
@@ -98,15 +98,15 @@ def procMIN(i,procId,compMin:CompMin,data:dataTreePar,numUsedThreadsPIV,*args,**
         varOut[1]+=printException(flagMessage=True)
 
 
-    if not procId%
+    if not procId%numMaxProcs and flagOut&FLAG_PROC_AB: # copying in the queue is time consuming. This is done only when needed
         varOut[2]=compMin.Imin#VarOut=[i,stampa,Var]
     return (flagOut,varOut)
 
-def finalMIN( procId,compMin:CompMin,data:dataTreePar,
+def finalMIN( procId,compMin:CompMin,data:dataTreePar,numMaxProcs,*args,**kwargs):
     #prLock(f'finalMIN procId={procId} {data.compMin.contab}')
     return compMin
 
-def saveAndMin(procId,flagHasWorked,compMin:CompMin,data:dataTreePar,
+def saveAndMin(procId,flagHasWorked,compMin:CompMin,data:dataTreePar,numMaxProcs,*args,**kwargs):
     ''' saveAndMean is the wrapUp function called once per processor '''
     #prTimeLock(f'saveAndMin procId={procId} {data.compMin.contab}')
     if flagHasWorked: