acoular 23.11-py3-none-any.whl → 24.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- acoular/__init__.py +118 -50
- acoular/calib.py +29 -38
- acoular/configuration.py +116 -73
- acoular/demo/__init__.py +10 -4
- acoular/demo/acoular_demo.py +78 -53
- acoular/environments.py +265 -262
- acoular/fastFuncs.py +361 -191
- acoular/fbeamform.py +1478 -1407
- acoular/grids.py +501 -545
- acoular/h5cache.py +50 -59
- acoular/h5files.py +154 -137
- acoular/internal.py +10 -11
- acoular/microphones.py +57 -53
- acoular/sdinput.py +47 -52
- acoular/signals.py +167 -179
- acoular/sources.py +818 -693
- acoular/spectra.py +349 -359
- acoular/tbeamform.py +414 -413
- acoular/tfastfuncs.py +178 -101
- acoular/tools/__init__.py +25 -0
- acoular/tools/aiaa.py +186 -0
- acoular/tools/helpers.py +189 -0
- acoular/tools/metrics.py +165 -0
- acoular/tprocess.py +1201 -1143
- acoular/traitsviews.py +513 -501
- acoular/trajectory.py +50 -52
- acoular/version.py +5 -6
- acoular/xml/minidsp_uma-16.xml +20 -0
- acoular/xml/{minidsp_uma16.xml → minidsp_uma-16_mirrored.xml} +3 -0
- {acoular-23.11.dist-info → acoular-24.5.dist-info}/METADATA +47 -40
- acoular-24.5.dist-info/RECORD +50 -0
- {acoular-23.11.dist-info → acoular-24.5.dist-info}/WHEEL +1 -1
- acoular-24.5.dist-info/licenses/LICENSE +28 -0
- acoular/fileimport.py +0 -380
- acoular/nidaqimport.py +0 -273
- acoular/tests/reference_data/BeamformerBase.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseFalse1.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseFalse2.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseFalse3.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseFalse4.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseTrue1.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseTrue2.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseTrue3.npy +0 -0
- acoular/tests/reference_data/BeamformerBaseTrue4.npy +0 -0
- acoular/tests/reference_data/BeamformerCMF.npy +0 -0
- acoular/tests/reference_data/BeamformerCapon.npy +0 -0
- acoular/tests/reference_data/BeamformerClean.npy +0 -0
- acoular/tests/reference_data/BeamformerCleansc.npy +0 -0
- acoular/tests/reference_data/BeamformerCleant.npy +0 -0
- acoular/tests/reference_data/BeamformerCleantSq.npy +0 -0
- acoular/tests/reference_data/BeamformerCleantSqTraj.npy +0 -0
- acoular/tests/reference_data/BeamformerCleantTraj.npy +0 -0
- acoular/tests/reference_data/BeamformerDamas.npy +0 -0
- acoular/tests/reference_data/BeamformerDamasPlus.npy +0 -0
- acoular/tests/reference_data/BeamformerEig.npy +0 -0
- acoular/tests/reference_data/BeamformerEigFalse1.npy +0 -0
- acoular/tests/reference_data/BeamformerEigFalse2.npy +0 -0
- acoular/tests/reference_data/BeamformerEigFalse3.npy +0 -0
- acoular/tests/reference_data/BeamformerEigFalse4.npy +0 -0
- acoular/tests/reference_data/BeamformerEigTrue1.npy +0 -0
- acoular/tests/reference_data/BeamformerEigTrue2.npy +0 -0
- acoular/tests/reference_data/BeamformerEigTrue3.npy +0 -0
- acoular/tests/reference_data/BeamformerEigTrue4.npy +0 -0
- acoular/tests/reference_data/BeamformerFunctional.npy +0 -0
- acoular/tests/reference_data/BeamformerGIB.npy +0 -0
- acoular/tests/reference_data/BeamformerGridlessOrth.npy +0 -0
- acoular/tests/reference_data/BeamformerMusic.npy +0 -0
- acoular/tests/reference_data/BeamformerOrth.npy +0 -0
- acoular/tests/reference_data/BeamformerTime.npy +0 -0
- acoular/tests/reference_data/BeamformerTimeSq.npy +0 -0
- acoular/tests/reference_data/BeamformerTimeSqTraj.npy +0 -0
- acoular/tests/reference_data/BeamformerTimeTraj.npy +0 -0
- acoular/tests/reference_data/Environment.npy +0 -0
- acoular/tests/reference_data/Example1_numerical_values_testsum.h5 +0 -0
- acoular/tests/reference_data/FiltFiltOctave__.npy +0 -0
- acoular/tests/reference_data/FiltFiltOctave_band_100_0_fraction_Thirdoctave_.npy +0 -0
- acoular/tests/reference_data/FiltFreqWeight_weight_A_.npy +0 -0
- acoular/tests/reference_data/FiltFreqWeight_weight_C_.npy +0 -0
- acoular/tests/reference_data/FiltFreqWeight_weight_Z_.npy +0 -0
- acoular/tests/reference_data/FiltOctave__.npy +0 -0
- acoular/tests/reference_data/FiltOctave_band_100_0_fraction_Thirdoctave_.npy +0 -0
- acoular/tests/reference_data/Filter__.npy +0 -0
- acoular/tests/reference_data/GeneralFlowEnvironment.npy +0 -0
- acoular/tests/reference_data/OctaveFilterBank__.npy +0 -0
- acoular/tests/reference_data/OpenJet.npy +0 -0
- acoular/tests/reference_data/PointSource.npy +0 -0
- acoular/tests/reference_data/PowerSpectra_csm.npy +0 -0
- acoular/tests/reference_data/PowerSpectra_ev.npy +0 -0
- acoular/tests/reference_data/RotatingFlow.npy +0 -0
- acoular/tests/reference_data/SlotJet.npy +0 -0
- acoular/tests/reference_data/TimeAverage__.npy +0 -0
- acoular/tests/reference_data/TimeCumAverage__.npy +0 -0
- acoular/tests/reference_data/TimeExpAverage_weight_F_.npy +0 -0
- acoular/tests/reference_data/TimeExpAverage_weight_I_.npy +0 -0
- acoular/tests/reference_data/TimeExpAverage_weight_S_.npy +0 -0
- acoular/tests/reference_data/TimeInOut__.npy +0 -0
- acoular/tests/reference_data/TimePower__.npy +0 -0
- acoular/tests/reference_data/TimeReverse__.npy +0 -0
- acoular/tests/reference_data/UniformFlowEnvironment.npy +0 -0
- acoular/tests/reference_data/beamformer_traj_time_data.h5 +0 -0
- acoular/tests/run_tests.sh +0 -18
- acoular/tests/run_tests_osx.sh +0 -16
- acoular/tests/test.npy +0 -0
- acoular/tests/test_beamformer_results.py +0 -204
- acoular/tests/test_classes.py +0 -60
- acoular/tests/test_digest.py +0 -125
- acoular/tests/test_environments.py +0 -73
- acoular/tests/test_example1.py +0 -124
- acoular/tests/test_grid.py +0 -92
- acoular/tests/test_integrate.py +0 -102
- acoular/tests/test_signals.py +0 -60
- acoular/tests/test_sources.py +0 -65
- acoular/tests/test_spectra.py +0 -38
- acoular/tests/test_timecache.py +0 -35
- acoular/tests/test_tprocess.py +0 -90
- acoular/tests/test_traj_beamformer_results.py +0 -164
- acoular/tests/unsupported/SpeedComparison/OvernightTestcasesBeamformer_nMics32_nGridPoints100_nFreqs4_nTrials10.png +0 -0
- acoular/tests/unsupported/SpeedComparison/cythonBeamformer.pyx +0 -237
- acoular/tests/unsupported/SpeedComparison/mainForCython.py +0 -103
- acoular/tests/unsupported/SpeedComparison/mainForParallelJit.py +0 -143
- acoular/tests/unsupported/SpeedComparison/setupCythonOpenMP.py +0 -63
- acoular/tests/unsupported/SpeedComparison/sharedFunctions.py +0 -153
- acoular/tests/unsupported/SpeedComparison/timeOverNMics_AllImportantMethods.png +0 -0
- acoular/tests/unsupported/SpeedComparison/timeOverNMics_faverage.png +0 -0
- acoular/tests/unsupported/SpeedComparison/vglOptimierungFAverage.py +0 -204
- acoular/tests/unsupported/SpeedComparison/vglOptimierungGaussSeidel.py +0 -182
- acoular/tests/unsupported/SpeedComparison/vglOptimierungR_BEAMFULL_INVERSE.py +0 -764
- acoular/tests/unsupported/SpeedComparison/vglOptimierungR_BEAM_OS.py +0 -231
- acoular/tests/unsupported/SpeedComparison/whatsFastestWayFor_absASquared.py +0 -48
- acoular/tests/unsupported/functionalBeamformer.py +0 -123
- acoular/tests/unsupported/precisionTest.py +0 -153
- acoular/tests/unsupported/validationOfBeamformerFuncsPOSTAcoularIntegration.py +0 -254
- acoular/tests/unsupported/validationOfBeamformerFuncsPREeAcoularIntegration.py +0 -531
- acoular/tools.py +0 -418
- acoular-23.11.dist-info/RECORD +0 -146
- acoular-23.11.dist-info/licenses/LICENSE +0 -29
- {acoular-23.11.dist-info → acoular-24.5.dist-info}/licenses/AUTHORS.rst +0 -0
@@ -1,63 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-"""
-Setup- Datei fuer Paralleles Cython (benutzt OpemMP).
-Siehe "http://cython.readthedocs.io/en/latest/src/userguide/parallelism.html"
-"""
-
-from distutils.core import setup
-from distutils.extension import Extension
-from Cython.Build import cythonize
-
-#==============================================================================
-## Laut Cython Doku "http://cython.readthedocs.io/en/latest/src/userguide/parallelism.html#Compiling" muss dieser Code noch rein
-ext_modules = [
-    Extension(
-        "cythonBeamformer",
-        ["cythonBeamformer.pyx"],
-        extra_compile_args=['-fopenmp'],
-        extra_link_args=['-fopenmp'],
-    )
-]
-setup(
-    name='cythonBeamformer',
-    ext_modules=cythonize(ext_modules),
-)
-#==============================================================================
-
-
-#==============================================================================
-## Compiler optionen von Acoular
-#ext_modules = [
-# Extension(
-# "cythonBeamformer",
-# ["cythonBeamformer.pyx"],
-# extra_compile_args=['-O3','-ffast-math','-msse3', \
-# '-Wno-write-strings', '-fopenmp'],
-# extra_link_args=['-lgomp'],
-# )
-# ]
-#setup(
-# name='cythonBeamformer',
-# ext_modules=cythonize(ext_modules),
-# )
-#==============================================================================
-
-
-#==============================================================================
-## Compiler optionen nach "http://nealhughes.net/parallelcomp2/"
-# ext_modules=[
-# Extension("cythonBeamformer",
-# ["cythonBeamformer.pyx"],
-# libraries=["m"],
-# extra_compile_args = ["-O3", "-ffast-math", "-march=native", "-fopenmp" ],
-# extra_link_args=['-fopenmp']
-# )
-# ]
-#
-# setup(
-# name = "cythonBeamformer",
-# cmdclass = {"build_ext": build_ext},
-# ext_modules = ext_modules
-# )
-#==============================================================================
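The removed setupCythonOpenMP.py above builds a Cython extension with OpenMP via distutils, which no longer ships with Python 3.12+. As an aside, a minimal sketch of the same build expressed with setuptools; the module name and compiler flags are taken from the removed file, and GCC/Clang-style `-fopenmp` flags are assumed:

```python
# Hedged sketch only: a setuptools-based equivalent of the removed
# distutils/OpenMP build script above. "cythonBeamformer" and the
# -fopenmp flags come from the removed file; adapt for other compilers.
from setuptools import setup, Extension
from Cython.Build import cythonize

ext_modules = [
    Extension(
        "cythonBeamformer",
        ["cythonBeamformer.pyx"],
        extra_compile_args=["-fopenmp"],
        extra_link_args=["-fopenmp"],
    )
]

setup(
    name="cythonBeamformer",
    ext_modules=cythonize(ext_modules),
)
```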
@@ -1,153 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-"""
-Created on Wed May 24 11:09:29 2017
-
-@author: tomgensch
-"""
-from cPickle import dump, load
-import matplotlib.pylab as plt
-import numpy as np
-import glob
-
-
-def savingTimeConsumption(fileName, saveTuple):
-    """ Saves all data in 'saveTuple' into 'fileName.sav'.
-    The First entry of the tuple has to be a String, explaining the structure of
-    the tuple."""
-    fi = open(fileName + '.sav', 'w')
-    dump(saveTuple, fi, -1)
-    fi.close()
-    return 0
-
-
-def readingInSAVES(fileName):
-    """ Reads in the Data saved with 'savingTimeConsumption'. """
-    fi = open(fileName, 'r')
-    data = load(fi)
-    fi.close()
-    helpText = data[0]
-    returnData = data[1:]
-    return helpText, returnData
-
-
-def plottingTimeConsumptions(titleString, trialedFuncs, timesToPlot):
-    """ titleString...String to be displayed in Title
-    trialedFuncs...list of the strings of the trialed functions
-    timesToPlot...dim [numberTrials, numberFunctions]
-    """
-# plt.figure()
-    for cnt in range(len(trialedFuncs)):
-        if 'vectorized' in trialedFuncs[cnt]:
-            lineStyle = '--'
-        elif 'faverage' in trialedFuncs[cnt]:
-            lineStyle = '--'
-        else:
-            lineStyle = '-'
-        plt.semilogy(timesToPlot[cnt], label=trialedFuncs[cnt], linestyle=lineStyle, marker='o')
-    plt.xticks(range(len(timesToPlot[1])))
-    plt.xlabel('trials [1]')
-    plt.ylabel('Time per Trial [s]')
-    plt.grid(which='major')
-    plt.grid(which='minor', linestyle='--')
-    plt.title(titleString)
-    yMin, yMax = plt.ylim()
-    newYMin = 10 ** np.floor(np.log10(yMin))
-    plt.ylim(newYMin, yMax)
-    plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
-    plt.show()
-
-
-def plottingOfOvernightTestcasesBeamformer(fileName):
-    helpText, daten = readingInSAVES(fileName)
-
-    stringForLegend = []
-    hFig = plt.figure()
-    hAxesRelErr = hFig.add_subplot(3,2,5)
-    hAxesRelErr.set_ylabel('relative Error - infNorm')
-    hAxesRelErr.set_xlabel('trials [1]')
-
-    hAxesAbsErr = hFig.add_subplot(3,2,6)
-    hAxesAbsErr.set_ylabel('absolute Error - infNorm')
-    hAxesAbsErr.set_xlabel('trials [1]')
-
-    numberMethods = len(daten[0])
-    numberTrials = len(daten[1][0, :])
-    trials = np.arange(numberTrials)
-
-    # For barplot
-    withPerTrial = 0.75
-    widthBar = withPerTrial / numberMethods
-    offsetOfXAxes = withPerTrial / 2 - widthBar
-
-    # plotting error
-    for cnt in xrange(numberMethods):
-        stringForLegend.append(daten[0][cnt] + ' | Time=%1.2f' %(daten[6][cnt])) # For time consumption
-        hAxesRelErr.bar(trials + cnt * widthBar - offsetOfXAxes, daten[1][cnt, :], widthBar, label=daten[0][cnt]) # relative error
-        hAxesAbsErr.bar(trials + cnt * widthBar - offsetOfXAxes, daten[7][cnt, :], widthBar, label=daten[0][cnt]) # absolute error
-    hAxesAbsErr.legend()
-    hAxesAbsErr.set_yscale('log')
-    hAxesAbsErr.set_xticks(trials)
-
-    hAxesRelErr.set_yscale('log')
-    hAxesRelErr.set_xticks(trials)
-
-    # plotting time consumption
-    titelString = 'Performance Comparison, nMics = %s, nGridPoints = %s, nFreqs = %s.'\
-        '\n With time consumption factor in relation to the \noriginal r_beamfull_inverse'\
-        ' in the legend'\
-        '\n If a method works with manually spawn threads: nThreads = %s.'%(daten[3], daten[4], daten[5], daten[8])
-    plt.subplot(3,3,(1,5))
-    plottingTimeConsumptions(titelString, stringForLegend, daten[2])
-    hFig.canvas.set_window_title(fileName)
-
-
-def plottingTimeConsumptionOverSpecificOrdinate(dirName, ordinate='nMics'):
-    listOfFiles = glob.glob(dirName + '/*.sav')
-    helpText, daten = readingInSAVES(listOfFiles[0])
-    arrayOrdinate = np.zeros(len(listOfFiles))
-    arrayTimeConsump = np.zeros((len(listOfFiles), len(daten[6])))
-    cnt = 0
-    for currentfile in listOfFiles:
-        helpText, daten = readingInSAVES(currentfile)
-        if ordinate == 'nMics':
-            arrayOrdinate[cnt] = daten[3]
-        arrayTimeConsump[cnt, :] = daten[6]
-        cnt += 1
-    indSorted = np.argsort(arrayOrdinate)
-    plt.semilogy(arrayOrdinate[indSorted], arrayTimeConsump[indSorted, :], marker='o')#, label=trialedFuncs[cnt], linestyle=lineStyle, marker='o')
-    plt.legend(daten[0])
-    plt.grid(which='major')
-    plt.grid(which='minor', linestyle='--')
-    plt.xlabel(ordinate)
-    plt.ylabel('Mean of Time per Trial [s] (normalized to faverage)')
-    plt.title('Mean of TimeConsumption over ' + ordinate + '\n asd')
-    plt.xticks(arrayOrdinate)
-
-
-def plottingOfOvernightTestcasesOnFAVERAGE(fileName):
-    helpText, daten = readingInSAVES(fileName)
-    titleString = 'NUMBA - using "faverage"\n' \
-        'nAverages=%s, nFreqbins=%s, nMics=%s, nTest=%s' % (daten[2], daten[3], daten[4], daten[5])
-    plottingTimeConsumptions(titleString, daten[0], daten[1])
-
-
-def plotAllAvailableTestCases(dirName):
-    listOfFiles = glob.glob(dirName + '/*.sav')
-    for currentfile in listOfFiles:
-        try:
-            plottingOfOvernightTestcasesBeamformer(currentfile)
-        except:
-            print('Could not plot Testcase:' + currentfile)
-
-def saveAllCurrentlyOpenedFigures():
-    for cntFig in plt.get_fignums():
-        saveNameForPNGHelp = plt.figure(cntFig).canvas.get_window_title()
-        saveNameForPNG = saveNameForPNGHelp.replace('.sav', '.png')
-        plt.savefig(saveNameForPNG)
-
-#plottingTimeConsumptionOverSpecificOrdinate('Sicherung_DurchgelaufeneTests/faverage/InfluenceOfMics/')
-#plottingOfOvernightTestcasesBeamformer('Sicherung_DurchgelaufeneTests/.sav')
-#plotAllAvailableTestCases('Sicherung_DurchgelaufeneTests/damasSolver/')
-#plottingOfOvernightTestcasesBeamformer('Peter.sav')
-#saveAllCurrentlyOpenedFigures()
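The sharedFunctions.py helpers above persist their benchmark tuples with cPickle through text-mode file handles, which only works on Python 2. A minimal Python 3 sketch of the same save/load round-trip, assuming binary-mode files and the stdlib pickle module (illustration only, not part of Acoular):

```python
# Hedged Python 3 sketch of the round-trip the removed helpers perform.
import pickle

def saving_time_consumption(file_name, save_tuple):
    """Save save_tuple to '<file_name>.sav'; by convention the first entry
    is a string describing the tuple layout, as in the removed helper."""
    with open(file_name + '.sav', 'wb') as fi:   # binary mode for pickle
        pickle.dump(save_tuple, fi, protocol=pickle.HIGHEST_PROTOCOL)

def reading_in_saves(file_name):
    """Load a tuple written by saving_time_consumption."""
    with open(file_name, 'rb') as fi:
        data = pickle.load(fi)
    return data[0], data[1:]                     # (help text, payload)
```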
Binary file

Binary file
@@ -1,204 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-"""
-"""
-import time as tm
-import gc
-
-import numpy as np
-from numba import njit, guvectorize, complex128, void, prange
-
-import sharedFunctions as shFncs
-from beamformer import faverage # The benchmark (created with scipy.weave)
-from beamformer_withoutMP import faverage_OhneMP
-
-#%% Numba - njit
-@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]))
-def loops_Njit(csm, SpecAllChn):
-    nFreqs = csm.shape[0]
-    nMics = csm.shape[1]
-    for cntFreq in range(nFreqs):
-        for cntRow in range(nMics):
-            temp = np.conj(SpecAllChn[cntFreq, cntRow])
-            for cntColumn in range(nMics):
-                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
-    return csm
-
-@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]), parallel=True)
-def loops_Njit_Parallel(csm, SpecAllChn):
-    nFreqs = csm.shape[0]
-    nMics = csm.shape[1]
-    for cntFreq in range(nFreqs):
-        for cntRow in range(nMics):
-            temp = np.conj(SpecAllChn[cntFreq, cntRow])
-            for cntColumn in range(nMics):
-                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
-    return csm
-
-@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]), parallel=True)
-def loops_Njit_Parallel_Prange(csm, SpecAllChn):
-    nFreqs = csm.shape[0]
-    nMics = csm.shape[1]
-    for cntFreq in range(nFreqs):
-        for cntRow in prange(nMics):
-            temp = np.conj(SpecAllChn[cntFreq, cntRow])
-            for cntColumn in range(nMics):
-                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
-    return csm
-
-#%% create CSM via complex transpose of lower triangular matrix
-@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]))
-def loopsComplexTranspose_Numpy(csm, SpecAllChn):
-    nFreqs = csm.shape[0]
-    for cntFreq in range(nFreqs):
-        csm[cntFreq, :, :] += np.outer(np.conj(SpecAllChn[cntFreq, :]), SpecAllChn[cntFreq, :])
-    return csm
-
-@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]))
-def loopsOnlyTriangularMatrix_Njit(csm, SpecAllChn):
-    """ one could only build the lower triangular csm and then, after averaging
-    over all the ensenbles creating the whole csm by complex transposing.
-    One could maybe use sparse CSR/CSC matrices (even though the CSM is not too big, so advantages are maybe small.)
-    """
-    nFreqs = csm.shape[0]
-    nMics = csm.shape[1]
-    for cntFreq in range(nFreqs):
-        for cntRow in range(nMics):
-            temp = np.conj(SpecAllChn[cntFreq, cntRow])
-            for cntColumn in range(cntRow): # only half of the operations in respect to 'loops_Njit'
-                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
-    return csm
-
-#%% Numba - guvectorize
-
-# =============================================================================
-# I don't think that parallelizing over the mics is in this case feasible.
-# At least i can't think of a way to abstract the faverage procedure on one level below.
-# It is however feasible to parallelize over the frequencies
-# =============================================================================
-
-@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
-             nopython=True, target='cpu')
-def loops_GuvectorizeOverFreqs_singleThreadedCPU(csm, SpecAllChn, result):
-    nMics = csm.shape[0]
-    for cntRow in range(nMics):
-        temp = np.conj(SpecAllChn[cntRow])
-        for cntColumn in range(nMics):
-            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
-
-@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
-             nopython=True, target='parallel')
-def loops_GuvectorizeOverFreqs_multiThreadedCPU(csm, SpecAllChn, result):
-    nMics = csm.shape[0]
-    for cntRow in range(nMics):
-        temp = np.conj(SpecAllChn[cntRow])
-        for cntColumn in range(nMics):
-            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
-
-@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
-             nopython=True, target='cpu')
-def loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_singleThreadedCPU(csm, SpecAllChn, result):
-    nMics = csm.shape[0]
-    for cntRow in range(nMics):
-        temp = np.conj(SpecAllChn[cntRow])
-        for cntColumn in range(cntRow):
-            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
-
-@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
-             nopython=True, target='parallel')
-def loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_multiThreadedCPU(csm, SpecAllChn, result):
-    nMics = csm.shape[0]
-    for cntRow in range(nMics):
-        temp = np.conj(SpecAllChn[cntRow])
-        for cntColumn in range(cntRow):
-            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
-
-#%% MAIN
-listOfMics = [500, 700, 1000] # default: 64
-listOfNFreqs = [2**cnt for cnt in range(4, 11)] # default: 2048
-nTrials = 10
-
-
-#==============================================================================
-# The benchmark function 'faverage' and also other implementations of
-# the beamformer create a lot of overhead, which influences the computational
-# effort of the succeding function. This is mostly the case, if concurrent
-# calculations are done (multiple cores). So often the first trial of a new
-# function takes some time longer than the other trials.
-#==============================================================================
-
-#funcsToTrial = [loopsComplexTranspose_Numpy, loops_Njit, loops_Njit_Parallel, loops_Njit_Parallel_Prange,
-# loopsOnlyTriangularMatrix_Njit, loops_GuvectorizeOverFreqs_singleThreadedCPU,
-# loops_GuvectorizeOverFreqs_multiThreadedCPU,
-# loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_singleThreadedCPU,
-# loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_multiThreadedCPU,
-# faverage_OhneMP, faverage]
-funcsToTrial = [loopsOnlyTriangularMatrix_Njit, faverage]
-
-for nMics in listOfMics:
-    for nFreqs in listOfNFreqs:
-        # Init
-        print(10*'-' + 'New Test configuration: nMics=%s, nFreqs=%s' %(nMics, nFreqs) + 10*'-')
-        print(10*'-' + 'Creation of inputInputs' + 10*'-')
-
-        csm = np.zeros((nFreqs, nMics, nMics), np.complex128)
-        spectrumInput = np.random.rand(nFreqs, nMics) + \
-            1j*np.random.rand(nFreqs, nMics)
-
-        nameOfFuncsToTrial = map(lambda x: x.__name__, funcsToTrial)
-        nameOfFuncsForError = [funcName for funcName in nameOfFuncsToTrial if funcName != 'faverage']
-        maxRelativeDeviation = np.zeros((len(funcsToTrial), nTrials))
-        maxAbsoluteDeviation = np.zeros((len(funcsToTrial), nTrials))
-        timeConsumption = [[] for _ in range(len(funcsToTrial))]
-        indOfBaselineFnc = nameOfFuncsToTrial.index('faverage')
-
-        print(10*'-' + 'Onetime calculation of "faverage" for error reference' + 10*'-')
-        faverage(csm, spectrumInput)
-        resultReference = csm # For relative/absolute error
-        gc.collect()
-
-        # Testing
-        print(10*'-' + 'Testing of functions' + 10*'-')
-        cntFunc = 0
-        for func in funcsToTrial:
-            print(func.__name__)
-            for cntTrials in xrange(nTrials):
-                csm = np.zeros((nFreqs, nMics, nMics), np.complex128)
-                resultHelp = np.zeros((nFreqs, nMics, nMics), np.complex128)
-                if func.__name__ == 'faverage' or func.__name__ == 'faverage_OhneMP':
-                    t0 = tm.time()
-                    func(csm, spectrumInput)
-                    t1 = tm.time()
-                    result = csm
-                elif func.__name__ == 'loops_GuvectorizeOverFreqs':
-                    t0 = tm.time()
-                    func(csm, spectrumInput, resultHelp)
-                    t1 = tm.time()
-                    result = resultHelp
-                else:
-                    t0 = tm.time()
-                    output = func(csm, spectrumInput)
-                    t1 = tm.time()
-                    result = output
-                timeConsumption[cntFunc].append(t1 - t0)
-                relativeDiffBetweenNewCodeAndRef = (result - resultReference) / (result + resultReference) * 2 # error in relation to the resulting value
-                maxRelativeDeviation[cntFunc, cntTrials] = np.amax(np.amax(np.amax(abs(relativeDiffBetweenNewCodeAndRef), axis=0), axis=0), axis=0) + 10.0**-20 # relative error in inf-norm
-                maxAbsoluteDeviation[cntFunc, cntTrials] = np.amax(np.amax(np.amax(abs(result - resultReference), axis=0), axis=0), axis=0) + 10.0**-20 # absolute error in inf-norm
-            cntFunc += 1
-        factorTimeConsump = [np.mean(timeConsumption[cnt]) for cnt in range(0, len(funcsToTrial))] \
-            / np.mean(timeConsumption[indOfBaselineFnc])
-
-        # Save the current test-config as .sav
-        helpString = 'The order of the variables is: \n nameOfFuncsToTrial \n maxRelativeDeviation'\
-            '\n timeConsumption [nFuncs, nTrials] \n nMics \n nGridPoints \n nFreqs '\
-            '\n Factor of time consumption (in relation to the original .cpp) \n maxAbsoluteDeviation \n nThreadsGlobal'
-        saveTupel = (helpString, nameOfFuncsToTrial, maxRelativeDeviation, timeConsumption,
-                     nMics, 0, nFreqs, factorTimeConsump, maxAbsoluteDeviation, 0)
-        stringParameters = 'faverage_TestcasesTimeConsumption_nMics%s_nFreqs%s_nTrials%s' %(nMics, nFreqs, nTrials)
-
-# stringSaveName = 'Peter'
-        stringSaveName = 'Sicherung_DurchgelaufeneTests/faverage/' + stringParameters
-
-        shFncs.savingTimeConsumption(stringSaveName, saveTupel) # saving as "stringSaveName.sav"
-# shFncs.plottingOfOvernightTestcasesBeamformer(stringSaveName + '.sav') # plot of the current test-config
-
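Every faverage variant benchmarked in the removed script above implements the same per-frequency rank-1 update of the cross-spectral matrix, csm[f, i, j] += conj(spec[f, i]) * spec[f, j]. A minimal NumPy sketch of that operation (illustration only, not Acoular's implementation):

```python
# Hedged illustration of the operation the removed benchmark compares:
# a per-frequency rank-1 update of the cross-spectral matrix (CSM).
import numpy as np

def csm_update(csm, spec):
    """csm: (nFreqs, nMics, nMics) complex, spec: (nFreqs, nMics) complex."""
    # outer product conj(spec[f]) x spec[f] for every frequency f
    csm += np.einsum('fi,fj->fij', np.conj(spec), spec)
    return csm

# tiny usage example
nfreqs, nmics = 4, 3
csm = np.zeros((nfreqs, nmics, nmics), dtype=np.complex128)
spec = np.random.rand(nfreqs, nmics) + 1j * np.random.rand(nfreqs, nmics)
csm_update(csm, spec)
```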
@@ -1,182 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-"""
-Checking various implementations of the damas solver
-
-
-Versions used in this script:
-numba=0.34.0
-python=2.7.13
-"""
-import time as tm
-import threading
-import gc
-
-import numpy as np
-import numba as nb
-
-import sharedFunctions as shFncs
-from beamformer import gseidel1 # The benchmark (created with scipy.weave)
-#from beamformer_withoutMP import r_beamfull_inverse_OhneMP # also created with scipy.weave, but WITHOUT using multiple cores via OpenMP
-
-@nb.njit(nb.float32[:](nb.float32[:,:], nb.float32[:], nb.float32[:], nb.int64), cache=True)
-def njit_pure(A, dirtyMap, damasSolution, nIterations):
-    nGridPoints = len(dirtyMap)
-    for cntIter in xrange(nIterations):
-        for cntGrid in xrange(nGridPoints):
-            solHelp = np.float32(0)
-            for cntGridHelp in xrange(cntGrid):
-                solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
-            for cntGridHelp in xrange(cntGrid + 1, nGridPoints):
-                solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
-            solHelp = dirtyMap[cntGrid] - solHelp
-            if solHelp > 0.0:
-                damasSolution[cntGrid] = solHelp
-            else:
-                damasSolution[cntGrid] = 0.0
-    return damasSolution
-
-@nb.njit(nb.float32[:](nb.float32[:,:], nb.float32[:], nb.float32[:], nb.int64), parallel=True)
-def njit_parallel(A, dirtyMap, damasSolution, nIterations):
-    nGridPoints = len(dirtyMap)
-    for cntIter in xrange(nIterations):
-        for cntGrid in xrange(nGridPoints):
-            solHelp = np.float32(0)
-            for cntGridHelp in xrange(cntGrid):
-                solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
-            for cntGridHelp in xrange(cntGrid + 1, nGridPoints):
-                solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
-            solHelp = dirtyMap[cntGrid] - solHelp
-            if solHelp > 0.0:
-                damasSolution[cntGrid] = solHelp
-            else:
-                damasSolution[cntGrid] = 0.0
-    return damasSolution
-
-@nb.guvectorize([(nb.float32[:,:], nb.float32[:], nb.int64, nb.float32[:])], '(g,g),(g),()->(g)', cache=True)
-def guvectorizeOverFreqs(A, dirtyMap, nIterations, damasSolution):
-    nGridPoints = len(dirtyMap)
-    for cntIter in xrange(nIterations):
-        for cntGrid in xrange(nGridPoints):
-            solHelp = np.float32(0)
-            for cntGridHelp in xrange(cntGrid):
-                solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
-            for cntGridHelp in xrange(cntGrid + 1, nGridPoints):
-                solHelp += A[cntGrid, cntGridHelp] * damasSolution[cntGridHelp]
-            solHelp = dirtyMap[cntGrid] - solHelp
-            if solHelp > 0.0:
-                damasSolution[cntGrid] = solHelp
-            else:
-                damasSolution[cntGrid] = 0.0
-
-
-def njit_multiThreading(A, dirtyMap, damasSolution, nIterations):
-    nGridPoints = len(dirtyMap)
-    for cntIter in xrange(nIterations):
-        for cntGrid in xrange(nGridPoints):
-            sumHelp = np.zeros((2), np.float32)
-            threadLowerSum = threading.Thread(target=njit_coreSum, args=(A[cntGrid, :], damasSolution, 0, cntGrid, sumHelp, 0))
-            threadUpperSum = threading.Thread(target=njit_coreSum, args=(A[cntGrid, :], damasSolution, cntGrid+1, nGridPoints, sumHelp, 1))
-            threadLowerSum.start()
-            threadUpperSum.start()
-            threadLowerSum.join()
-            threadUpperSum.join()
-            solHelp = np.float32(dirtyMap[cntGrid] - (sumHelp[0] + sumHelp[1]))
-            if solHelp > 0.0:
-                damasSolution[cntGrid] = solHelp
-            else:
-                damasSolution[cntGrid] = 0.0
-    return damasSolution
-@nb.njit(nb.void(nb.float32[:], nb.float32[:], nb.int64, nb.int64, nb.float32[:], nb.int64), cache=True, nogil=True)
-def njit_coreSum(A, damasSolution, start, stop, result, indRes):
-    for cntGridHelp in xrange(start, stop):
-        result[indRes] += A[cntGridHelp] * damasSolution[cntGridHelp]
-
-#%% MAIN
-listOfMics = [0] # not really needed
-listGridPoints = [500, 1000, 2000, 5000, 10000] # Standard value: 12000 # The number of gridpoints doesn't seeme to have to great of an influence
-nTrials = 10
-nIterations = 10
-listOfNFreqs = [20]
-
-funcsToTrial = [njit_pure, njit_parallel, guvectorizeOverFreqs, njit_multiThreading, gseidel1]
-for nMics in listOfMics:
-    for nGridPoints in listGridPoints:
-        for nFreqs in listOfNFreqs:
-            # Init
-            print(10*'-' + 'New Test configuration: nMics=%s, nGridPoints=%s, nFreqs=%s' %(nMics, nGridPoints, nFreqs) + 10*'-')
-            print(10*'-' + 'Creation of inputInputs' + 10*'-')
-
-            A = np.float32(np.random.rand(1, nGridPoints, nGridPoints)) #A = np.float32(np.ones((1, nGridPoints, nGridPoints)))
-# for cntFreqs in range(nFreqs):
-# A[cntFreqs, :, :] += A[cntFreqs, :, :].T.conj()
-            dirtyMap = np.float32(np.random.rand(1, nGridPoints)) #dirtyMap = np.float32(np.ones((1, nGridPoints)))
-            damasSolution = np.zeros((nGridPoints), np.float32)
-
-            # create nFreqs times the same matrix for comparing reasons
-            A = np.tile(A, [nFreqs, 1, 1])
-            dirtyMap = np.tile(dirtyMap, [nFreqs, 1])
-
-            nameOfFuncsToTrial = map(lambda x: x.__name__, funcsToTrial)
-            nameOfFuncsForError = [funcName for funcName in nameOfFuncsToTrial if funcName != 'gseidel1']
-            maxRelativeDeviation = np.zeros((len(funcsToTrial), nTrials))
-            maxAbsoluteDeviation = np.zeros((len(funcsToTrial), nTrials))
-            timeConsumption = [[] for _ in range(len(funcsToTrial))]
-            indOfBaselineFnc = nameOfFuncsToTrial.index('gseidel1')
-
-            print(10*'-' + 'Onetime calculation of error reference' + 10*'-')
-            gseidel1(A[0,:,:], dirtyMap[0,:], damasSolution, nIterations)
-            resultReference = damasSolution # For relative/absolute error
-            gc.collect()
-
-            # Testing
-            print(10*'-' + 'Testing of functions' + 10*'-')
-            cntFunc = 0
-            for func in funcsToTrial:
-                print(func.__name__)
-                for cntTrials in xrange(nTrials):
-                    damasSolution = np.zeros((1, nGridPoints), np.float32)
-                    damasSolution = np.tile(damasSolution, [nFreqs, 1])
-                    if func.__name__ == 'guvectorizeOverFreqs':
-                        t0 = tm.time()
-                        func(A, dirtyMap, nIterations, damasSolution)
-                        t1 = tm.time()
-                        result = damasSolution[0, :]
-                    elif func.__name__ == 'gseidel1':
-                        t0 = tm.time()
-                        for cntFreqsHelp in xrange(nFreqs):
-                            func(A[cntFreqsHelp,:,:], dirtyMap[cntFreqsHelp,:], damasSolution[cntFreqsHelp,:], nIterations)
-                        t1 = tm.time()
-                        result = damasSolution[0, :]
-                    else:
-                        t0 = tm.time()
-                        for cntFreqsHelp in xrange(nFreqs):
-                            output = func(A[cntFreqsHelp,:,:], dirtyMap[cntFreqsHelp,:], damasSolution[cntFreqsHelp,:], nIterations)
-                        t1 = tm.time()
-                        result = output
-                    timeConsumption[cntFunc].append(t1 - t0)
-                    relativeDiffBetweenNewCodeAndRef = (result - resultReference) / (result + resultReference) * 2 # error in relation to the resulting value
-                    maxRelativeDeviation[cntFunc, cntTrials] = np.amax(abs(relativeDiffBetweenNewCodeAndRef), axis=0) # relative error in inf-norm
-                    maxAbsoluteDeviation[cntFunc, cntTrials] = np.amax(abs(result - resultReference), axis=0) # absolute error in inf-norm
-                cntFunc += 1
-            factorTimeConsump = [np.mean(timeConsumption[cnt]) for cnt in range(0, len(funcsToTrial))] \
-                / np.mean(timeConsumption[indOfBaselineFnc])
-
-            # Save the current test-config as .sav
-            helpString = 'The order of the variables is: \n nameOfFuncsToTrial \n maxRelativeDeviation'\
-                '\n timeConsumption [nFuncs, nTrials] \n nMics \n nGridPoints \n nFreqs '\
-                '\n Factor of time consumption (in relation to the original .cpp) \n maxAbsoluteDeviation \n nThreadsGlobal'
-            saveTupel = (helpString, nameOfFuncsToTrial, maxRelativeDeviation, timeConsumption,
-                         nMics, nGridPoints, nFreqs, factorTimeConsump, maxAbsoluteDeviation, 0)
-            stringParameters = 'OvernightTestcasesBeamformer_nMics%s_nGridPoints%s_nFreqs%s_nTrials%s' %(nMics, nGridPoints, nFreqs, nTrials)
-
-# stringSaveName = 'Peter'
-            stringSaveName = 'Sicherung_DurchgelaufeneTests/damasSolver/' + stringParameters
-# stringSaveName = 'Sicherung_DurchgelaufeneTests/Beamformer/AllImportantMethods/' + stringParameters
-# stringSaveName = 'Sicherung_DurchgelaufeneTests/Beamformer/EinflussGridpoints/AMDFX6100/' + stringParameters
-# stringSaveName = 'Sicherung_DurchgelaufeneTests/Beamformer/JitPrange/' + stringParameters
-# stringSaveName = 'Sicherung_DurchgelaufeneTests/Beamformer/Multithreading_02Threads/' + stringParameters
-
-            shFncs.savingTimeConsumption(stringSaveName, saveTupel) # saving as "stringSaveName.sav"
-
-# shFncs.plottingOfOvernightTestcasesBeamformer(stringSaveName + '.sav') # plot of the current test-config
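The removed script above benchmarks variants of a single operation: a Gauss-Seidel sweep with a non-negativity constraint, the core iteration of DAMAS deconvolution. A minimal plain-NumPy sketch of that sweep, mirroring the removed njit_pure loop (illustration only, not Acoular's gseidel implementation):

```python
# Hedged illustration of the iteration the removed benchmark compares:
# a Gauss-Seidel sweep with a non-negativity constraint, as used for
# DAMAS deconvolution. Plain NumPy, assuming a unit-diagonal PSF matrix
# exactly as the removed njit_pure loop does.
import numpy as np

def gauss_seidel_nonneg(A, dirty_map, solution, n_iterations):
    """A: (nGrid, nGrid), dirty_map: (nGrid,), solution: (nGrid,), updated in place."""
    n_grid = len(dirty_map)
    for _ in range(n_iterations):
        for i in range(n_grid):
            # sum over all other grid points using the partially updated solution
            off_diag = A[i] @ solution - A[i, i] * solution[i]
            # clip the update at zero (non-negativity constraint)
            solution[i] = max(dirty_map[i] - off_diag, 0.0)
    return solution
```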