pybatts-mathematics 1.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2021 Frank Mobley, Gregory Bowers
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,12 @@
1
+ Metadata-Version: 2.1
2
+ Name: pybatts-mathematics
3
+ Version: 1.0.5
4
+ Summary: Python representations of mathematical functions and methods that are required for acoustic processing.
5
+ Author-Email: "Dr. Frank Mobley" <frank.mobley.1@afrl.af.mil>
6
+ License: MIT
7
+ Requires-Python: >=3.9
8
+ Requires-Dist: numpy>=1.21.5
9
+ Requires-Dist: pymeasurable_objects>1.0.0
10
+ Description-Content-Type: text/markdown
11
+
12
+ Create README.md for Mathematics
@@ -0,0 +1 @@
1
+ Create README.md for Mathematics
@@ -0,0 +1,34 @@
1
+ [project]
2
+ name = "pybatts-mathematics"
3
+ version = "1.0.5"
4
+ description = "Python representations of mathematical functions and methods that are required for acoustic processing."
5
+ authors = [
6
+ { name = "Dr. Frank Mobley", email = "frank.mobley.1@afrl.af.mil" },
7
+ ]
8
+ dependencies = [
9
+ "numpy>=1.21.5",
10
+ "pymeasurable_objects>1.0.0",
11
+ ]
12
+ requires-python = ">=3.9"
13
+ readme = "README.md"
14
+ keywords = []
15
+
16
+ [project.license]
17
+ text = "MIT"
18
+
19
+ [project.urls]
20
+
21
+ [build-system]
22
+ requires = [
23
+ "pdm-backend",
24
+ ]
25
+ build-backend = "pdm.backend"
26
+
27
+ [tool.pdm]
28
+ distribution = true
29
+ package-dir = "src"
30
+
31
+ [tool.pdm.build]
32
+ excludes = [
33
+ "tests",
34
+ ]
@@ -0,0 +1,285 @@
1
+ import numpy as np
2
+ from scipy.special import erf, erfinv
3
+
4
+ """
5
+ These classes and functions fit a variety of model functions to the provided data. The ErfFitParameters class
6
+ also inverts the fitted function to determine the x-value that produces a specific y-value. These classes map to
7
+ the C# code; although scipy.optimize.curve_fit can also produce the coefficients, these functions compute the
8
+ coefficients manually for the three functions implemented for the auditory detection work.
9
+ """
10
+
11
+
12
+ class ErfFitParameters:
13
+ def __init__(self, a: float = 0, b: float = 0, c: float = 0, d: float = 0, n: int = 0, sigma: float = 0):
14
+ self._amplitude = a
15
+ self._horizontal_offset = b
16
+ self._vertical_offset = d
17
+ self._scale = c
18
+ self._iteration_count = n
19
+ self._uncertainty = sigma
20
+
21
+ @property
22
+ def amplitude(self):
23
+ return self._amplitude
24
+
25
+ @amplitude.setter
26
+ def amplitude(self, value):
27
+ self._amplitude = value
28
+
29
+ @property
30
+ def horizontal_offset(self):
31
+ return self._horizontal_offset
32
+
33
+ @horizontal_offset.setter
34
+ def horizontal_offset(self, value):
35
+ self._horizontal_offset = value
36
+
37
+ @property
38
+ def vertical_offset(self):
39
+ return self._vertical_offset
40
+
41
+ @vertical_offset.setter
42
+ def vertical_offset(self, value):
43
+ self._vertical_offset = value
44
+
45
+ @property
46
+ def scale(self):
47
+ return self._scale
48
+
49
+ @scale.setter
50
+ def scale(self, value):
51
+ self._scale = value
52
+
53
+ @property
54
+ def iteration_count(self):
55
+ return self._iteration_count
56
+
57
+ @iteration_count.setter
58
+ def iteration_count(self, value):
59
+ self._iteration_count = value
60
+
61
+ @property
62
+ def uncertainty(self):
63
+ return self._uncertainty
64
+
65
+ @uncertainty.setter
66
+ def uncertainty(self, value):
67
+ self._uncertainty = value
68
+
69
+ def value(self, x):
70
+ return self.amplitude * erf((x-self.horizontal_offset) / self.scale) + self.vertical_offset
71
+
72
+ def value_inverted(self, y):
73
+ return self.horizontal_offset + self.scale * erfinv((y-self.vertical_offset)/self.amplitude)
74
+
75
+
76
+ def erf_fit(x, y, init_amp=1, init_vertical_offset=0, init_horizontal_offset=0, scale=2.5, max_iterations=100,
77
+ error_tolerance=1e-8):
78
+ """
79
+ Determine a least squares regression curve fit to the input arrays through an error function of the form:
80
+ y = a * erf((x-b)/c)+d
81
+
82
+ x : double, array-like
83
+ the independent variable list
84
+ y : double, array-like
85
+ the dependent variable list
86
+ init_amp : double
87
+ the initial guess for the amplitude of the error function (a)
88
+ init_vertical_offset : double
89
+ the initial guess for the vertical offset of the error function (d)
90
+ init_horizontal_offset : double
91
+ the initial guess for the horizontal offset of the error function (b)
92
+ scale : double
93
+ the initial guess for the scale (slope) of the error function (c)
94
+ max_iterations : int
95
+ the maximum number of iterations before the algorithm terminates
96
+ error_tolerance : double
97
+ the required minimum error prior to the termination of the algorithm
98
+
99
+ returns : tuple
100
+ the coefficients (a, b, c, d), iteration count, and error
101
+ """
102
+
103
+ if isinstance(x, list):
104
+ x = np.array(x)
105
+ if isinstance(y, list):
106
+ y = np.array(y)
107
+
108
+ # Set the initial conditions for the least squares regression
109
+ current_error = 1000
110
+ n = 0
111
+ a = init_amp
112
+ b = init_horizontal_offset
113
+ c = scale
114
+ d = init_vertical_offset
115
+
116
+ # The Jacobian of the model with respect to the coefficients; it has one column per unknown
117
+ # coefficient.
118
+ gradient = np.zeros(shape=(len(x), 4))
119
+
120
+ # The difference between the measured data and the curve fit evaluated with the current coefficients
121
+ residuals = [0 for num in x]
122
+
123
+ # The current evaluation of the error function at the given independent values using the previous coefficients
124
+ current_approximation = [0 for num in x]
125
+
126
+ # Loop until we either meet the maximum number of iterations or the error between the current and previous
127
+ # approximations is less than the error_tolerance
128
+ while n < max_iterations and current_error > error_tolerance:
129
+
130
+ # Fill the gradient and current approximation values
131
+ gradient[:, 0] = erf((x - b) / c)
132
+ gradient[:, 1] = -(2 * a * np.exp(-(x - b)**2.0 / c / c) / np.sqrt(np.pi) / c)
133
+ gradient[:, 2] = -(2 * a * (x - b) * np.exp(-(x - b)**2.0 / c / c)) / np.sqrt(np.pi) / c / c
134
+ gradient[:, 3] = 1.0
135
+ current_approximation = a * erf((x - b) / c) + d
136
+ residuals = y - current_approximation
137
+
138
+ # Solve the Gauss-Newton normal equations: delta = (J^T J)^-1 J^T r, where J is the gradient matrix
139
+ grad_mul = gradient.transpose().dot(gradient)
140
+ inverse = np.linalg.inv(grad_mul)
141
+ pseudo_inverse = inverse.dot(gradient.transpose())
142
+ delta_coefficients = pseudo_inverse.dot(residuals)
143
+ current_error = np.max(np.abs(delta_coefficients))  # largest coefficient adjustment magnitude
144
+
145
+ # Adjust the coefficients
146
+ a += delta_coefficients[0]
147
+ b += delta_coefficients[1]
148
+ c += delta_coefficients[2]
149
+ d += delta_coefficients[3]
150
+
151
+ # Update the iteration count
152
+ n += 1
153
+
154
+ return ErfFitParameters(a, b, c, d, n - 1, current_error)
155
+
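A usage sketch for erf_fit follows. The synthetic data, noise level, and initial guesses are illustrative assumptions, and erf_fit/ErfFitParameters are assumed to be in scope because the module's import path is not shown in this diff; convergence of the Gauss-Newton loop depends on the starting values.

```python
import numpy as np
from scipy.special import erf

# Illustrative only: synthetic data generated from a known error-function curve plus noise.
rng = np.random.default_rng(0)
x = np.linspace(-5.0, 5.0, 200)
y = 2.0 * erf((x - 0.5) / 1.5) + 3.0 + 0.01 * rng.normal(size=x.size)

# Assumes erf_fit and ErfFitParameters from this module are in scope.
fit = erf_fit(x, y, init_amp=1.5, init_vertical_offset=2.5, init_horizontal_offset=0.0, scale=2.0)
print(fit.amplitude, fit.horizontal_offset, fit.scale, fit.vertical_offset)

# Invert the fitted curve: the x-value where the fit crosses y = 3.0 (about the horizontal offset, 0.5).
print(fit.value_inverted(3.0))
```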
156
+
157
+ def gaussian_fit(x, y, initial_variance=0.1, initial_center=1, initial_amp=100, error_tolerance=1e-6,
158
+ max_iterations=100):
159
+ """
160
+ Using the Newton approximation, start with the initial guesses for the output parameters and use least-squares
161
+ regression to fit the data provided in the input arrays with a normal distribution (Gaussian function), i.e.
162
+ y = a * np.exp(-((x-b)**2)/c)
163
+
164
+ x : double, array-like
165
+ The collection of independent variables values
166
+ y : double, array-like
167
+ The collection of dependent variable values
168
+ initial_variance : double, default value=0.1
169
+ The initial width of the normal distribution
170
+ initial_center : double, default value=1
171
+ The initial location of the center of the normal distribution
172
+ initial_amp : double, default value=100
173
+ The initial amplitude of the normal distribution
174
+ error_tolerance : double, default value=1e-6
175
+ The minimum difference between the variables that results in the termination of the fitting
176
+ max_iterations : int, default value=100
177
+ The maximum number of iterations prior to the termination of the fitting
178
+
179
+ returns a, b, c, iteration count, error
180
+ """
182
+
183
+ # Ensure that we are using arrays rather than lists
184
+ if isinstance(x, list):
185
+ x = np.array(x)
186
+ if isinstance(y, list):
187
+ y = np.array(y)
188
+
189
+ # Initialize the variables
190
+ current_error = 1000
191
+ iteration_count = 0
192
+ a = initial_amp
193
+ b = initial_center
194
+ c = initial_variance
195
+
196
+ # Setup the arrays and matrices that will hold the data
197
+ gradient = np.zeros(shape=(len(x), 3))
198
+
199
+ # Loop until either the error or iteration termination condition is reached
200
+ while current_error > error_tolerance and iteration_count < max_iterations:
201
+
202
+ # Loop through the elements of the input array and build up the objects
203
+ gradient[:, 0] = np.exp(-(x - b)**2.0 / c)
204
+ gradient[:, 1] = (2 * a * (x - b) / c) * np.exp(-(b - x)**2 / c)
205
+ gradient[:, 2] = ((a * (b - x)**2.0) / (c**2.0)) * np.exp(-(b - x)**2.0 / c)
206
+ current_y = a * np.exp(-(x - b)**2.0 / c)
207
+ residuals = y - current_y
208
+
209
+ # Determine the adjustments to the coefficients
210
+ pseudo_inverse = np.linalg.pinv(gradient)
211
+ delta_coefficients = pseudo_inverse.dot(residuals)
212
+ current_error = np.mean(abs(residuals))
213
+ a += delta_coefficients[0]
214
+ b += delta_coefficients[1]
215
+ c += delta_coefficients[2]
216
+
217
+ # Increment the iteration count
218
+ iteration_count += 1
219
+
220
+ return a, b, c, (iteration_count - 1), current_error
221
+
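A comparable sketch for gaussian_fit, again with assumed synthetic data and illustrative starting values; the function is assumed to be in scope since the module path is not shown in this diff.

```python
import numpy as np

# Illustrative only: samples of a Gaussian bump y = a * exp(-(x - b)**2 / c).
x = np.linspace(-2.0, 6.0, 120)
y = 80.0 * np.exp(-(x - 2.0) ** 2 / 0.5)

a, b, c, iterations, error = gaussian_fit(x, y, initial_variance=0.4, initial_center=1.5, initial_amp=70.0)
print(a, b, c, iterations, error)  # expect roughly 80.0, 2.0, 0.5
```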
222
+
223
+ def gaussian_fit_with_floor(x, y, initial_variance=0.1, initial_center=1, initial_amp=100, initial_floor=10,
224
+ error_tolerance=1e-6, max_iterations=100):
225
+ """
226
+ Using the Newton approximation, start with the initial guesses for the output parameters and use least-squares
227
+ regression to fit the data provided in the input arrays with a normal distribution (Gaussian function) plus a constant floor, i.e.
228
+ y = a * np.exp(-((x-b)**2)/c) + d
229
+
230
+ x : double, array-like
231
+ The collection of independent variables values
232
+ y : double, array-like
233
+ The collection of dependent variable values
234
+ initial_variance : double, default value=0.1
235
+ The initial width of the normal distribution
236
+ initial_center : double, default value=1
237
+ The initial location of the center of the normal distribution
238
+ initial_amp : double, default value=100
239
+ The initial amplitude of the normal distribution
240
+ initial_floor : double, default value=10
241
+ The initial floor of the normal distribution
242
+ error_tolerance : double, default value=1e-6
243
+ The minimum difference between the variables that results in the termination of the fitting
244
+ max_iterations : int, default value=100
245
+ The maximum number of iterations prior to the termination of the fitting
246
+
247
+ returns a, b, c, d, iteration count, error
248
+ """
249
+
250
+ if isinstance(x, list):
251
+ x = np.array(x)
252
+ if isinstance(y, list):
253
+ y = np.array(y)
254
+
255
+ # Setup the present variables
256
+ current_error = 1000
257
+ iteration_count = 0
258
+ a = initial_amp
259
+ b = initial_center
260
+ c = initial_variance
261
+ d = initial_floor
262
+
263
+ # Create the objects that will hold the information regarding the values during the regression
264
+ gradient = np.zeros(shape=(len(x), 4))
265
+
266
+ # Loop until the terminating conditions are reached
267
+ while current_error > error_tolerance and iteration_count < max_iterations:
268
+ gradient[:, 0] = np.exp(-(x - b)**2.0 / c)
269
+ gradient[:, 1] = (2 * a * (x - b) / c) * np.exp(-(b - x)**2.0 / c)
270
+ gradient[:, 2] = ((a * (b - x)**2.0) / (c**2.0)) * np.exp(-(b - x)**2.0 / c)
271
+ gradient[:, 3] = 1.0
272
+
273
+ current_y = a * np.exp(-(x - b)**2.0 / c) + d
274
+ residuals = y - current_y
275
+
276
+ # Invert the matrices for the determination of the adjustments for the coefficients.
277
+ pseudo_inverse = np.linalg.inv(gradient.transpose().dot(gradient)).dot(gradient.transpose())
278
+ delta_coefficients = pseudo_inverse.dot(residuals)
279
+ current_error = np.mean(abs(residuals))
280
+ a += delta_coefficients[0]
281
+ b += delta_coefficients[1]
282
+ c += delta_coefficients[2]
283
+ d += delta_coefficients[3]
284
+
+ # Increment the iteration count so the loop terminates at max_iterations
+ iteration_count += 1
285
+ return a, b, c, d, (iteration_count - 1), current_error
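The module docstring notes that scipy.optimize.curve_fit can produce the same coefficients; a hedged cross-check sketch for the floor variant follows (the synthetic data and starting values are assumptions, and gaussian_fit_with_floor is assumed to be in scope).

```python
import numpy as np
from scipy.optimize import curve_fit

def gauss_floor(x, a, b, c, d):
    # Same model as gaussian_fit_with_floor: a * exp(-(x - b)**2 / c) + d
    return a * np.exp(-(x - b) ** 2 / c) + d

x = np.linspace(-2.0, 6.0, 120)
y = gauss_floor(x, 80.0, 2.0, 0.5, 10.0)

# Manual Gauss-Newton fit from this module (assumed in scope) ...
manual = gaussian_fit_with_floor(x, y, initial_variance=0.4, initial_center=1.5,
                                 initial_amp=70.0, initial_floor=8.0)

# ... versus the SciPy reference fit from the same starting point.
reference, _ = curve_fit(gauss_floor, x, y, p0=[70.0, 1.5, 0.4, 8.0])
print(manual[:4])
print(reference)
```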
@@ -0,0 +1,88 @@
1
+ import numpy as np
2
+ from numpy import zeros, linalg, asarray, array
3
+
4
+ """
5
+ The numpy polyfit/polyval functions implement a solution that relies on a singular value decomposition of the coefficient
6
+ matrix, which has caused issues for certain matrices that are nearly singular (determinant close to zero).
7
+ These functions implement the least-squares regression to construct the coefficient matrix and determine the
8
+ coefficients in a more direct and manual way.
9
+ """
10
+
11
+
12
+ def regression(x, y, order):
13
+ """
14
+ Determine the polynomial regression for the set of data in x and y
15
+
16
+ x : double, array-like
17
+ the independent variables
18
+ y : double, array-like
19
+ the dependent variables
20
+ order : int
21
+ the maximum order of the polynomial
22
+
23
+ returns : double, array-like
24
+ The list of coefficients ordered from the lowest to the highest order term of the polynomial, i.e.
25
+ the c[0] coefficient corresponds to the x**0 term.
26
+ """
27
+
28
+ z = zeros(shape=(order + 1, order + 1))
29
+
30
+ # Build the matrix that forms the L.H.S. of the matrix inversion equation
31
+
32
+ for row in range(order + 1):
33
+ for col in range(order + 1):
34
+ for index in range(len(x)):
35
+ val = x[index] ** (row + col)
36
+ z[row, col] += val
37
+
38
+ # Form the vector that is the R.H.S. of the matrix inversion equation
39
+
40
+ a = zeros((order + 1,))
41
+
42
+ for row in range(order + 1):
43
+ for index in range(len(y)):
44
+ val = y[index] * x[index] ** row
45
+ a[row] += val
46
+
47
+ # Compute the coefficients as the inversion of Z and multiplication with a
48
+
49
+ c = linalg.inv(z).dot(a)
50
+
51
+ return c
52
+
53
+
54
+ def polynomial_value(c, x):
55
+ """
56
+ Determine the value of hte polynomial curve fit based on the coefficients that were returned with the regression
57
+ function
58
+
59
+ c : double, array-like
60
+ The collection of coefficients that were determined with regression
61
+ x : double, possible array-like
62
+
63
+ returns : double, possible array-like
64
+ the values of the polynomial at the provided x values
65
+ """
66
+ c = asarray(c)
67
+ c = c.flatten()
68
+
69
+ if isinstance(x, (int, float)):
70
+ y = 0
71
+
72
+ for i in range(len(c)):
73
+ y += c[i] * x ** i
74
+
75
+ return y
76
+
77
+ if isinstance(x, list):
78
+ x = array(x)
79
+
80
+ if isinstance(x, np.ndarray) or isinstance(x, list):
81
+ y = zeros(x.shape)
82
+
83
+ for j in range(len(x)):
84
+
85
+ for i in range(len(c)):
86
+ y[j] += c[i] * x[j] ** i
87
+
88
+ return y
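A brief sketch of the regression/polynomial_value pair on an assumed quadratic, with numpy.polyfit as an independent check (polyfit returns coefficients highest order first, so it is reversed for comparison); the functions are assumed to be in scope.

```python
import numpy as np

x = np.linspace(-3.0, 3.0, 50)
y = 1.0 + 2.0 * x - 0.5 * x ** 2  # assumed test polynomial

c = regression(x, y, order=2)          # expected roughly [1.0, 2.0, -0.5]
print(c)
print(polynomial_value(c, 1.5))        # evaluate the fit at a single point
print(np.polyfit(x, y, 2)[::-1])       # numpy's coefficients, reversed to lowest-order-first
```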
@@ -0,0 +1,326 @@
1
+ from datetime import datetime
2
+ from pymeasurable_objects.measurables import Measurable
3
+ import numpy as np
4
+ import pandas as pd
5
+ from scipy import ndimage as nd
6
+
7
+ """
8
+ This is a simple collection of functions to determine the interpolated values for a set of input values
9
+
10
+ @author: Dr. Frank Mobley, Gregory Bowers
11
+ """
12
+
13
+
14
+ def linear(
15
+ x1: float | int | datetime | Measurable = None, x2: float | int | datetime | Measurable = None,
16
+ y1: float | int | datetime | Measurable = None, y2: float | int | datetime | Measurable = None,
17
+ xt: float | int | datetime | Measurable = None
18
+ ) -> float | int | datetime | Measurable:
19
+ """
20
+ This function will compute the slope and intercept for a line between the first and second points given in the
21
+ arguments, then return the value that is linearly interpolated at the desired point.
22
+
23
+ x1 : double, int, datetime
24
+ the independent variable for the first point of the data to be interpolated
25
+ x2 : double, int, datetime
26
+ the independent variable for the second point of the data to be interpolated
27
+ y1 : double, int, measurable
28
+ the dependent variable for the first point of the data to be interpolated
29
+ y2 : double, int, measurable
30
+ the dependent variable for the second point of the data to be interpolated
31
+ xt : double, int, datetime
32
+ the independent variable value to the evaluated
33
+
34
+ returns : double, int, measurable
35
+ Returns the linearly interpolated value at the xt argument. If xt is a number, this function returns the
36
+ same type. If xt is a datetime, the expectation is that y1, and y2 are measurable and the interpolated
37
+ measurable is returned.
38
+ """
39
+ if all([isinstance(x, datetime) for x in [x1, x2, xt]]):
40
+ x1 = (datetime.combine(datetime.min, x1.time()) - datetime.min).total_seconds()
41
+ x2 = (datetime.combine(datetime.min, x2.time()) - datetime.min).total_seconds()
42
+ xt = (datetime.combine(datetime.min, xt.time()) - datetime.min).total_seconds()
43
+
44
+ if isinstance(y1, Measurable) and isinstance(y2, Measurable):
45
+ y1 = y1.underlying_value()
46
+ y2 = y2.underlying_value()
47
+
48
+ m = (y2 - y1) / (x2 - x1)
49
+ b = y1 - m * x1
50
+
51
+ return m * xt + b
52
+
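A small sketch of the linear helper on plain floats and on datetime abscissae (the Measurable path follows the same slope-intercept logic); the numbers are illustrative and the function is assumed to be in scope.

```python
from datetime import datetime

# Interpolate on the line through (0.0, 0.0) and (10.0, 5.0) at x = 2.5.
print(linear(x1=0.0, x2=10.0, y1=0.0, y2=5.0, xt=2.5))  # 1.25

# datetime abscissae are reduced to seconds within the day before interpolating.
t1 = datetime(2021, 6, 1, 12, 0, 0)
t2 = datetime(2021, 6, 1, 13, 0, 0)
tt = datetime(2021, 6, 1, 12, 30, 0)
print(linear(x1=t1, x2=t2, y1=10.0, y2=20.0, xt=tt))  # 15.0
```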
53
+ """
54
+ def bilinear(self, corner_sequence: Sequence = list(), coor_interest: Sequence = list()) -> float | int:
55
+ /// <summary>
56
+ /// Determine the value using a bilinear interpolation. This is accomplished with the
57
+ /// following geometric considerations:
58
+ /// (x2,y2,z2) o----------------o(x3,y3,z3)
59
+ /// | |
60
+ /// | |
61
+ /// | o(x,y) |
62
+ /// | |
63
+ /// | |
64
+ /// (x1,y1,z1) o----------------o(x4,y4,z4)
65
+ ///
66
+ /// To determine the value at (x,y) we first interpolate between (x1,y1) and (x2,y2) and
67
+ /// determine the value on that edge at y. This is repeated for the (x3,y3) and (x4,y4)
68
+ /// edge. Then the value for each edge is interpolated to determine the value for the
69
+ /// horizontal interpolation.
70
+ /// </summary>
71
+ /// <returns></returns>
72
+ if not all([isinstance(x, Sequence) for x in [corner_sequence, coor_interest]]):
73
+ raise TypeError("Incorrect argument type. bilinear takes python sequences as arguments.")
74
+ if not all([isinstance(x, {float, int})] for x in corner_sequence) and all(
75
+ isinstance(x, (float, int)) for x in coor_interest):
76
+ raise TypeError("Incorrect argument type. Passed sequences should only contain float or int data types.")
77
+ if not all([len(x) == 3] for x in corner_sequence):
78
+ raise ValueError("Incorrect argument lengths for required corner 3-dimensional coordinates. "
79
+ "Bilinear interpolation requires 3-dimensional coordinates.")
80
+ coor_1, coor_2, coor_3, coor_4 = corner_sequence
81
+ x, y = coor_interest
82
+ y1_to_4 = self.linear(coor_1[0], coor_4[0], coor_1[2], coor_4[2], x)
83
+ y2_to_3 = self.linear(coor_2[0], coor_3[0], coor_2[2], coor_3[2], x)
84
+ return self.linear(coor_2[1], coor_1[1], y2_to_3, y1_to_4, y)
85
+ """
86
+
87
+ """
88
+ Conducting acoustic measurements over large areas often requires significant numbers of conditions, or measurement
89
+ sites. In an effort to understand the acoustic area where airmen operate during routine maintenance, a measurement was
90
+ conducted on a single engine fighter aircraft. Due to the complexity of the measurement array, fewer sites were used
91
+ than required to completely characterize this region. In order to complete the analysis, a series of Python scripts
92
+ were developed to use nearest-neighbor interpolation of the sparse matrix, which was subsequently smoothed using a
93
+ bi-linear interpolation.
94
+
95
+ This collection of functions grew out of this research. If you intend to use this work, please reference:
96
+ Mobley, Frank S., Alan T. Wall, and Stephen C. Campbell. "Translating jet noise measurements to near-field level
97
+ maps with nearest neighbor bilinear smoothing interpolation." The Journal of the Acoustical Society of America 150.2
98
+ (2021): 687-693.
99
+ """
100
+
101
+
102
+ def bi_linear_smoothing(starting_mesh, measured_data, measured_x_index, measured_y_index,
103
+ tolerance: float = 1e-4, maximum_iterations: int = 1000,
104
+ verbose_info: bool = True):
105
+ """
106
+ This computes the bilinear average of the mesh until the RMSE reaches the
107
+ tolerance passed as an argument.
108
+
109
+ Parameters
110
+ ----------
111
+ :param verbose_info: bool
112
+ Flag detailing whether information is printed to the Python console during execution of the algorithm
113
+ :param starting_mesh : double array-like
114
+ the starting dense matrix determined from the nearest neighbor
115
+ :param tolerance : double
116
+ The minimum error level to finish the averaging
117
+ :param measured_data : DataFrame
118
+ A data frame with the X, Y, La values for each of the measured points
119
+ :param measured_x_index : array-like double
120
+ The X indices for where to insert the measured levels
121
+ :param measured_y_index : array-like double
122
+ The Y indices for where to insert the measured levels
123
+ :param maximum_iterations: int
124
+ The maximum number of iterations that we want to iterate through before returning an error.
125
+
126
+ Returns
127
+ -------
128
+ The dense matrix smoothed
129
+
130
+
131
+ """
132
+
133
+ zz = starting_mesh.copy()
134
+
135
+ # Perform the iterative smoothing
136
+ rmse = 100
137
+ i = 0
138
+
139
+ zz_last = zz.copy()
140
+
141
+ while rmse > tolerance and i < maximum_iterations:
142
+ # Replace the data with the measured information
143
+ zz[measured_y_index, measured_x_index] = measured_data
144
+
145
+ if verbose_info:
146
+ print('Starting iteration {}'.format(i + 1))
147
+
148
+ # Loop through the array and create an approximate of the interpolation
149
+ if verbose_info:
150
+ print('iterating over the surface')
151
+
152
+ npts = 3
153
+ for xidx in range(zz.shape[0]):
154
+ for yidx in range(zz.shape[1]):
155
+
156
+ # Add the central point
157
+ nn_mean = 0
158
+
159
+ # Set the count for the points in the average
160
+ n = 0
161
+
162
+ # Determine the span in each direction
163
+ span = int((npts - 1) / 2)
164
+
165
+ # Try to determine the value for the lower index of the y-axis
166
+ ylo = yidx - span
167
+ if ylo < 0:
168
+ ylo = 0
169
+
170
+ yhi = yidx + span
171
+ if yhi >= zz.shape[1]:
172
+ yhi = zz.shape[1] - 1
173
+
174
+ # Now the x-axis
175
+ xlo = xidx - span
176
+ if xlo < 0:
177
+ xlo = 0
178
+ xhi = xidx + span
179
+ if xhi >= zz.shape[0]:
180
+ xhi = zz.shape[0] - 1
181
+
182
+ for p in range(ylo, yhi + 1):
183
+ for q in range(xlo, xhi + 1):
184
+ nn_mean += zz[q, p]
185
+ n += 1
186
+
187
+ zz[xidx, yidx] = nn_mean / n
188
+
189
+ # Compute the convergence metric between this and the previous surface (standard deviation of the row-wise standard deviations of the difference)
190
+ rmse = np.std(np.std(zz - zz_last, axis=1))
191
+
192
+ if verbose_info:
193
+ print('RMSE:{:.5f}\n***************************'.format(rmse))
194
+
195
+ # Copy the current surface to the previous surface
196
+ zz_last = zz.copy()
197
+ i += 1
198
+ return zz, i, rmse
199
+
200
+
201
+ def nearest_neighbor_dense_sampling(true_data: pd.DataFrame, out_x: np.ndarray, out_y: np.ndarray,
202
+ smoothing_error_tolerance: float = 1e-5, iteration_max: int = 1000,
203
+ verbose_info: bool = True):
204
+ """
205
+ From research in the acoustic measurement of near-field noise around fifth generation fighter aircraft (see
206
+ Mobley, Frank S., Alan T. Wall, and Stephen C. Campbell. Translating jet noise measurements to near-field level maps
207
+ with nearest neighbor bilinear smoothing interpolation. The Journal of the Acoustical Society of America 150.2
208
+ (2021): 687-693) this code was created. Initially it was focused on the methods and data storage for the research
209
+ within the paper. However, this version has been made more generic to assist in using this interpolation method
210
+ across a wider range of data.
211
+
212
+ To use this you must supply a DataFrame that contains the sparse matrix in column representation. You must have a
213
+ column called 'x', 'y', and 'z'. These will be used to determine the 'measured', 'known', or 'true' points on the
214
+ sparse surface. This object is the 'true_data' argument of the function.
215
+
216
+ Parameters
217
+ ----------
218
+ :param verbose_info: bool
219
+ A flag to determine whether debug information is shown in the Python console
220
+ :param true_data : Pandas.DataFrame
221
+ This is the sparse data that we want to interpolate. It must contain a column for the 'x', 'y', and 'z' data
222
+ elements. These will be used in the replacement step.
223
+ :param out_x : Numpy.ndarray
224
+ This is the desired output values for the x-axis of the dense matrix surface
225
+ :param out_y : Numpy.ndarray
226
+ This is the desired output values for the y-axis of the dense matrix surface
227
+ :param smoothing_error_tolerance : double
228
+ The error tolerance that terminates the smoothing
229
+ :param iteration_max : int
230
+ The maximum number of iterations allowed during the construction of the dense mesh
231
+
232
+ Returns
233
+ -------
234
+ tuple containing the dense matrix x, y, smoothed z, and rough z values
235
+
236
+ """
237
+
238
+ # Run checks on the input surface
239
+ if not _check_true_data_arguments(true_data):
240
+ raise ValueError("The input true_data is not properly formed")
241
+
242
+ # Build the mesh of output Cartesian Locations
243
+ xx, yy = np.meshgrid(out_x, out_y)
244
+ zz = np.ones(xx.shape) * -999
245
+
246
+ # Find the index within the desired mesh where these data fall
247
+ m_x_idx, m_y_idx = _find_true_data_indices(out_x, out_y, true_data)
248
+
249
+ # Update the values of the surface with the information from the true data
250
+ zz[m_y_idx, m_x_idx] = true_data['z']
251
+
252
+ # Perform the nearest neighbor approximation
253
+ invalid = np.isin(zz, -999)
254
+ ind = nd.distance_transform_edt(invalid, return_distances=False, return_indices=True)
255
+
256
+ # Assign the value based on the selected indices
257
+ zz = zz[tuple(ind)]
258
+
259
+ # Smooth the coarse surface
260
+ smoothed_zz, iterations, error = bi_linear_smoothing(starting_mesh=zz,
261
+ measured_x_index=m_x_idx,
262
+ measured_y_index=m_y_idx,
263
+ measured_data=true_data['z'],
264
+ tolerance=smoothing_error_tolerance,
265
+ maximum_iterations=iteration_max,
266
+ verbose_info=verbose_info)
267
+
268
+ return xx, yy, smoothed_zz, zz
269
+
270
+
271
+ def _check_true_data_arguments(x):
272
+ """
273
+ This function checks the input argument of the interpolation to ensure that it possesses all the correct
274
+ information prior to running the analysis.
275
+
276
+ :param x: Pandas.DataFrame
277
+ The collection of input data that we want to ensure is the correct format prior to creating the interpolation
278
+ grid.
279
+ :return:
280
+ True if the DataFrame is correctly formed, otherwise it raises ValueError exceptions.
281
+ """
282
+
283
+ if not isinstance(x, pd.DataFrame):
284
+ raise ValueError("The true data input must be a Pandas.DataFrame")
285
+
286
+ if 'x' not in x.columns.values or 'y' not in x.columns.values or 'z' not in x.columns.values:
287
+ raise ValueError("The DataFrame must contain a column for each of the standard Cartesian coordinates")
288
+
289
+ return True
290
+
291
+
292
+ def _find_true_data_indices(x, y, true_data):
293
+ """
294
+ Part of this algorithm determines where in the dense grid the true/measured data exists. Then it will replace the
295
+ current value in the matrix with the true data. In this manner we can constrain the smoothing of
296
+ the surface to ensure that it remains close to the points that we know.
297
+
298
+ This function will determine where in the dense matrix the true data will exist, insert the true data into the
299
+ list, and return the indices and the new surface.
300
+ :param x: Numpy.ndarray
301
+ This is the single dimensioned array of the values of the function along the x-direction
302
+ :param y: Numpy.ndarray
303
+ This is the single dimensioned array of the values of the function along the y-direction
304
+ :param true_data: Pandas.DataFrame
305
+ The collection of true data. This must contain columns for the 'x', 'y', and 'z' elements of the sparse data
306
+ :return: tuple
307
+ The first element of the tuple is the x-direction indices of the placement for the true data, and the second
308
+ is the y-direction indices.
309
+ """
310
+
311
+ measured_x_index = np.zeros(shape=(true_data.shape[0],), dtype=int)
312
+ measured_y_index = np.zeros(shape=(true_data.shape[0],), dtype=int)
313
+
314
+ x_idx = np.where(true_data.columns.values == 'x')[0][0]
315
+ y_idx = np.where(true_data.columns.values == 'y')[0][0]
316
+
317
+ # Find the indices for the measured data
318
+ for i in range(true_data.shape[0]):
319
+ try:
320
+ measured_x_index[i] = np.where(x - true_data.iloc[i, x_idx] >= 0)[0][0]
321
+ measured_y_index[i] = np.where(y - true_data.iloc[i, y_idx] >= 0)[0][0]
322
+
323
+ except IndexError:
324
+ print('Error at the {}th element'.format(i))
325
+
326
+ return measured_x_index, measured_y_index
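An end-to-end sketch of the dense-sampling pipeline on an assumed toy sparse surface; the DataFrame columns follow the 'x'/'y'/'z' convention the docstring requires, the grid and tolerance values are illustrative, and nearest_neighbor_dense_sampling is assumed to be in scope.

```python
import numpy as np
import pandas as pd

# Assumed sparse "measured" points, sampled from the plane z = x + y.
true_data = pd.DataFrame({
    'x': [0.0, 0.0, 9.0, 9.0, 4.0],
    'y': [0.0, 9.0, 0.0, 9.0, 5.0],
    'z': [0.0, 9.0, 9.0, 18.0, 9.0],
})

out_x = np.linspace(0.0, 9.0, 10)
out_y = np.linspace(0.0, 9.0, 10)

xx, yy, smoothed_zz, rough_zz = nearest_neighbor_dense_sampling(
    true_data, out_x, out_y,
    smoothing_error_tolerance=1e-3,
    iteration_max=200,
    verbose_info=False,
)
print(rough_zz.shape, smoothed_zz.shape)  # both (10, 10)
```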
@@ -0,0 +1,76 @@
1
+ from numpy import array
2
+ from pymeasurable_objects.measurables import InvalidUnitOfMeasureException, Angle
3
+
4
+ """
5
+ A collection of functions that provide the matrices for specific canonical rotations about Cartesian coordinate
6
+ direction vectors.
7
+ """
8
+
9
+
10
+ def geo_to_body():
11
+ """
12
+ This is the matrix that converts the coordinate system from a geographic reference to the center of mass of the
13
+ vehicle. With the conventions below this amounts to rx(numpy.pi).dot(rz(numpy.pi/2)).
14
+ """
15
+ return array([
16
+ [0, 1, 0],
17
+ [1, 0, 0],
18
+ [0, 0, -1]
19
+ ])
20
+
21
+
22
+ def rx(x):
23
+ """
24
+ Canonical rotation about the Cartesian X-axis
25
+
26
+ x : Angle
27
+ The rotation that must be executed
28
+
29
+ returns : double, array-like
30
+ the 3x3 matrix that represents the rotation around the x-axis
31
+ """
32
+ if isinstance(x, Angle):
33
+ return array([[1, 0, 0],
34
+ [0, x.cos(), x.sin()],
35
+ [0, -x.sin(), x.cos()]])
36
+ else:
37
+ raise InvalidUnitOfMeasureException
38
+
39
+
40
+ def ry(y):
41
+ """
42
+ Canonical rotation about the Cartesian y-axis
43
+
44
+ y : Angle
45
+ The rotation that must be executed
46
+
47
+ returns : double, array-like
48
+ the 3x3 matrix that represents the rotation around the y-axis
49
+ """
50
+ if isinstance(y, Angle):
51
+ return array([[y.cos(), 0, -y.sin()],
52
+ [0, 1, 0],
53
+ [y.sin(), 0, y.cos()]])
54
+
55
+ else:
56
+ raise InvalidUnitOfMeasureException
57
+
58
+
59
+ def rz(z):
60
+ """
61
+ Canonical rotation about the Cartesian z-axis
62
+
63
+ z : Angle
64
+ The rotation that must be executed
65
+
66
+ returns : double, array-like
67
+ the 3x3 matrix that represents the rotation around the z-axis
68
+ """
69
+
70
+ if isinstance(z, Angle):
71
+ return array([[z.cos(), z.sin(), 0],
72
+ [-z.sin(), z.cos(), 0],
73
+ [0, 0, 1]])
74
+
75
+ else:
76
+ raise InvalidUnitOfMeasureException
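As a numeric sanity check on the composition noted in geo_to_body, the sketch below uses plain numpy floats (independent of the package's Angle type, whose constructor is not shown in this diff) to reproduce the same matrix from the rx/rz conventions above.

```python
import numpy as np

def rx_f(x):
    # Same convention as rx above, but taking a plain angle in radians.
    return np.array([[1, 0, 0],
                     [0, np.cos(x), np.sin(x)],
                     [0, -np.sin(x), np.cos(x)]])

def rz_f(z):
    # Same convention as rz above, but taking a plain angle in radians.
    return np.array([[np.cos(z), np.sin(z), 0],
                     [-np.sin(z), np.cos(z), 0],
                     [0, 0, 1]])

composed = rx_f(np.pi).dot(rz_f(np.pi / 2))
expected = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]])
print(np.allclose(composed, expected))  # True
```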
@@ -0,0 +1,108 @@
1
+ def erfc(a):
2
+ """
3
+ The complex error function that is required for the determination of the excess ground attenuation.
4
+
5
+ a : complex
6
+ the independent variable to determine the complex error function value.
7
+
8
+ returns : complex
9
+ the value of the complex error function at a
10
+
11
+ Remarks:
12
+ 2022-11-29 - FSM - Running the F-35 A data suggests that there may be instances where the calculation of P2 and
13
+ Q2 may result in Infinite/NaN values
14
+ """
15
+ from numpy import sin, cos, pi, exp, isinf
16
+
17
+ if isinstance(a, complex):
18
+ x = a.real
19
+ y = a.imag
20
+ rho2 = a ** 2
21
+ if x > 6.0 or y > 6.0:
22
+ return complex(0, 1) * a * (0.5124242 / (rho2 - 0.2752551) + 0.05176536 / (rho2 - 2.724745))
23
+ else:
24
+ if x > 3.9 or y > 3.0:
25
+ w = 0.4613135 / (rho2 - 0.1901635)
26
+ w += (0.09999216 / (rho2 - 1.7844927))
27
+ w += (0.002883894 / (rho2 - 5.5253437))
28
+ w *= (complex(0, 1) * a)
29
+ return w
30
+ else:
31
+ h = 0.8
32
+ A = cos(2 * x * y)
33
+ B = sin(2 * x * y)
34
+ C = exp(-2 * y * pi / h) - cos(2 * x * pi / h)
35
+ D = sin(2 * x * pi / h)
36
+ P2 = 2.0 * exp(-(x * x + (2.0 * y * pi / h) - y * y)) * ((A * C - B * D) /
37
+ (C * C + D * D))
38
+ Q2 = 2.0 * exp(-(x * x + (2.0 * y * pi / h) - y * y)) * ((A * D + B * C) /
39
+ (C * C + D * D))
40
+
41
+ if isinf(P2) or isinf(Q2):
42
+ raise ValueError(
43
+ "There is an issue determining the correct values for the curve fit for the "
44
+ "complex error function"
45
+ )
46
+
47
+ H = 0
48
+ K = 0
49
+ for n in range(1, 5):
50
+ H += (2.0 * y * h / pi) * (exp(-n * n * h * h) * (y * y + x * x + n * n * h * h)) / (
51
+ ((y * y - x * x + n * n * h * h) ** 2.0) + 4 * y * y * x * x)
52
+ K += (2.0 * x * h / pi) * (exp(-n * n * h * h) * (y * y + x * x - n * n * h * h)) / (
53
+ ((y * y - x * x + n * n * h * h) ** 2.0) + 4 * y * y * x * x)
54
+ H += h * y / (pi * (y * y + x * x))
55
+ K += h * x / (pi * (y * y + x * x))
56
+
57
+ if y == pi / h:
58
+ H = H + 0.5 * P2
59
+ K = K - 0.5 * Q2
60
+ elif y < (pi / h):
61
+ H = H + P2
62
+ K = K - Q2
63
+ return complex(H, K)
64
+
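The rational approximations above match the classical approximations to the Faddeeva function w(z) = exp(-z^2) * erfc(-i*z), so scipy.special.wofz offers one way to spot-check the result for arguments in the first quadrant; treating the two as equivalent is an assumption, not something stated in this module.

```python
import numpy as np
from scipy.special import wofz

# Hypothetical spot-check, assuming erfc above is meant to approximate the Faddeeva function.
for a in (complex(0.5, 0.5), complex(4.5, 0.2), complex(7.0, 1.0)):
    approx = erfc(a)          # the function defined above (assumed in scope)
    reference = wofz(a)       # SciPy's Faddeeva function
    print(a, approx, reference, abs(approx - reference))
```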
65
+ def yml(l, m, polar, azimuthal):
66
+ """
67
+ Calculate the normalized spherical harmonics for the provided order (l) and power (m) at the provided angle set.
68
+ This function was replaced with the scipy function for the calculation of the spherical harmonics.
69
+
70
+ l : int
71
+ the order of the series
72
+ m : int
73
+ the power of the series
74
+ polar : double (units: radians)
75
+ the polar angle of the spherical harmonics
76
+ azimuthal : double (units: radians)
77
+ the azimuthal angle of the spherical harmonics
78
+
79
+ returns : complex
80
+ the values of the spherical harmonics at these angles, order and power.
81
+
82
+ 20220329 - FSM - Refactored the calculation of the angles that are used within the determination of the
83
+ spherical harmonic value
84
+ """
85
+
86
+ from scipy.special import sph_harm
87
+ from pymeasurable_objects.measurables import Angle
88
+
89
+ if isinstance(azimuthal, Angle):
90
+ az_angle_radians = azimuthal.radians
91
+ elif isinstance(azimuthal, tuple):
92
+ if isinstance(azimuthal[0], Angle):
93
+ az_angle_radians = azimuthal[0].radians
94
+ else:
95
+ az_angle_radians = azimuthal[0]
96
+ else:
97
+ az_angle_radians = azimuthal
98
+
99
+ if isinstance(polar, Angle):
100
+ pol_angle_radians = polar.radians
101
+ elif isinstance(polar, tuple):
102
+ if isinstance(polar[0], Angle):
103
+ pol_angle_radians = polar[0].radians
104
+ else:
105
+ pol_angle_radians = polar[0]
106
+ else:
107
+ pol_angle_radians = polar
108
+ return sph_harm(m, l, az_angle_radians, pol_angle_radians)
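A short sketch of yml on plain radian floats; the assumed check is that it agrees with calling scipy.special.sph_harm directly, which is all the wrapper does after unpacking Angle or tuple arguments (yml is assumed to be in scope).

```python
import numpy as np
from scipy.special import sph_harm

polar = np.pi / 3       # polar angle in radians
azimuthal = np.pi / 4   # azimuthal angle in radians

print(yml(2, 1, polar, azimuthal))          # the wrapper defined above (assumed in scope)
print(sph_harm(1, 2, azimuthal, polar))     # direct SciPy call with the same angles
```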