PyESPER 1.0.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PyESPER/__init__.py +28 -0
- PyESPER/adjust_pH_DIC.py +73 -0
- PyESPER/coefs_AAinds.py +53 -0
- PyESPER/defaults.py +71 -0
- PyESPER/define_polygons.py +68 -0
- PyESPER/emlr_estimate.py +156 -0
- PyESPER/emlr_nn.py +54 -0
- PyESPER/errors.py +39 -0
- PyESPER/fetch_data.py +54 -0
- PyESPER/fetch_polys_NN.py +16 -0
- PyESPER/final_formatting.py +25 -0
- PyESPER/input_AAinds.py +113 -0
- PyESPER/inputdata_organize.py +56 -0
- PyESPER/interpolate.py +62 -0
- PyESPER/iterations.py +254 -0
- PyESPER/lir.py +191 -0
- PyESPER/lir_uncertainties.py +99 -0
- PyESPER/mixed.py +37 -0
- PyESPER/nn.py +134 -0
- PyESPER/organize_data.py +378 -0
- PyESPER/organize_nn_output.py +266 -0
- PyESPER/pH_DIC_nn_adjustment.py +189 -0
- PyESPER/pH_adjcalc.py +36 -0
- PyESPER/pH_adjustment.py +179 -0
- PyESPER/process_netresults.py +105 -0
- PyESPER/run_nets.py +85 -0
- PyESPER/simplecantestimatelr.py +43 -0
- PyESPER/temperature_define.py +48 -0
- pyesper-1.0.0.dist-info/METADATA +16 -0
- pyesper-1.0.0.dist-info/RECORD +33 -0
- pyesper-1.0.0.dist-info/WHEEL +5 -0
- pyesper-1.0.0.dist-info/entry_points.txt +4 -0
- pyesper-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,99 @@

def process_uncertainties(param, default_factor, MeasUncerts, PredictorMeasurements, n):

    """
    Helps process uncertainties as needed by formatting any possible inputs, calculating
    outputs, and processing "nan" values

    Inputs:
        param: String name of uncertainty to be calculated
        default_factor: Scalar default uncertainty value
        MeasUncerts: Dictionary of measurement uncertainty values
        PredictorMeasurements: Dictionary of input measurements
        n: Scalar number of estimates

    Outputs:
        result: Numpy array of user-provided uncertainties for each param
        dresult: Numpy array of default uncertainties for each param
    """

    import numpy as np

    # Determining whether the defined seawater property uncertainty has been used as input
    if param in MeasUncerts:
        # Obtain the predefined uncertainty if provided
        result = np.array(MeasUncerts.get(param))
        # Determining whether unique uncertainties were provided for each measurement or
        # only one uncertainty per property was given for all measurements, and formatting
        # to the same length if so
        if len(result) < n:
            result = np.tile(result, n)
        # Formatting the naming convention of default uncertainties
        if param.replace('_u', '') in PredictorMeasurements:
            dresult = np.array([i * default_factor for i in PredictorMeasurements[param.replace('_u', '')]])
        else:
            dresult = result
    # Giving defaults in the case where user-provided uncertainties were not provided
    else:
        if param.replace('_u', '') in PredictorMeasurements:
            result = np.array([i * default_factor for i in PredictorMeasurements[param.replace('_u', '')]])
            dresult = result
        else:
            result = np.tile('nan', n)
            dresult = np.tile(0, n)
    return result, dresult

def measurement_uncertainty_defaults(n, PredictorMeasurements={}, MeasUncerts={}):

    """
    Inputs:
        n: Scalar number of estimates requested
        PredictorMeasurements: Dictionary of predictor measurements used for analyses
        MeasUncerts: User-provided measurement uncertainties, or an empty dictionary if not provided

    Outputs:
        Uncertainties_pre: Dictionary of user-provided measurement uncertainties
        DUncertainties_pre: Dictionary of default measurement uncertainties
    """

    import numpy as np

    Uncertainties_pre, DUncertainties_pre = {}, {}

    # User-input salinity measurement uncertainties
    sal_u = np.array(MeasUncerts.get("sal_u", [0.003]))
    sal_u = np.tile(sal_u, n) if len(sal_u) < n else sal_u
    # Default salinity measurement uncertainties
    sal_defu = np.tile(0.003, n)

    # User-defined and default temperature measurement uncertainties
    temp_u = np.tile(np.array(MeasUncerts.get("temp_u", [0.003])), n) if "temp_u" in MeasUncerts or "temperature" in PredictorMeasurements else np.tile("nan", n)
    temp_defu = np.tile(0.003 if "temp_u" in MeasUncerts or "temperature" in PredictorMeasurements else 0, n)

    # Process other parameters
    parameters = {
        "phosphate_u": 0.02,
        "nitrate_u": 0.02,
        "silicate_u": 0.02,
        "oxygen_u": 0.01
    }

    # Use the process_uncertainties function to calculate default and user-defined
    # uncertainties for each parameter in a dictionary
    for param, factor in parameters.items():
        Uncertainties_pre[param], DUncertainties_pre[param] = process_uncertainties(
            param,
            factor,
            MeasUncerts,
            PredictorMeasurements,
            n
        )

    # Update the Uncertainties_pre and DUncertainties_pre dictionary keys to include
    # salinity, temperature, and all other properties
    meas_uncerts_keys = ["sal_u", "temp_u", *parameters.keys()]

    # Populating the dictionaries
    Uncertainties_pre.update(dict(zip(meas_uncerts_keys, [sal_u, temp_u, *Uncertainties_pre.values()])))
    DUncertainties_pre.update(dict(zip(meas_uncerts_keys, [sal_defu, temp_defu, *DUncertainties_pre.values()])))

    return Uncertainties_pre, DUncertainties_pre
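To make the interplay of these two helpers concrete, here is a minimal usage sketch. The measurement values, the two-point setup, and the predictor dictionary keys other than "temperature" and "oxygen" (which the code above checks for explicitly) are illustrative assumptions, not examples taken from the package documentation.

# Hypothetical usage sketch of measurement_uncertainty_defaults (illustrative values only)
from PyESPER.lir_uncertainties import measurement_uncertainty_defaults

PredictorMeasurements = {
    "salinity": [34.6, 35.1],      # key name assumed for illustration
    "temperature": [2.0, 10.5],
    "oxygen": [210.0, 245.0],
}
MeasUncerts = {"sal_u": [0.005]}   # one value, tiled to all n estimates

Uncertainties_pre, DUncertainties_pre = measurement_uncertainty_defaults(
    n=2,
    PredictorMeasurements=PredictorMeasurements,
    MeasUncerts=MeasUncerts,
)

print(Uncertainties_pre["sal_u"])      # user value repeated for each estimate: [0.005 0.005]
print(DUncertainties_pre["oxygen_u"])  # default of 1% of each oxygen measurement: [2.1 2.45]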
PyESPER/mixed.py
ADDED

@@ -0,0 +1,37 @@

def mixed(DesiredVariables, Path, OutputCoordinates={}, PredictorMeasurements={}, **kwargs):

    """
    Python interpretation of ESPER_Mixedv1.1

    Empirical Seawater Property Estimation Routines: Estimates seawater properties and estimate uncertainty from combinations of other parameter
    measurements. PyESPER_Mixed refers specifically to code that averages estimates from PyESPER_NN and PyESPER_LIR. See either subfunction for
    comments. The input arguments are the same for this function and for both subfunctions.

    *************************************************************************
    Please send questions or related requests about PyESPER to lmdias@uw.edu.
    *************************************************************************
    """
    import time
    import numpy as np
    from .lir import lir
    from .nn import nn

    tic = time.perf_counter()

    # Fetch estimates and uncertainties from PyESPER_LIR and PyESPER_NN
    EstimatesLIR, _, UncertaintiesLIR = lir(DesiredVariables, Path, OutputCoordinates, PredictorMeasurements, **kwargs)
    EstimatesNN, UncertaintiesNN = nn(DesiredVariables, Path, OutputCoordinates, PredictorMeasurements, **kwargs)

    Estimates, Uncertainties = {}, {}
    for est_type in EstimatesLIR.keys():
        estimates_lir = np.array(EstimatesLIR[est_type])
        estimates_nn = np.array(EstimatesNN[est_type])
        uncertainties_lir = np.array(UncertaintiesLIR[est_type])
        uncertainties_nn = np.array(UncertaintiesNN[est_type])
        Estimates[est_type] = np.mean([estimates_lir, estimates_nn], axis=0).tolist()
        Uncertainties[est_type] = np.minimum(uncertainties_lir, uncertainties_nn).tolist()

    toc = time.perf_counter()
    print(f"PyESPER_Mixed took {toc - tic:0.4f} seconds, or {(toc-tic)/60:0.4f} minutes to run")

    return Estimates, Uncertainties
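For context, a hypothetical call to this routine might look as follows. The desired-variable name ("TA"), the coordinate and measurement dictionary keys, and the Path value are assumptions for illustration and should be checked against the package README.

# Hypothetical call to the mixed (LIR + NN average) estimator; argument values are assumed
from PyESPER.mixed import mixed

OutputCoordinates = {
    "longitude": [210.0],   # key names assumed for illustration
    "latitude": [25.0],
    "depth": [1000.0],
}
PredictorMeasurements = {
    "salinity": [34.7],
    "temperature": [4.0],
    "oxygen": [190.0],
}

Estimates, Uncertainties = mixed(
    ["TA"],                     # DesiredVariables (name assumed)
    "/path/to/PyESPER/files",   # Path to the package's data files (user-specific placeholder)
    OutputCoordinates,
    PredictorMeasurements,
)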
PyESPER/nn.py
ADDED

@@ -0,0 +1,134 @@

def nn(DesiredVariables, Path, OutputCoordinates={}, PredictorMeasurements={}, **kwargs):

    """
    Neural networks for seawater property estimation as part of PyESPERs v1.0.0

    Inputs:
        DesiredVariables: List of desired variables to estimate
        Path: User-defined computer path
        OutputCoordinates: Dictionary of coordinates to produce estimates for
        PredictorMeasurements: Dictionary of predictor measurements to use in NNs
        **kwargs: Optional inputs specific to users (please see README for full description)

    Outputs:
        Estimates: Dictionary of estimates for each equation-desired variable combination
        Uncertainties: Dictionary of uncertainties for each equation-desired variable combination
    """

    import time
    from PyESPER.errors import errors
    from PyESPER.defaults import defaults
    from PyESPER.lir_uncertainties import measurement_uncertainty_defaults
    from PyESPER.inputdata_organize import inputdata_organize
    from PyESPER.temperature_define import temperature_define
    from PyESPER.iterations import iterations
    from PyESPER.fetch_polys_NN import fetch_polys_NN
    from PyESPER.define_polygons import define_polygons
    from PyESPER.run_nets import run_nets
    from PyESPER.process_netresults import process_netresults
    from PyESPER.organize_nn_output import organize_nn_output
    from PyESPER.pH_DIC_nn_adjustment import pH_DIC_nn_adjustment
    from PyESPER.final_formatting import final_formatting

    # Starting the timer
    tic = time.perf_counter()

    # Function that provides custom error messages for erroneous inputs
    errors(OutputCoordinates, PredictorMeasurements)

    # Function which calculates default measurement uncertainties
    Equations, n, VerboseTF, EstDates, C, PerKgSwTF, MeasUncerts = defaults(
        DesiredVariables,
        PredictorMeasurements,
        OutputCoordinates,
        **kwargs
    )

    # Function that processes the input values and default uncertainties and organizes them
    Uncertainties_pre, DUncertainties_pre = measurement_uncertainty_defaults(
        n,
        PredictorMeasurements,
        MeasUncerts
    )

    # Creating a dictionary of input data
    InputAll = inputdata_organize(
        EstDates,
        C,
        PredictorMeasurements,
        Uncertainties_pre
    )

    # Defining temperature if not provided in the correct format
    PredictorMeasurements, InputAll = temperature_define(
        DesiredVariables,
        PredictorMeasurements,
        InputAll,
        **kwargs
    )

    # Iterating through possible variable-equation combinations
    # to produce a usable dictionary of predictor values
    code, unc_combo_dict, dunc_combo_dict = iterations(
        DesiredVariables,
        Equations,
        PerKgSwTF,
        C,
        PredictorMeasurements,
        InputAll,
        Uncertainties_pre,
        DUncertainties_pre
    )

    # Creating boolean indicators for different ocean regions
    df = define_polygons(C)

    # Running the actual neural nets
    EstAtl, EstOther = run_nets(
        DesiredVariables,
        Equations,
        code
    )

    # Processing and organizing results from nets, including regional
    # smoothing based on boolean indicators
    Estimates = process_netresults(
        Equations,
        code,
        df,
        EstAtl,
        EstOther
    )

    # Organize output and iteratively calculate uncertainties
    Uncertainties = organize_nn_output(
        Path,
        DesiredVariables,
        OutputCoordinates,
        PredictorMeasurements,
        **kwargs
    )

    # Adjust pH and DIC for anthropogenic carbon
    YouHaveBeenWarnedCanth = False
    Cant_adjusted = pH_DIC_nn_adjustment(
        Path,
        DesiredVariables,
        Estimates,
        YouHaveBeenWarnedCanth,
        OutputCoordinates,
        PredictorMeasurements,
        **kwargs
    )

    # Final formatting and presentation of the output
    Estimates = final_formatting(
        DesiredVariables,
        Cant_adjusted,
        Estimates
    )

    toc = time.perf_counter()
    print(f"PyESPER_NN took {toc - tic:0.4f} seconds, or {(toc-tic)/60:0.4f} minutes to run")

    return Estimates, Uncertainties
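A standalone call to the neural-network estimator follows the same pattern as the mixed() sketch above; as there, the argument names and values are illustrative assumptions rather than documented examples, and the exact format of the returned dictionary keys (described above as equation-desired variable combinations) should be verified against the README.

# Hypothetical standalone call to the NN estimator (argument values assumed)
from PyESPER.nn import nn

OutputCoordinates = {"longitude": [210.0], "latitude": [25.0], "depth": [1000.0]}
PredictorMeasurements = {"salinity": [34.7], "temperature": [4.0], "oxygen": [190.0]}

Estimates, Uncertainties = nn(
    ["TA"],                     # DesiredVariables (name assumed)
    "/path/to/PyESPER/files",   # user-specific placeholder path
    OutputCoordinates,
    PredictorMeasurements,
)

# Both returns are dictionaries keyed by equation-desired variable combination
for combo in Estimates:
    print(combo, Estimates[combo], Uncertainties.get(combo))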
PyESPER/organize_data.py
ADDED

@@ -0,0 +1,378 @@

def organize_data(
    aaLCs,
    elLCs,
    aaInterpolants_pre,
    elInterpolants_pre,
    Gdf={},
    AAdata={},
    Elsedata={}
):

    """
    Organize interpolation output into more usable formatting and compute estimates

    Inputs:
        aaLCs: List of coefficients for input data from Atlantic/Arctic regions
        elLCs: List of coefficients for input data not from Atlantic/Arctic
        aaInterpolants_pre: Scipy interpolant for Atlantic/Arctic
        elInterpolants_pre: Scipy interpolant for not Atlantic/Arctic
        Gdf: Dictionary of grid for interpolation, separated into regions
        AAdata: Dictionary of user input data for Atlantic/Arctic
        Elsedata: Dictionary of user input data not for Atlantic/Arctic

    Outputs:
        Estimate: Dictionary of estimates for each equation-desired variable combination
        CoefficientsUsed: Dictionary of dictionaries of coefficients for each equation-
            desired variable combination
    """

    import numpy as np

    # Determine combinations
    Gkeys = list(Gdf.keys())

    AAOvalues, ElseOvalues = list(AAdata.values()), list(Elsedata.values())
    # Initialize lists for storing interpolated values
    aaIntCT2, aaIntCA2, aaIntCB2, aaIntCC2, aaTo2, aaAo2, aaBo2, aaCo2 = [[] for _ in range(8)]
    elIntCT2, elIntCA2, elIntCB2, elIntCC2, elTo2, elAo2, elBo2, elCo2 = [[] for _ in range(8)]
    aaInterpolants, elInterpolants = {}, {}

    # Separate out stored values
    for i in range(0, len(aaLCs)):
        aaIntalpha, elIntalpha = aaLCs[i][:, 0], elLCs[i][:, 0]
        aaIntCS, elIntCS = aaLCs[i][:, 1], elLCs[i][:, 1]
        aaIntCT, elIntCT = aaLCs[i][:, 2], elLCs[i][:, 2]
        aaIntCA, elIntCA = aaLCs[i][:, 3], elLCs[i][:, 3]
        aaIntCB, elIntCB = aaLCs[i][:, 4], elLCs[i][:, 4]
        aaIntCC, elIntCC = aaLCs[i][:, 5], elLCs[i][:, 5]

        # A function to process data in the correct manner, with
        # proper handling of NaNs
        def process_list(int_values, val_values):
            int_values = np.asarray(int_values, dtype=float)
            val_values = np.asarray(val_values, dtype=object)

            # Replace NaNs in int_values with 0
            int2 = np.where(np.isnan(int_values), 0, int_values)

            # Replace string "nan" and float NaN in val_values with 0
            val_cleaned = []
            for val in val_values:
                if isinstance(val, str) and val.lower() == "nan":
                    val_cleaned.append(0)
                elif isinstance(val, float) and np.isnan(val):
                    val_cleaned.append(0)
                else:
                    val_cleaned.append(val)
            val2 = np.array(val_cleaned, dtype=object)

            return int2, val2

        # Correcting equation instances from nan to zero
        # for calculations
        key = Gkeys[i]
        is_key_1 = key[-1] == "1" and key[-2] != "1"
        is_key_2 = key[-1] == "2" and key[-2] != "1"
        is_key_3 = key[-1] == "3" and key[-2] != "1"
        is_key_4 = key[-1] == "4" and key[-2] != "1"
        is_key_5 = key[-1] == "5" and key[-2] != "1"
        is_key_6 = key[-1] == "6" and key[-2] != "1"
        is_key_7 = key[-1] == "7"
        is_key_8 = key[-1] == "8"
        is_key_9 = key[-1] == "9"
        is_key_10 = key[-1] == "0" and key[-2] == "1"
        is_key_11 = key[-1] == "1" and key[-2] == "1"
        is_key_12 = key[-1] == "2" and key[-2] == "1"
        is_key_13 = key[-1] == "3" and key[-2] == "1"
        is_key_14 = key[-1] == "4" and key[-2] == "1"
        is_key_15 = key[-1] == "5" and key[-2] == "1"
        is_key_16 = key[-1] == "6" and key[-2] == "1"

        # Atlantic and Arctic data equation processing
        aaDatao = AAOvalues[i]
        aaSo, aaTo, aaAo, aaBo, aaCo = aaDatao['S'], aaDatao['T'], aaDatao['A'], aaDatao['B'], aaDatao['C']

        # Determine which values to use
        if is_key_1:
            aaIntCT2, aaIntCA2, aaIntCB2, aaIntCC2 = aaIntCT, aaIntCA, aaIntCB, aaIntCC
            aaTo2, aaAo2, aaBo2, aaCo2 = aaTo, aaAo, aaBo, aaCo

        elif is_key_2:
            aaIntCT2, aaIntCA2, aaIntCC2 = aaIntCT, aaIntCA, aaIntCC
            aaTo2, aaAo2, aaCo2 = aaTo, aaAo, aaCo

            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)

        elif is_key_3:
            aaIntCT2, aaIntCB2, aaIntCC2 = aaIntCT, aaIntCB, aaIntCC
            aaTo2, aaBo2, aaCo2 = aaTo, aaBo, aaCo

            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)

        elif is_key_4:
            aaIntCT2, aaIntCC2 = aaIntCT, aaIntCC
            aaTo2, aaCo2 = aaTo, aaCo
            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)
            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)

        elif is_key_5:
            aaIntCT2, aaIntCA2, aaIntCB2 = aaIntCT, aaIntCA, aaIntCB
            aaTo2, aaAo2, aaBo2 = aaTo, aaAo, aaBo
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        elif is_key_6:
            aaIntCT2, aaIntCA2 = aaIntCT, aaIntCA
            aaTo2, aaAo2 = aaTo, aaAo
            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        elif is_key_7:
            aaIntCT2, aaIntCB2 = aaIntCT, aaIntCB
            aaTo2, aaBo2 = aaTo, aaBo

            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        elif is_key_8:
            aaIntCT2 = aaIntCT
            aaTo2 = aaTo

            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)
            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        elif is_key_9:
            aaIntCA2, aaIntCB2, aaIntCC2 = aaIntCA, aaIntCB, aaIntCC
            aaAo2, aaBo2, aaCo2 = aaAo, aaBo, aaCo

            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)

        elif is_key_10:
            aaIntCA2, aaIntCC2 = aaIntCA, aaIntCC
            aaAo2, aaCo2 = aaAo, aaCo
            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)
            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)

        elif is_key_11:
            aaIntCB2, aaIntCC2 = aaIntCB, aaIntCC
            aaBo2, aaCo2 = aaBo, aaCo
            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)
            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)

        elif is_key_12:
            aaIntCC2 = aaIntCC
            aaCo2 = aaCo

            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)
            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)
            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)

        elif is_key_13:
            aaIntCA2, aaIntCB2 = aaIntCA, aaIntCB
            aaAo2, aaBo2 = aaAo, aaBo

            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        elif is_key_14:
            aaIntCA2 = aaIntCA
            aaAo2 = aaAo
            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)
            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        elif is_key_15:
            aaIntCB2 = aaIntCB
            aaBo2 = aaBo

            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)
            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        elif is_key_16:
            aaIntCT2, aaTo2 = process_list(aaIntCT, aaTo)
            aaIntCA2, aaAo2 = process_list(aaIntCA, aaAo)
            aaIntCB2, aaBo2 = process_list(aaIntCB, aaBo)
            aaIntCC2, aaCo2 = process_list(aaIntCC, aaCo)

        # Convert data lists to NumPy arrays and fix one specific malformed value
        aaAo2 = ['-0.000002' if x == '-2.4319000000000003e-' else x for x in aaAo2]
        data = [aaIntalpha, aaIntCS, aaIntCT2, aaIntCA2, aaIntCB2, aaIntCC2, aaSo, aaTo2, aaAo2, aaBo2, aaCo2]
        aaIal, aaICS, aaICT, aaICA, aaICB, aaICC, aaS, aaT, aaA, aaB, aaC = map(lambda x: np.array(x, dtype=float), data)

        # Compute `aaEst`, the estimate for Atlantic and Arctic
        aaEst = np.array([a + b*c + d*e + f*g + h*i + j*k
                          for a, b, c, d, e, f, g, h, i, j, k
                          in zip(aaIal, aaICS, aaS, aaICT, aaT, aaICA, aaA, aaICB, aaB, aaICC, aaC)])

        # Store results
        aaInterpolants[key] = (aaIal, aaICS, aaICT, aaICA, aaICB, aaICC, aaEst)

        # Reprocessing "NaN" to 0 as needed for non-Atlantic/Arctic calculations
        elDatao = ElseOvalues[i]
        elSo, elTo, elAo, elBo, elCo = elDatao['S'], elDatao['T'], elDatao['A'], elDatao['B'], elDatao['C']

        # Determine which values to use
        if is_key_1:
            elIntCT2, elIntCA2, elIntCB2, elIntCC2 = elIntCT, elIntCA, elIntCB, elIntCC
            elTo2, elAo2, elBo2, elCo2 = elTo, elAo, elBo, elCo

        elif is_key_2:
            elIntCT2, elIntCA2, elIntCC2 = elIntCT, elIntCA, elIntCC
            elTo2, elAo2, elCo2 = elTo, elAo, elCo

            elIntCB2, elBo2 = process_list(elIntCB, elBo)

        elif is_key_3:
            elIntCT2, elIntCB2, elIntCC2 = elIntCT, elIntCB, elIntCC
            elTo2, elBo2, elCo2 = elTo, elBo, elCo

            elIntCA2, elAo2 = process_list(elIntCA, elAo)

        elif is_key_4:
            elIntCT2, elIntCC2 = elIntCT, elIntCC
            elTo2, elCo2 = elTo, elCo

            elIntCA2, elAo2 = process_list(elIntCA, elAo)
            elIntCB2, elBo2 = process_list(elIntCB, elBo)

        elif is_key_5:
            elIntCT2, elIntCA2, elIntCB2 = elIntCT, elIntCA, elIntCB
            elTo2, elAo2, elBo2 = elTo, elAo, elBo

            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        elif is_key_6:
            elIntCT2, elIntCA2 = elIntCT, elIntCA
            elTo2, elAo2 = elTo, elAo

            elIntCB2, elBo2 = process_list(elIntCB, elBo)
            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        elif is_key_7:
            elIntCT2, elIntCB2 = elIntCT, elIntCB
            elTo2, elBo2 = elTo, elBo

            elIntCA2, elAo2 = process_list(elIntCA, elAo)
            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        elif is_key_8:
            elIntCT2 = elIntCT
            elTo2 = elTo

            elIntCA2, elAo2 = process_list(elIntCA, elAo)
            elIntCB2, elBo2 = process_list(elIntCB, elBo)
            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        elif is_key_9:
            elIntCA2, elIntCB2, elIntCC2 = elIntCA, elIntCB, elIntCC
            elAo2, elBo2, elCo2 = elAo, elBo, elCo

            elIntCT2, elTo2 = process_list(elIntCT, elTo)

        elif is_key_10:
            elIntCA2, elIntCC2 = elIntCA, elIntCC
            elAo2, elCo2 = elAo, elCo

            elIntCT2, elTo2 = process_list(elIntCT, elTo)
            elIntCB2, elBo2 = process_list(elIntCB, elBo)

        elif is_key_11:
            elIntCB2, elIntCC2 = elIntCB, elIntCC
            elBo2, elCo2 = elBo, elCo

            elIntCT2, elTo2 = process_list(elIntCT, elTo)
            elIntCA2, elAo2 = process_list(elIntCA, elAo)

        elif is_key_12:
            elIntCC2 = elIntCC
            elCo2 = elCo

            elIntCT2, elTo2 = process_list(elIntCT, elTo)
            elIntCA2, elAo2 = process_list(elIntCA, elAo)
            elIntCB2, elBo2 = process_list(elIntCB, elBo)

        elif is_key_13:
            elIntCA2, elIntCB2 = elIntCA, elIntCB
            elAo2, elBo2 = elAo, elBo

            elIntCT2, elTo2 = process_list(elIntCT, elTo)
            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        elif is_key_14:
            elIntCA2 = elIntCA
            elAo2 = elAo

            elIntCT2, elTo2 = process_list(elIntCT, elTo)
            elIntCB2, elBo2 = process_list(elIntCB, elBo)
            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        elif is_key_15:
            elIntCB2 = elIntCB
            elBo2 = elBo

            elIntCT2, elTo2 = process_list(elIntCT, elTo)
            elIntCA2, elAo2 = process_list(elIntCA, elAo)
            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        elif is_key_16:
            elIntCT2, elTo2 = process_list(elIntCT, elTo)
            elIntCA2, elAo2 = process_list(elIntCA, elAo)
            elIntCB2, elBo2 = process_list(elIntCB, elBo)
            elIntCC2, elCo2 = process_list(elIntCC, elCo)

        # Convert all input lists to NumPy arrays in one go
        data2 = [elIntalpha, elIntCS, elIntCT2, elIntCA2, elIntCB2, elIntCC2, elSo, elTo2, elAo2, elBo2, elCo2]
        elIal, elICS, elICT, elICA, elICB, elICC, elS, elT, elA, elB, elC = map(lambda x: np.array(x, dtype=float), data2)

        # Compute `elEst`, the estimate for non-Atlantic/Arctic
        elEst = np.array([a + b*c + d*e + f*g + h*i + j*k
                          for a, b, c, d, e, f, g, h, i, j, k
                          in zip(elIal, elICS, elS, elICT, elT, elICA, elA, elICB, elB, elICC, elC)])
        # Store the results
        elInterpolants[key] = (elIal, elICS, elICT, elICA, elICB, elICC, elEst)

    Estimate, CoefficientsUsed = {}, {}
    for kcombo in AAdata.keys():
        AAdata[kcombo]["C0"] = aaInterpolants[kcombo][0]
        AAdata[kcombo]["CS"] = aaInterpolants[kcombo][1]
        AAdata[kcombo]["CT"] = aaInterpolants[kcombo][2]
        AAdata[kcombo]["CA"] = aaInterpolants[kcombo][3]
        AAdata[kcombo]["CB"] = aaInterpolants[kcombo][4]
        AAdata[kcombo]["CC"] = aaInterpolants[kcombo][5]
        AAdata[kcombo]["Estimate"] = aaInterpolants[kcombo][6]
    for kcombo in Elsedata.keys():
        Elsedata[kcombo]["C0"] = elInterpolants[kcombo][0]
        Elsedata[kcombo]["CS"] = elInterpolants[kcombo][1]
        Elsedata[kcombo]["CT"] = elInterpolants[kcombo][2]
        Elsedata[kcombo]["CA"] = elInterpolants[kcombo][3]
        Elsedata[kcombo]["CB"] = elInterpolants[kcombo][4]
        Elsedata[kcombo]["CC"] = elInterpolants[kcombo][5]
        Elsedata[kcombo]["Estimate"] = elInterpolants[kcombo][6]

        # Merge AA and Else data by key
        merged = {}
        for key in AAdata[kcombo].keys():
            merged[key] = np.concatenate([AAdata[kcombo][key], Elsedata[kcombo][key]])

        # Get sort order based on "Order"
        sort_index = np.argsort(merged["Order"])

        # Sort each field in the merged dictionary
        TotData = {key: val[sort_index] for key, val in merged.items()}

        # Store estimate values as dictionary with 1 key
        Estimate[kcombo] = TotData["Estimate"]

        # Store coefficients as dictionary with named keys
        CoefficientsUsed[kcombo] = {
            "Intercept": TotData["C0"],
            "Coef S": TotData["CS"],
            "Coef T": TotData["CT"],
            "Coef A": TotData["CA"],
            "Coef B": TotData["CB"],
            "Coef C": TotData["CC"]
        }

    return Estimate, CoefficientsUsed
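The per-sample estimate assembled inside the loop above is a plain linear combination of the interpolated coefficients and the NaN-zeroed predictors: Estimate = C0 + CS*S + CT*T + CA*A + CB*B + CC*C. The short standalone sketch below reproduces that arithmetic with made-up coefficient and predictor values so the zip expression is easier to follow.

# Standalone numeric sketch of the linear estimate used in organize_data
# (coefficient and predictor values are made up for illustration only)
import numpy as np

C0, CS, CT, CA, CB, CC = (np.array([x]) for x in (5.0, 60.0, -1.2, 0.0, 0.0, 0.8))
S, T, A, B, C = (np.array([x]) for x in (35.0, 10.0, 0.0, 0.0, 200.0))

# Same per-sample combination as the zip-based list comprehension above
Est = C0 + CS * S + CT * T + CA * A + CB * B + CC * C
print(Est)  # array([2253.]) for these made-up numbers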