rock_physics_open-0.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. rock_physics_open/__init__.py +0 -0
  2. rock_physics_open/equinor_utilities/__init__.py +0 -0
  3. rock_physics_open/equinor_utilities/anisotropy.py +211 -0
  4. rock_physics_open/equinor_utilities/classification_functions/__init__.py +17 -0
  5. rock_physics_open/equinor_utilities/classification_functions/class_stats.py +68 -0
  6. rock_physics_open/equinor_utilities/classification_functions/lin_class.py +53 -0
  7. rock_physics_open/equinor_utilities/classification_functions/mahal_class.py +63 -0
  8. rock_physics_open/equinor_utilities/classification_functions/norm_class.py +73 -0
  9. rock_physics_open/equinor_utilities/classification_functions/poly_class.py +45 -0
  10. rock_physics_open/equinor_utilities/classification_functions/post_prob.py +27 -0
  11. rock_physics_open/equinor_utilities/classification_functions/two_step_classification.py +60 -0
  12. rock_physics_open/equinor_utilities/conversions.py +10 -0
  13. rock_physics_open/equinor_utilities/gen_utilities/__init__.py +11 -0
  14. rock_physics_open/equinor_utilities/gen_utilities/dict_to_float.py +38 -0
  15. rock_physics_open/equinor_utilities/gen_utilities/dim_check_vector.py +113 -0
  16. rock_physics_open/equinor_utilities/gen_utilities/filter_input.py +131 -0
  17. rock_physics_open/equinor_utilities/gen_utilities/filter_output.py +88 -0
  18. rock_physics_open/equinor_utilities/machine_learning_utilities/__init__.py +15 -0
  19. rock_physics_open/equinor_utilities/machine_learning_utilities/base_pressure_model.py +170 -0
  20. rock_physics_open/equinor_utilities/machine_learning_utilities/dummy_vars.py +53 -0
  21. rock_physics_open/equinor_utilities/machine_learning_utilities/exponential_model.py +137 -0
  22. rock_physics_open/equinor_utilities/machine_learning_utilities/import_ml_models.py +77 -0
  23. rock_physics_open/equinor_utilities/machine_learning_utilities/polynomial_model.py +132 -0
  24. rock_physics_open/equinor_utilities/machine_learning_utilities/run_regression.py +209 -0
  25. rock_physics_open/equinor_utilities/machine_learning_utilities/sigmoidal_model.py +241 -0
  26. rock_physics_open/equinor_utilities/optimisation_utilities/__init__.py +19 -0
  27. rock_physics_open/equinor_utilities/optimisation_utilities/opt_subst_utilities.py +455 -0
  28. rock_physics_open/equinor_utilities/snapshot_test_utilities/__init__.py +10 -0
  29. rock_physics_open/equinor_utilities/snapshot_test_utilities/compare_snapshots.py +184 -0
  30. rock_physics_open/equinor_utilities/snapshot_test_utilities/snapshots.py +97 -0
  31. rock_physics_open/equinor_utilities/std_functions/__init__.py +43 -0
  32. rock_physics_open/equinor_utilities/std_functions/backus_ave.py +68 -0
  33. rock_physics_open/equinor_utilities/std_functions/dvorkin_nur.py +77 -0
  34. rock_physics_open/equinor_utilities/std_functions/gassmann.py +165 -0
  35. rock_physics_open/equinor_utilities/std_functions/hashin_shtrikman.py +224 -0
  36. rock_physics_open/equinor_utilities/std_functions/hertz_mindlin.py +51 -0
  37. rock_physics_open/equinor_utilities/std_functions/moduli_velocity.py +67 -0
  38. rock_physics_open/equinor_utilities/std_functions/reflection_eq.py +120 -0
  39. rock_physics_open/equinor_utilities/std_functions/rho.py +69 -0
  40. rock_physics_open/equinor_utilities/std_functions/voigt_reuss_hill.py +149 -0
  41. rock_physics_open/equinor_utilities/std_functions/walton.py +45 -0
  42. rock_physics_open/equinor_utilities/std_functions/wood_brie.py +94 -0
  43. rock_physics_open/equinor_utilities/various_utilities/Equinor_logo.gif +0 -0
  44. rock_physics_open/equinor_utilities/various_utilities/Equinor_logo.ico +0 -0
  45. rock_physics_open/equinor_utilities/various_utilities/__init__.py +24 -0
  46. rock_physics_open/equinor_utilities/various_utilities/display_result_statistics.py +90 -0
  47. rock_physics_open/equinor_utilities/various_utilities/gassmann_dry_mod.py +56 -0
  48. rock_physics_open/equinor_utilities/various_utilities/gassmann_mod.py +56 -0
  49. rock_physics_open/equinor_utilities/various_utilities/gassmann_sub_mod.py +64 -0
  50. rock_physics_open/equinor_utilities/various_utilities/hs_average.py +59 -0
  51. rock_physics_open/equinor_utilities/various_utilities/pressure.py +96 -0
  52. rock_physics_open/equinor_utilities/various_utilities/reflectivity.py +101 -0
  53. rock_physics_open/equinor_utilities/various_utilities/timeshift.py +104 -0
  54. rock_physics_open/equinor_utilities/various_utilities/vp_vs_rho_set_statistics.py +170 -0
  55. rock_physics_open/equinor_utilities/various_utilities/vrh_3_min.py +83 -0
  56. rock_physics_open/fluid_models/__init__.py +9 -0
  57. rock_physics_open/fluid_models/brine_model/__init__.py +5 -0
  58. rock_physics_open/fluid_models/brine_model/brine_properties.py +178 -0
  59. rock_physics_open/fluid_models/gas_model/__init__.py +5 -0
  60. rock_physics_open/fluid_models/gas_model/gas_properties.py +319 -0
  61. rock_physics_open/fluid_models/oil_model/__init__.py +5 -0
  62. rock_physics_open/fluid_models/oil_model/dead_oil_density.py +65 -0
  63. rock_physics_open/fluid_models/oil_model/dead_oil_velocity.py +30 -0
  64. rock_physics_open/fluid_models/oil_model/live_oil_density.py +82 -0
  65. rock_physics_open/fluid_models/oil_model/live_oil_velocity.py +24 -0
  66. rock_physics_open/fluid_models/oil_model/oil_bubble_point.py +69 -0
  67. rock_physics_open/fluid_models/oil_model/oil_properties.py +146 -0
  68. rock_physics_open/sandstone_models/__init__.py +59 -0
  69. rock_physics_open/sandstone_models/cemented_shalysand_sandyshale_models.py +304 -0
  70. rock_physics_open/sandstone_models/constant_cement_models.py +204 -0
  71. rock_physics_open/sandstone_models/constant_cement_optimisation.py +125 -0
  72. rock_physics_open/sandstone_models/contact_cement_model.py +138 -0
  73. rock_physics_open/sandstone_models/curvefit_sandstone_models.py +143 -0
  74. rock_physics_open/sandstone_models/friable_models.py +177 -0
  75. rock_physics_open/sandstone_models/friable_optimisation.py +115 -0
  76. rock_physics_open/sandstone_models/friable_shalysand_sandyshale_models.py +235 -0
  77. rock_physics_open/sandstone_models/patchy_cement_fluid_substitution_model.py +477 -0
  78. rock_physics_open/sandstone_models/patchy_cement_model.py +384 -0
  79. rock_physics_open/sandstone_models/patchy_cement_optimisation.py +254 -0
  80. rock_physics_open/sandstone_models/unresolved_cemented_sandshale_models.py +134 -0
  81. rock_physics_open/sandstone_models/unresolved_friable_sandshale_models.py +126 -0
  82. rock_physics_open/shale_models/__init__.py +19 -0
  83. rock_physics_open/shale_models/dem.py +174 -0
  84. rock_physics_open/shale_models/dem_dual_por.py +61 -0
  85. rock_physics_open/shale_models/kus_tok.py +59 -0
  86. rock_physics_open/shale_models/multi_sca.py +133 -0
  87. rock_physics_open/shale_models/pq.py +102 -0
  88. rock_physics_open/shale_models/sca.py +90 -0
  89. rock_physics_open/shale_models/shale4_mineral.py +147 -0
  90. rock_physics_open/shale_models/shale4_mineral_dem_overlay.py +92 -0
  91. rock_physics_open/span_wagner/__init__.py +5 -0
  92. rock_physics_open/span_wagner/co2_properties.py +444 -0
  93. rock_physics_open/span_wagner/coefficients.py +165 -0
  94. rock_physics_open/span_wagner/equations.py +104 -0
  95. rock_physics_open/span_wagner/tables/__init__.py +0 -0
  96. rock_physics_open/span_wagner/tables/carbon_dioxide_density.npz +0 -0
  97. rock_physics_open/span_wagner/tables/lookup_table.py +33 -0
  98. rock_physics_open/t_matrix_models/Equinor_logo.ico +0 -0
  99. rock_physics_open/t_matrix_models/__init__.py +35 -0
  100. rock_physics_open/t_matrix_models/carbonate_pressure_substitution.py +124 -0
  101. rock_physics_open/t_matrix_models/curvefit_t_matrix_exp.py +123 -0
  102. rock_physics_open/t_matrix_models/curvefit_t_matrix_min.py +86 -0
  103. rock_physics_open/t_matrix_models/parse_t_matrix_inputs.py +297 -0
  104. rock_physics_open/t_matrix_models/run_t_matrix.py +243 -0
  105. rock_physics_open/t_matrix_models/t_matrix_C.py +210 -0
  106. rock_physics_open/t_matrix_models/t_matrix_opt_fluid_sub_exp.py +137 -0
  107. rock_physics_open/t_matrix_models/t_matrix_opt_fluid_sub_petec.py +167 -0
  108. rock_physics_open/t_matrix_models/t_matrix_opt_forward_model_exp.py +76 -0
  109. rock_physics_open/t_matrix_models/t_matrix_opt_forward_model_min.py +89 -0
  110. rock_physics_open/t_matrix_models/t_matrix_parameter_optimisation_exp.py +176 -0
  111. rock_physics_open/t_matrix_models/t_matrix_parameter_optimisation_min.py +162 -0
  112. rock_physics_open/t_matrix_models/t_matrix_vector/__init__.py +12 -0
  113. rock_physics_open/t_matrix_models/t_matrix_vector/array_functions.py +75 -0
  114. rock_physics_open/t_matrix_models/t_matrix_vector/calc_c_eff.py +163 -0
  115. rock_physics_open/t_matrix_models/t_matrix_vector/calc_isolated.py +95 -0
  116. rock_physics_open/t_matrix_models/t_matrix_vector/calc_kd.py +40 -0
  117. rock_physics_open/t_matrix_models/t_matrix_vector/calc_kd_eff.py +116 -0
  118. rock_physics_open/t_matrix_models/t_matrix_vector/calc_kd_uuv.py +18 -0
  119. rock_physics_open/t_matrix_models/t_matrix_vector/calc_pressure.py +140 -0
  120. rock_physics_open/t_matrix_models/t_matrix_vector/calc_t.py +71 -0
  121. rock_physics_open/t_matrix_models/t_matrix_vector/calc_td.py +42 -0
  122. rock_physics_open/t_matrix_models/t_matrix_vector/calc_theta.py +43 -0
  123. rock_physics_open/t_matrix_models/t_matrix_vector/calc_x.py +33 -0
  124. rock_physics_open/t_matrix_models/t_matrix_vector/calc_z.py +50 -0
  125. rock_physics_open/t_matrix_models/t_matrix_vector/check_and_tile.py +43 -0
  126. rock_physics_open/t_matrix_models/t_matrix_vector/g_tensor.py +140 -0
  127. rock_physics_open/t_matrix_models/t_matrix_vector/iso_av.py +60 -0
  128. rock_physics_open/t_matrix_models/t_matrix_vector/iso_ave_all.py +55 -0
  129. rock_physics_open/t_matrix_models/t_matrix_vector/pressure_input.py +44 -0
  130. rock_physics_open/t_matrix_models/t_matrix_vector/t_matrix_vec.py +278 -0
  131. rock_physics_open/t_matrix_models/t_matrix_vector/velocity_vti_angles.py +81 -0
  132. rock_physics_open/t_matrix_models/tmatrix_python.dll +0 -0
  133. rock_physics_open/t_matrix_models/tmatrix_python.so +0 -0
  134. rock_physics_open/ternary_plots/__init__.py +3 -0
  135. rock_physics_open/ternary_plots/gen_ternary_plot.py +73 -0
  136. rock_physics_open/ternary_plots/shale_prop_ternary.py +337 -0
  137. rock_physics_open/ternary_plots/ternary_patches.py +277 -0
  138. rock_physics_open/ternary_plots/ternary_plot_utilities.py +197 -0
  139. rock_physics_open/ternary_plots/unconventionals_ternary.py +75 -0
  140. rock_physics_open/version.py +34 -0
  141. rock_physics_open-0.3.2.dist-info/METADATA +90 -0
  142. rock_physics_open-0.3.2.dist-info/RECORD +145 -0
  143. rock_physics_open-0.3.2.dist-info/WHEEL +5 -0
  144. rock_physics_open-0.3.2.dist-info/licenses/LICENSE +165 -0
  145. rock_physics_open-0.3.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,211 @@
+ from typing import cast
+
+ import numpy as np
+ import numpy.typing as npt
+
+ from rock_physics_open.equinor_utilities import gen_utilities
+
+ # These routines are not finalised or used in any plugins yet
+ """
+ c11, c12, c13, c33, c44, c66 = c_ij_2_c_factors(cij)
+
+ Transform a single stiffness tensor into components. A VTI medium is assumed.
+ """
+
+
+ def c_ij_2_c_factors(
+     cij: npt.NDArray[np.float64],
+ ) -> (
+     tuple[
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+     ]
+     | None
+ ):
+     """Transform a single stiffness tensor into components. A VTI medium is assumed.
+
+     Parameters
+     ----------
+     cij : np.ndarray
+         A 6x6 matrix.
+
+     Returns
+     -------
+     tuple
+         (c11, c12, c13, c33, c44, c66).
+     """
+     if not isinstance(cij, np.ndarray):  # pyright: ignore[reportUnnecessaryIsInstance]
+         try:  # pyright: ignore[reportUnreachable]
+             cij = np.array(cij, dtype=float)  # pyright: ignore[reportUnreachable]
+         except ValueError:
+             print("Input data can't be transformed into a NumPy array")  # pyright: ignore[reportUnreachable]
+     try:
+         num_samp = int(cij.size / 36)
+         cij = cij.reshape((6, 6, num_samp))
+         c11 = cij[0, 0, :].reshape(num_samp, 1)
+         c12 = cij[0, 1, :].reshape(num_samp, 1)
+         c13 = cij[0, 2, :].reshape(num_samp, 1)
+         c33 = cij[2, 2, :].reshape(num_samp, 1)
+         c44 = cij[3, 3, :].reshape(num_samp, 1)
+         c66 = cij[5, 5, :].reshape(num_samp, 1)
+         return c11, c12, c13, c33, c44, c66
+
+     except ValueError:
+         print("Input data is not a 6x6xN array")
+
+
+ def cfactors2cij(
+     c11: npt.NDArray[np.float64],
+     c12: npt.NDArray[np.float64],
+     c13: npt.NDArray[np.float64],
+     c33: npt.NDArray[np.float64],
+     c44: npt.NDArray[np.float64],
+     c66: npt.NDArray[np.float64],
+ ) -> npt.NDArray[np.float64]:
+     """Transform individual stiffness factors to a 6x6x(number of samples) stiffness tensor.
+
+     Parameters
+     ----------
+     c11, c12, c13, c33, c44, c66 : np.ndarray
+         All one-dimensional and of the same length.
+
+     Returns
+     -------
+     np.ndarray
+         A 6x6x(number of samples) stiffness tensor.
+     """
+     c11, c12, c13, c33, c44, c66 = cast(
+         list[npt.NDArray[np.float64]],
+         gen_utilities.dim_check_vector((c11, c12, c13, c33, c44, c66)),
+     )
+
+     num_samp = c11.shape[0]  # the first axis holds the samples for 1-D inputs
+
+     cij = np.zeros((6, 6, num_samp))
+     cij[0, 0, :] = c11
+     cij[0, 1, :] = c12
+     cij[0, 2, :] = c13
+     cij[1, 0, :] = c12
+     cij[1, 1, :] = c11
+     cij[1, 2, :] = c13
+     cij[2, 0, :] = c13
+     cij[2, 1, :] = c13
+     cij[2, 2, :] = c33
+     cij[3, 3, :] = c44
+     cij[4, 4, :] = c44
+     cij[5, 5, :] = c66
+
+     return cij
+
+
+ def c_ij_2_thomsen(
+     c: npt.NDArray[np.float64], rho: npt.NDArray[np.float64]
+ ) -> (
+     tuple[
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+         npt.NDArray[np.float64],
+     ]
+     | None
+ ):
+     """Thomsen parameters for weak anisotropy.
+
+     Parameters
+     ----------
+     c : np.ndarray
+         A (log of, or single instance of) 6x6 elastic tensor.
+     rho : np.ndarray
+         Density - a log of the same length as c.
+
+     Returns
+     -------
+     tuple
+         alpha, beta, epsilon, gamma, delta.
+     """
+     # C matrix should be 6x6
+     if not isinstance(c, np.ndarray):  # pyright: ignore[reportUnnecessaryIsInstance]
+         try:  # pyright: ignore[reportUnreachable]
+             c = np.array(c, dtype=float)  # pyright: ignore[reportUnreachable]
+         except ValueError:
+             print("Input data can't be transformed into a NumPy array")  # pyright: ignore[reportUnreachable]
+     try:
+         num_samp = int(c.size / 36)
+         c = c.reshape((6, 6, num_samp))
+         rho = rho.reshape(num_samp, 1)
+         alpha = np.sqrt(c[2, 2, :].reshape(num_samp, 1) / rho)
+         beta = np.sqrt(c[3, 3, :].reshape(num_samp, 1) / rho)
+         gamma = ((c[5, 5, :] - c[3, 3, :]) / (2 * c[3, 3, :])).reshape(num_samp, 1)
+         epsilon = ((c[0, 0, :] - c[2, 2, :]) / (2 * c[2, 2, :])).reshape(num_samp, 1)
+         delta = (
+             ((c[0, 2, :] + c[3, 3, :]) ** 2 - (c[2, 2, :] - c[3, 3, :]) ** 2)
+             / (2 * c[2, 2, :] * (c[2, 2, :] - c[3, 3, :]))
+         ).reshape(num_samp, 1)
+
+         return alpha, beta, epsilon, gamma, delta
+     except ValueError:
+         print("Input data is not a 6x6xN array")
+
+
+ def thomsen_2_c_ij(
+     alpha: npt.NDArray[np.float64],
+     beta: npt.NDArray[np.float64],
+     gamma: npt.NDArray[np.float64],
+     delta: npt.NDArray[np.float64],
+     epsilon: npt.NDArray[np.float64],
+     rho: npt.NDArray[np.float64],
+ ) -> tuple[
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+ ]:
+     """Elastic stiffness. Assumptions:
+     Thomsen's parameters apply for weak anisotropy in a transversely isotropic medium:
+
+     c11 c12 c13 0   0   0
+
+     c12 c11 c13 0   0   0
+
+     c13 c13 c33 0   0   0
+
+     0   0   0   c44 0   0
+
+     0   0   0   0   c44 0
+
+     0   0   0   0   0   c66
+
+     where c66 = 1/2 (c11 - c12).
+
+     Parameters
+     ----------
+     alpha, beta, gamma, delta, epsilon :
+         Thomsen's parameters.
+     rho :
+         Bulk density.
+
+     Returns
+     -------
+     tuple
+         Elastic stiffness c11, c12, c13, c33, c44, c66.
+     """
+     alpha, beta, gamma, delta, epsilon, rho = cast(
+         list[npt.NDArray[np.float64]],
+         gen_utilities.dim_check_vector((alpha, beta, gamma, delta, epsilon, rho)),
+     )
+
+     c33 = rho * alpha**2
+     c44 = rho * beta**2
+     c11 = c33 * (1 + 2 * epsilon)
+     c66 = c44 * (1 + 2 * gamma)
+     c12 = c11 - 2 * c66
+     c13 = np.sqrt(2 * c33 * (c33 - c44) * delta + (c33 - c44) ** 2) - c44
+
+     return c11, c12, c13, c33, c44, c66
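For orientation, a minimal round-trip sketch of the helpers above. The input values are illustrative only, and it assumes gen_utilities.dim_check_vector accepts equal-length 1-D arrays:

import numpy as np

from rock_physics_open.equinor_utilities.anisotropy import (
    c_ij_2_thomsen,
    cfactors2cij,
    thomsen_2_c_ij,
)

# Illustrative single-sample values: velocities in m/s, density in kg/m^3
alpha = np.array([3500.0])  # vertical P-wave velocity
beta = np.array([2000.0])   # vertical S-wave velocity
gamma = np.array([0.05])
delta = np.array([0.08])
epsilon = np.array([0.10])
rho = np.array([2500.0])

c11, c12, c13, c33, c44, c66 = thomsen_2_c_ij(alpha, beta, gamma, delta, epsilon, rho)
cij = cfactors2cij(c11, c12, c13, c33, c44, c66)
# Note the return order of c_ij_2_thomsen: alpha, beta, epsilon, gamma, delta
a, b, eps, gam, dlt = c_ij_2_thomsen(cij, rho)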
@@ -0,0 +1,17 @@
+ from .class_stats import gen_class_stats
+ from .lin_class import lin_class
+ from .mahal_class import mahal_class
+ from .norm_class import norm_class
+ from .poly_class import poly_class
+ from .post_prob import posterior_probability
+ from .two_step_classification import gen_two_step_class_stats
+
+ __all__ = [
+     "gen_class_stats",
+     "lin_class",
+     "mahal_class",
+     "norm_class",
+     "poly_class",
+     "posterior_probability",
+     "gen_two_step_class_stats",
+ ]
@@ -0,0 +1,68 @@
+ import numpy as np
+ import numpy.typing as npt
+
+ NULL_CLASS = 0
+
+
+ def gen_class_stats(
+     obs: npt.NDArray[np.float64],
+     class_val: npt.NDArray[np.int64],
+ ) -> tuple[
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.int64],
+     npt.NDArray[np.int64],
+ ]:
+     """
+     Generate statistics - mean, covariance and prior probability - for each
+     class in the training data. The observations are an n x m array, where n
+     is the number of observations and m is the number of variables. With p
+     classes, the returned mean value will be an array of dimension p x m, the
+     covariance m x m x p, and class_id and prior probability p-length vectors.
+     class_mean, class_cov, prior_prob, class_counts, class_id = gen_class_stats(obs, class_val).
+
+     Parameters
+     ----------
+     obs : np.ndarray
+         An nxm array of data samples (observations).
+     class_val : np.ndarray
+         An n length vector with the class ID of each observation. Assumed to
+         be integer.
+
+     Returns
+     -------
+     tuple
+         class_mean, class_cov, prior_prob, class_counts, class_id : (np.ndarray, np.ndarray, np.ndarray, np.ndarray,
+         np.ndarray).
+     """
+
+     n, m = obs.shape
+     # Find number of classes. If class_val input is not integer, raise an exception
+     if not (
+         isinstance(class_val, np.ndarray)  # pyright: ignore[reportUnnecessaryIsInstance] | Kept for backward compatibility
+         and issubclass(class_val.dtype.type, np.integer)  # pyright: ignore[reportUnnecessaryIsInstance] | Kept for backward compatibility
+     ):
+         raise ValueError(f"{__file__}: class values are not discrete numbers")
+
+     class_id, class_counts = np.unique(class_val, return_counts=True)
+     # Remove the null class
+     idx_null = np.where(class_id == NULL_CLASS)
+     class_id = np.delete(class_id, idx_null)
+     class_counts = np.delete(class_counts, idx_null)
+     p = class_id.shape[0]
+
+     # Very simple prior probability - number of observations in each class
+     # divided by total number of observations
+     prior_prob = class_counts / n
+
+     # Assign output arrays
+     class_mean = np.zeros((p, m))
+     class_cov = np.zeros((m, m, p))
+
+     for i in range(len(class_id)):
+         idx = class_val == class_id[i]
+         class_mean[i, :] = np.mean(obs[idx, :], axis=0)
+         class_cov[:, :, i] = np.cov(obs[idx, :], rowvar=False)
+
+     return class_mean, class_cov, prior_prob, class_counts, class_id
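A short sketch of how gen_class_stats might be driven with synthetic training data (the values and seed are illustrative, not from the package):

import numpy as np

from rock_physics_open.equinor_utilities.classification_functions import gen_class_stats

rng = np.random.default_rng(0)
# Two 2-D Gaussian classes labelled 1 and 2; label 0 is reserved for the null class
obs = np.vstack([rng.normal(0.0, 1.0, (50, 2)), rng.normal(3.0, 1.0, (50, 2))])
class_val = np.repeat(np.array([1, 2]), 50)

class_mean, class_cov, prior_prob, class_counts, class_id = gen_class_stats(obs, class_val)
# class_mean has shape (2, 2), class_cov (2, 2, 2), and prior_prob is [0.5, 0.5]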
@@ -0,0 +1,53 @@
+ import numpy as np
+ import numpy.typing as npt
+
+ NULL_CLASS = 0
+
+
+ def lin_class(
+     obs: npt.NDArray[np.float64],
+     class_mean: npt.NDArray[np.float64],
+     class_id: npt.NDArray[np.int64],
+     thresh: float = np.inf,
+ ) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]]:
+     """
+     Linear classification routine. All data points are assigned a class, unless a threshold is set.
+
+     Parameters
+     ----------
+     obs : np.ndarray
+         An nxm array, where n is the number of samples and m is the number of features.
+     class_mean : np.ndarray
+         A pxm array, where p is the number of classes and m is the number of features.
+     class_id : np.ndarray
+         A p length vector, where p is the number of classes, containing class_id (integer numbers).
+     thresh : float
+         Unclassified threshold.
+
+     Returns
+     -------
+     tuple
+         lin_class_arr, lin_dist : (np.ndarray, np.ndarray).
+         lin_class_arr: nx1 vector. The classes are numbered according to class_id,
+         and unclassified samples (with distance greater than thresh) are set to 0,
+         lin_dist: nx1 vector with linear distance from the closest class centre to each sample.
+     """
+
+     # Find dimensions
+     n = obs.shape[0]
+     p = class_mean.shape[0]
+
+     # Assign matrices
+     dist = np.zeros((n, p))
+
+     # Calculate distance for each class
+     for i in range(p):
+         dist[:, i] = np.sqrt(np.sum((obs - class_mean[i, :]) ** 2, axis=1))
+
+     # Find the shortest distance, assign class, filter out observations with distance
+     # greater than the threshold
+     lin_class_arr = np.choose(np.argmin(dist, axis=1), class_id)
+     lin_dist = np.amin(dist, axis=1)
+     lin_class_arr[lin_dist > thresh] = NULL_CLASS
+
+     return lin_class_arr, lin_dist
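A minimal sketch of lin_class with hand-picked class centres (illustrative numbers):

import numpy as np

from rock_physics_open.equinor_utilities.classification_functions import lin_class

class_mean = np.array([[0.0, 0.0], [3.0, 3.0]])
class_id = np.array([1, 2])
obs = np.array([[0.2, -0.1], [2.8, 3.1], [10.0, 10.0]])

labels, dist = lin_class(obs, class_mean, class_id, thresh=2.0)
# The last sample lies further than thresh from both centres and becomes class 0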
@@ -0,0 +1,63 @@
+ import numpy as np
+ import numpy.typing as npt
+
+ from .post_prob import posterior_probability
+
+ NULL_CLASS = 0
+
+
+ def mahal_class(
+     obs: npt.NDArray[np.float64],
+     class_mean: npt.NDArray[np.float64],
+     class_cov: npt.NDArray[np.float64],
+     class_id: npt.NDArray[np.int64],
+     thresh: float = np.inf,
+ ) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.float64], npt.NDArray[np.float64]]:
+     """
+     Mahalanobis classification routine. All data points are assigned a class, unless a threshold is set.
+
+     Parameters
+     ----------
+     obs : np.ndarray
+         An nxm array, where n is the number of samples and m is the number of variables.
+     class_mean : np.ndarray
+         A pxm array, where p is the number of classes and m is the number of variables.
+     class_cov : np.ndarray
+         An mxmxp array, where m is the number of variables and p is the number of classes.
+     class_id : np.ndarray
+         A p length vector, where p is the number of classes, containing class_id (integer numbers).
+     thresh : float
+         Unclassified threshold.
+
+     Returns
+     -------
+     tuple
+         mahal_class_arr, mahal_dist, mahal_pp : (np.ndarray, np.ndarray, np.ndarray).
+         mahal_class_arr: nx1 vector. The classes are numbered according to class_id, and unclassified samples
+         (with distance greater than thresh) are set to 0,
+         mahal_dist: nx1 vector with Mahalanobis distance from the closest class centre to each sample,
+         mahal_pp: nx1 vector with posterior probability based on the distance to each class.
+     """
+
+     # Find dimensions
+     n = obs.shape[0]
+     p = class_mean.shape[0]
+
+     # Assign matrices
+     dist = np.zeros((n, p))
+
+     # Calculate distance for each class
+     for i in range(p):
+         cov_inv = np.linalg.inv(class_cov[:, :, i])
+         delta = obs - class_mean[i, :]
+         dist[:, i] = np.sqrt(np.einsum("nj,jk,nk->n", delta, cov_inv, delta))
+
+     # Find the shortest distance, assign class, calculate posterior probability and
+     # filter out observations with distance greater than the threshold
+     mahal_class_arr = np.choose(np.argmin(dist, axis=1), class_id)
+     mahal_dist = np.amin(dist, axis=1)
+     mahal_pp = posterior_probability(mahal_dist, dist)
+     d_idx = mahal_dist > thresh
+     mahal_class_arr[d_idx] = NULL_CLASS
+
+     return mahal_class_arr, mahal_dist, mahal_pp
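The routine is naturally fed the statistics produced by gen_class_stats; a sketch with synthetic data (illustrative values):

import numpy as np

from rock_physics_open.equinor_utilities.classification_functions import (
    gen_class_stats,
    mahal_class,
)

rng = np.random.default_rng(1)
obs = np.vstack([rng.normal(0.0, 1.0, (100, 2)), rng.normal(4.0, 1.0, (100, 2))])
class_val = np.repeat(np.array([1, 2]), 100)

class_mean, class_cov, _, _, class_id = gen_class_stats(obs, class_val)
# Samples more than 3 Mahalanobis units from every class centre become class 0
labels, m_dist, m_pp = mahal_class(obs, class_mean, class_cov, class_id, thresh=3.0)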
@@ -0,0 +1,73 @@
+ import numpy as np
+ import numpy.typing as npt
+
+ NULL_CLASS = 0
+
+
+ def norm_class(
+     obs: npt.NDArray[np.float64],
+     class_mean: npt.NDArray[np.float64],
+     class_cov: npt.NDArray[np.float64],
+     prior_prob: npt.NDArray[np.float64],
+     class_id: npt.NDArray[np.int64],
+     thresh: float = np.inf,
+ ) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.float64], npt.NDArray[np.float64]]:
+     """
+     Normal distribution classification routine. All data points are assigned a
+     class, unless a threshold is set. The "dist" calculated here is the quadratic
+     discriminant function according to a Bayes classification. This is a negative
+     number, and the closest class has the smallest absolute value.
+
+     Parameters
+     ----------
+     obs : np.ndarray
+         An nxm array, where n is the number of samples and m is the number of variables.
+     class_mean : np.ndarray
+         A pxm array, where p is the number of classes and m is the number of variables.
+     class_cov : np.ndarray
+         An mxmxp array, where m is the number of variables and p is the number of classes.
+     prior_prob : np.ndarray
+         A p length vector, where p is the number of classes, containing the prior probability of each class.
+     class_id : np.ndarray
+         A p length vector, where p is the number of classes, containing class_id (integer numbers).
+     thresh : float
+         Unclassified threshold.
+
+
+     Returns
+     -------
+     tuple
+         norm_class_id, norm_dist, norm_pp : (np.ndarray, np.ndarray, np.ndarray).
+         norm_class_id: nx1 vector. The classes are numbered according to class_id, and unclassified
+         samples (with absolute distance greater than thresh) are set to 0,
+         norm_dist: nx1 vector with quadratic discriminant distance from the closest class centre to each sample,
+         norm_pp: nx1 vector with posterior probability based on the distance to each class.
+     """
+
+     # Find dimensions
+     n = obs.shape[0]
+     p = class_mean.shape[0]
+
+     # Assign matrices
+     dist = np.zeros((n, p))
+
+     # Calculate distance for each class
+     for i in range(p):
+         cov_inv = np.linalg.inv(class_cov[:, :, i])
+         delta = obs - class_mean[i, :]
+         dist[:, i] = (
+             -0.5 * np.log(np.linalg.det(class_cov[:, :, i]))
+             - 0.5 * np.sqrt(np.einsum("nj,jk,nk->n", delta, cov_inv, delta))
+             + np.log(prior_prob[i])
+         )
+
+     # The discriminant function values ("dist") are negative numbers. Choose the one
+     # with the largest value (smallest absolute value) as the closest class
+     norm_class_id = np.choose(np.argmax(dist, axis=1), class_id)
+     norm_dist = np.amax(dist, axis=1)
+     norm_pp = np.exp(norm_dist) / np.sum(np.exp(dist), axis=1)
+
+     # Compare the absolute value of the discriminator with the threshold
+     norm_class_id[np.abs(norm_dist) > thresh] = NULL_CLASS
+
+     return norm_class_id, norm_dist, norm_pp
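norm_class adds the covariance determinant and prior probability terms of the Bayes discriminant; a sketch with unequal class sizes (illustrative values):

import numpy as np

from rock_physics_open.equinor_utilities.classification_functions import (
    gen_class_stats,
    norm_class,
)

rng = np.random.default_rng(2)
obs = np.vstack([rng.normal(0.0, 1.0, (150, 2)), rng.normal(4.0, 1.5, (50, 2))])
class_val = np.repeat(np.array([1, 2]), [150, 50])

class_mean, class_cov, prior_prob, _, class_id = gen_class_stats(obs, class_val)
# Priors of 0.75 and 0.25 here bias the discriminant towards class 1
labels, disc, pp = norm_class(obs, class_mean, class_cov, prior_prob, class_id)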
@@ -0,0 +1,45 @@
+ import matplotlib.path as mplpath
+ import numpy as np
+ import numpy.typing as npt
+
+
+ def poly_class(
+     train_data: npt.NDArray[np.float64],
+     polygons: npt.NDArray[np.float64],
+     labels: npt.NDArray[np.int64],
+ ) -> npt.NDArray[np.int64]:
+     """
+     Points within the polygons are assigned to class labels. Points that do not
+     fall within any polygon are set to class 0 (the null class).
+
+     Parameters
+     ----------
+
+     train_data : np.ndarray
+         Data points of two variables.
+     polygons : np.ndarray
+         Vertices of polygons in two-dimensional space.
+     labels : np.ndarray
+         Class label for each polygon.
+
+     Returns
+     -------
+     np.ndarray
+         Class id.
+     """
+     if len(labels) != len(polygons):
+         raise ValueError("Number of labels does not match number of polygons")
+
+     # Create output variables
+     idx_filtered = np.zeros(train_data.shape[0]).astype(bool)
+     poly_class_id = np.zeros(train_data.shape[0]).astype(int)
+
+     for i in range(len(polygons)):
+         class_polygon = polygons[i]
+         path = mplpath.Path(class_polygon)
+         # Only points within the given polygon are used
+         idx_poly = path.contains_points(train_data)
+         poly_class_id[idx_poly] = labels[i]
+         idx_filtered = np.logical_or(idx_filtered, idx_poly)
+     # idx_filtered is no longer returned
+     return poly_class_id
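A sketch of poly_class with two unit squares as class regions (illustrative coordinates):

import numpy as np

from rock_physics_open.equinor_utilities.classification_functions import poly_class

polygons = np.array(
    [
        [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
        [[2.0, 0.0], [3.0, 0.0], [3.0, 1.0], [2.0, 1.0]],
    ]
)
labels = np.array([1, 2])
points = np.array([[0.5, 0.5], [2.5, 0.5], [5.0, 5.0]])

class_id = poly_class(points, polygons, labels)  # -> [1, 2, 0]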
@@ -0,0 +1,27 @@
+ import numpy as np
+ import numpy.typing as npt
+
+
+ def posterior_probability(
+     min_dist: npt.NDArray[np.float64], dist: npt.NDArray[np.float64]
+ ) -> npt.NDArray[np.float64]:
+     """
+     Posterior probability, defined as the exponential of the negated minimum distance divided by the sum
+     of the exponentials of the negated distances to all classes.
+
+     Parameters
+     ----------
+     min_dist : np.ndarray
+         Minimum class distance according to some metric.
+     dist : np.ndarray
+         All class distances, each class in a column in a two-dimensional array.
+
+     Returns
+     -------
+     np.ndarray
+         Posterior probability array.
+     """
+     # Work with the negated distances without mutating the caller's array
+     n_exp = np.exp(-dist)
+     d_sum = n_exp.sum(axis=1)
+     return np.exp(-min_dist) / d_sum
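A small worked example of this softmax-style weighting (hand-computed values, for illustration):

import numpy as np

from rock_physics_open.equinor_utilities.classification_functions import posterior_probability

# Distances of three samples to two classes
dist = np.array([[0.5, 2.0], [1.0, 1.0], [3.0, 0.2]])
min_dist = dist.min(axis=1)

pp = posterior_probability(min_dist, dist)
# Sample 0: exp(-0.5) / (exp(-0.5) + exp(-2.0)) ~ 0.82; sample 1: exactly 0.5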
@@ -0,0 +1,60 @@
+ import numpy as np
+ import numpy.typing as npt
+
+ from .class_stats import gen_class_stats
+ from .mahal_class import mahal_class
+
+ NULL_CLASS = 0
+
+
+ def gen_two_step_class_stats(
+     obs: npt.NDArray[np.float64],
+     class_val: npt.NDArray[np.int64],
+     thresh: float,
+ ) -> tuple[
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.float64],
+     npt.NDArray[np.int64],
+     npt.NDArray[np.int64],
+     npt.NDArray[np.int64],
+ ]:
+     """
+     Generate statistics - mean, covariance and prior probability - for each
+     class in the training data. Run a Mahalanobis classification, and exclude
+     values with a distance above the threshold. Generate the class statistics
+     again and return them.
+
+     The observations are an n x m array, where n is the number of observations
+     and m is the number of variables. With p classes, the returned mean value
+     will be an array of dimension p x m, the covariance m x m x p, and class_id
+     and prior probability p-length vectors.
+
+     Parameters
+     ----------
+     obs : np.ndarray
+         An nxm array, where n is the number of samples and m is the number of variables.
+     class_val : np.ndarray
+         An n length vector with the class ID of each observation (integer numbers).
+     thresh : float
+         Unclassified threshold.
+
+     Returns
+     -------
+     tuple
+         (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray).
+         class_mean, class_cov: statistics for each class after rejects;
+         prior_prob, class_counts: prior probability of each class, based on the
+         number of observations in each class;
+         class_id: label for each class;
+         mahal_class_id: class id from the Mahalanobis classification.
+     """
+     class_mean, class_cov, _, _, class_id = gen_class_stats(obs, class_val)
+     mahal_class_id = mahal_class(obs, class_mean, class_cov, class_id, thresh)[0]
+
+     idx = mahal_class_id != NULL_CLASS
+     class_mean, class_cov, prior_prob, class_counts, class_id = gen_class_stats(
+         obs[idx], class_val[idx]
+     )
+
+     return class_mean, class_cov, prior_prob, class_counts, class_id, mahal_class_id
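A sketch of the two-step estimate on synthetic data (illustrative values):

import numpy as np

from rock_physics_open.equinor_utilities.classification_functions import (
    gen_two_step_class_stats,
)

rng = np.random.default_rng(3)
obs = np.vstack([rng.normal(0.0, 1.0, (100, 2)), rng.normal(4.0, 1.0, (100, 2))])
class_val = np.repeat(np.array([1, 2]), 100)

# Statistics are re-estimated after rejecting samples more than 3 Mahalanobis
# units from every class centre
class_mean, class_cov, prior_prob, class_counts, class_id, mahal_class_id = (
    gen_two_step_class_stats(obs, class_val, thresh=3.0)
)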
@@ -0,0 +1,10 @@
+ """
+ Simple conversions required for the material models.
+ """
+
+
+ def celsius_to_kelvin(temperature: float) -> float:
+     """
+     Convert temperature from degrees Celsius to kelvin.
+     """
+     return temperature + 273.15
@@ -0,0 +1,11 @@
+ from .dict_to_float import dict_value_to_float
+ from .dim_check_vector import dim_check_vector
+ from .filter_input import filter_input_log
+ from .filter_output import filter_output
+
+ __all__ = [
+     "dict_value_to_float",
+     "dim_check_vector",
+     "filter_input_log",
+     "filter_output",
+ ]
@@ -0,0 +1,38 @@
+ from typing import Any
+
+
+ def dict_value_to_float(
+     input_dict: dict[str, Any],
+ ) -> dict[str, float | list[float]]:
+     """
+     Convert dictionary strings to floating point numbers. Each value can have multiple floats.
+
+     Parameters
+     ----------
+     input_dict : dict
+         Input dictionary.
+
+     Returns
+     -------
+     dict
+         Output dictionary (converted in place).
+     """
+
+     for item in input_dict:
+         if isinstance(input_dict[item], float):
+             pass
+         else:
+             try:
+                 ff = float(input_dict[item])
+                 input_dict[item] = ff
+             except ValueError:  # if a list or tuple is hidden in the string
+                 try:
+                     ll = eval(input_dict[item])  # NB: trusted input only; ast.literal_eval is safer
+                     ff = [float(i) for i in ll]
+                     input_dict[item] = ff
+                 except (ValueError, SyntaxError, NameError):
+                     raise ValueError(
+                         "dict_value_to_float: not possible to convert value to float"
+                     )
+
+     return input_dict
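A usage sketch (illustrative keys and values); note that the input dictionary is converted in place and also returned:

from rock_physics_open.equinor_utilities.gen_utilities import dict_value_to_float

params = {"k_min": "36.6e9", "rho": 2650.0, "weights": "[0.6, 0.4]"}
converted = dict_value_to_float(params)
# {'k_min': 36600000000.0, 'rho': 2650.0, 'weights': [0.6, 0.4]}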