xslope-0.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
xslope/__init__.py ADDED
@@ -0,0 +1 @@
+ from ._version import __version__
xslope/_version.py ADDED
@@ -0,0 +1,4 @@
+ __all__ = ["__version__"]
+ __version__ = "0.1.2"
+
+
xslope/advanced.py ADDED
@@ -0,0 +1,460 @@
+ # Copyright 2025 Norman L. Jones
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import numpy as np
+ import pandas as pd
+ from scipy.stats import norm
+ from tabulate import tabulate
+
+
+ def rapid_drawdown(df, method_name, debug_level=1):
+     """
+     Performs rapid drawdown analysis using a three-stage approach.
+
+     Parameters:
+         df : pandas.DataFrame
+             Slice data with all required columns including rapid drawdown specific data:
+             - c, phi: current strength parameters
+             - c1, phi1: original strength parameters (for stage 3)
+             - d, psi: rapid drawdown parameters for low-K materials
+             - u: pore pressure (stage 1)
+             - u2: pore pressure for lowered pool (stage 2)
+             - dload, d_x, d_y: distributed loads (stage 1)
+             - dload2, d_x2, d_y2: distributed loads for lowered pool (stage 2)
+         method_name : str
+             The method name to use ('oms', 'bishop', 'spencer', etc.)
+         debug_level : int
+             0: no output, 1: print FS at each stage, >1: detailed debug info
+
+     Returns:
+         Tuple(bool, dict): (True, result_dict) or (False, error_message)
+     """
+
+     # Import solve module and get the method function
+     from . import solve
+     method_func = getattr(solve, method_name)
+
+     if debug_level >= 1:
+         print("=== RAPID DRAWDOWN ANALYSIS ===")
+
+     # Stage 1: Pre-drawdown conditions
+     if debug_level >= 1:
+         print("Stage 1: Pre-drawdown conditions...")
+
+     # Use original conditions (c, phi, u, dload, d_x, d_y)
+     success, result_stage1 = method_func(df)
+     if not success:
+         return False, f"Stage 1 failed: {result_stage1}"
+
+     stage1_FS = result_stage1['FS']
+     if debug_level >= 1:
+         print(f"Stage 1 FS = {stage1_FS:.4f}")
+
+     # Calculate consolidation stresses for each slice
+     # N_eff should be available from the method function
+     if 'n_eff' not in df.columns:
+         return False, "Stage 1 did not compute n_eff values"
+
+     # Calculate sigma_fc and tau_fc for each slice
+     sigma_fc = df['n_eff'] / df['dl']  # Equation (2)
+     tau_fc = (1.0 / stage1_FS) * (df['c'] + sigma_fc * np.tan(np.radians(df['phi'])))  # Equation (3)
+
+     if debug_level >= 2:
+         print("Stage 1 consolidation stresses:")
+         for i in range(len(df)):
+             print(f" Slice {i+1}: sigma_fc = {sigma_fc.iloc[i]:.2f}, tau_fc = {tau_fc.iloc[i]:.2f}")
+
+     # Stage 2: Post-drawdown conditions with undrained strengths
+     if debug_level >= 1:
+         print("Stage 2: Post-drawdown conditions with undrained strengths...")
+
+     # Update pore pressures and distributed loads for stage 2
+     df['u'] = df['u2']
+     df['dload'] = df['dload2']
+     df['d_x'] = df['d_x2']
+     df['d_y'] = df['d_y2']
+
+     # Process each slice for undrained strength calculation
+     for i in range(len(df)):
+         # Check if this slice has low-K material (d and psi are not zero)
+         d_val = df.iloc[i]['d']
+         psi_val = df.iloc[i]['psi']
+
+         if d_val > 0 and psi_val > 0:
+             # Low-K material - calculate undrained strength
+             if debug_level >= 2:
+                 print(f"Processing low-K material for slice {i+1}")
+
+             # Get consolidation stresses for this slice
+             sigma_fc_i = sigma_fc.iloc[i]
+             tau_fc_i = tau_fc.iloc[i]
+             phi_deg = df.iloc[i]['phi1']  # Use original phi for calculations
+             c_val = df.iloc[i]['c1']  # Use original c for calculations
+
+             # Calculate K1 using equation (4)
+             phi_rad = np.radians(phi_deg)
+             if abs(np.cos(phi_rad)) < 1e-12:
+                 if debug_level >= 2:
+                     print(f" Warning: cos(phi) near zero for slice {i+1}, skipping K1 calculation")
+                 continue
+
+             K1 = (sigma_fc_i + tau_fc_i * (np.sin(phi_rad) + 1) / np.cos(phi_rad)) / \
+                  (sigma_fc_i + tau_fc_i * (np.sin(phi_rad) - 1) / np.cos(phi_rad))
+
+             if debug_level >= 2:
+                 print(f" K1 = {K1:.4f}")
+
+             # Calculate Kf using equation (6)
+             if abs(sigma_fc_i - c_val * np.cos(phi_rad)) < 1e-12:
+                 if debug_level >= 2:
+                     print(f" Warning: denominator near zero for Kf calculation in slice {i+1}")
+                 continue
+
+             Kf = ((sigma_fc_i + c_val * np.cos(phi_rad)) * (1 + np.sin(phi_rad))) / \
+                  ((sigma_fc_i - c_val * np.cos(phi_rad)) * (1 - np.sin(phi_rad)))
+
+             if debug_level >= 2:
+                 print(f" Kf = {Kf:.4f}")
+
+             # Check for negative stresses using equations (7) and (8)
+             sigma3_k1 = sigma_fc_i + tau_fc_i * (np.sin(phi_rad) - 1) / np.cos(phi_rad)  # Equation (7)
+             sigma3_kf = (sigma_fc_i - c_val * np.cos(phi_rad)) * (1 - np.sin(phi_rad)) / (np.cos(phi_rad)**2)  # Equation (8)
+
+             if debug_level >= 2:
+                 print(f" sigma3_k1 = {sigma3_k1:.4f}, sigma3_kf = {sigma3_kf:.4f}")
+
+             # Calculate tau_ff values for both curves
+             tau_ff_k1 = d_val + sigma_fc_i * np.tan(np.radians(psi_val))  # d-psi curve
+             tau_ff_kf = c_val + sigma_fc_i * np.tan(phi_rad)  # c-phi curve
+
+             if debug_level >= 2:
+                 print(f" tau_ff_k1 = {tau_ff_k1:.4f}, tau_ff_kf = {tau_ff_kf:.4f}")
+
+             # Determine which tau_ff to use
+             if sigma3_k1 < 0 or sigma3_kf < 0:
+                 # Use the lower of the two curves
+                 tau_ff = min(tau_ff_k1, tau_ff_kf)
+                 if debug_level >= 2:
+                     print(f" Negative stress detected, using lower curve: tau_ff = {tau_ff:.4f}")
+             else:
+                 # Interpolate using equation (5)
+                 if abs(Kf - 1) < 1e-12:
+                     tau_ff = tau_ff_k1
+                 else:
+                     tau_ff = ((Kf - K1) * tau_ff_k1 + (K1 - 1) * tau_ff_kf) / (Kf - 1)
+
+                 if debug_level >= 2:
+                     print(f" Interpolated tau_ff = {tau_ff:.4f}")
+
+             # Set undrained strength parameters
+             df.iloc[i, df.columns.get_loc('c')] = float(tau_ff)
+             df.iloc[i, df.columns.get_loc('phi')] = 0.0
+
+             if debug_level >= 2:
+                 print(f" Set c = {tau_ff:.4f}, phi = 0.0 for slice {i+1}")
+         else:
+             # High-K material - keep original c and phi
+             if debug_level >= 2:
+                 print(f"Slice {i+1}: High-K material, keeping original c and phi")
+
+     # Calculate Stage 2 FS
+     success, result_stage2 = method_func(df)
+     if not success:
+         return False, f"Stage 2 failed: {result_stage2}"
+
+     stage2_FS = result_stage2['FS']
+     if debug_level >= 1:
+         print(f"Stage 2 FS = {stage2_FS:.4f}")
+
+     # Stage 3: Check drained strengths
+     if debug_level >= 1:
+         print("Stage 3: Checking drained strengths...")
+
+     # Check if any low-K slices need drained strength
+     need_stage3 = False
+
+     for i in range(len(df)):
+         d_val = df.iloc[i]['d']
+         psi_val = df.iloc[i]['psi']
+
+         if d_val > 0 and psi_val > 0:
+             # This is a low-K material slice
+             if 'n_eff' not in df.columns:
+                 return False, "Stage 2 did not compute n_eff values"
+
+             # Calculate drained strength using equations (9) and (10)
+             sigma_prime = df.iloc[i]['n_eff'] / df.iloc[i]['dl']  # Equation (9)
+             tau_drained = df.iloc[i]['c1'] + sigma_prime * np.tan(np.radians(df.iloc[i]['phi1']))  # Equation (10)
+
+             # Compare with undrained strength (current c value)
+             tau_undrained = df.iloc[i]['c']
+
+             if debug_level >= 2:
+                 print(f"Slice {i+1}: tau_drained = {tau_drained:.4f}, tau_undrained = {tau_undrained:.4f}")
+
+             if tau_drained < tau_undrained:
+                 # Use drained strength
+                 df.iloc[i, df.columns.get_loc('c')] = float(df.iloc[i]['c1'])
+                 df.iloc[i, df.columns.get_loc('phi')] = float(df.iloc[i]['phi1'])
+                 need_stage3 = True
+
+                 if debug_level >= 2:
+                     print(f" Using drained strength for slice {i+1}")
+
+     if need_stage3:
+         if debug_level >= 1:
+             print("Stage 3: Recalculating FS with drained strengths...")
+
+         success, result_stage3 = method_func(df)
+         if not success:
+             return False, f"Stage 3 failed: {result_stage3}"
+
+         stage3_FS = result_stage3['FS']
+         if debug_level >= 1:
+             print(f"Stage 3 FS = {stage3_FS:.4f}")
+     else:
+         stage3_FS = stage2_FS
+         result_stage3 = result_stage2  # No Stage 3 run, so reuse Stage 2 result in the selection below
+         if debug_level >= 1:
+             print("Stage 3: No drained strength adjustments needed")
+
+     # Final FS is the lower of Stage 2 and Stage 3
+     if stage2_FS < stage3_FS:
+         final_FS = stage2_FS
+         result = result_stage2
+     else:
+         final_FS = stage3_FS
+         result = result_stage3
+
+     if debug_level >= 1:
+         print(f"Final rapid drawdown FS = {final_FS:.4f}")
+         print("=== END RAPID DRAWDOWN ANALYSIS ===")
+
+     # Append stage FS to result
+     result['stage1_FS'] = stage1_FS
+     result['stage2_FS'] = stage2_FS
+     result['stage3_FS'] = stage3_FS
+
+     return True, result
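
Editor's note: a minimal usage sketch (illustrative only, not part of the packaged source). It assumes a slice DataFrame `df` with the columns listed in the docstring above has already been prepared elsewhere in xslope, and that 'spencer' is one of the solver functions exposed by xslope.solve.

    # Hypothetical call, assuming `df` was built by xslope's slice-generation step
    success, result = rapid_drawdown(df, 'spencer', debug_level=1)
    if success:
        print(result['stage1_FS'], result['stage2_FS'], result['stage3_FS'], result['FS'])
    else:
        print(f"Rapid drawdown analysis failed: {result}")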
+
+
+ def reliability(slope_data, method, rapid=False, circular=True, debug_level=0):
+     """
+     Performs reliability analysis using the Taylor Series Probability Method (TSPM).
+
+     Parameters:
+         slope_data : dict
+             Dictionary containing slope geometry, materials, and other input data
+         method : str
+             The limit equilibrium method name to use ('oms', 'bishop', 'janbu', 'spencer', etc.)
+         rapid : bool, optional
+             If True, performs rapid drawdown analysis (default: False)
+         circular : bool, optional
+             If True, uses circular search; if False, uses noncircular search (default: True)
+         debug_level : int, optional
+             Debug output level: 0=basic, 1=intermediate, 2=detailed (default: 0)
+
+     Returns:
+         tuple: (success, result) where result contains reliability analysis results
+     """
+
+     # Import search functions and solve module here to avoid circular import
+     from .search import circular_search, noncircular_search
+     from . import solve
+
+     if debug_level >= 1:
+         print("=== RELIABILITY ANALYSIS ===")
+         print(f"Method: {method}")
+         print(f"Rapid drawdown: {rapid}")
+         print(f"Circular search: {circular}")
+
+     # Step 1: Find the critical failure surface using search
+     if circular:
+         if debug_level >= 1:
+             print("Performing circular search...")
+         fs_cache, converged, search_path = circular_search(slope_data, method, rapid=rapid)
+     else:
+         if debug_level >= 1:
+             print("Performing noncircular search...")
+         fs_cache, converged, search_path = noncircular_search(slope_data, method, rapid=rapid)
+
+     if not fs_cache:
+         return False, "Search failed - no results found"
+
+     if not converged and debug_level >= 1:
+         print("Warning: Search did not fully converge - results may be less reliable")
+
+     # Get the critical (minimum FS) result
+     critical_result = fs_cache[0]  # First item has minimum FS
+     F_MLV = critical_result["FS"]
+     critical_slices = critical_result["slices"]
+     critical_surface = critical_result["failure_surface"]
+
+     if debug_level >= 1:
+         print(f"Critical factor of safety (F_MLV): {F_MLV:.4f}")
+
+     # Store the fs_cache for plotting
+     reliability_fs_cache = [{"name": "MLV", "result": critical_result}]
+
+     # Step 2: Identify parameters with standard deviations
+     materials = slope_data['materials']
+
+     # Find parameters that have standard deviations
+     param_info = []
+
+     for i, material in enumerate(materials):
+         mat_name = material.get('name', f'Material_{i+1}')
+
+         # Check each parameter for standard deviation
+         param_mappings = {
+             'gamma': 'sigma_gamma',
+             'c': 'sigma_c',
+             'phi': 'sigma_phi'
+         }
+
+         for param, std_key in param_mappings.items():
+             if std_key in material and material[std_key] > 0:
+                 param_info.append({
+                     'material_id': i + 1,  # Use 1-based index
+                     'material_name': mat_name,
+                     'param': param,
+                     'mlv': material[param],
+                     'std': material[std_key]
+                 })
+
+     if debug_level >= 1:
+         print(f"Found {len(param_info)} parameters with standard deviations:")
+         for p in param_info:
+             print(f" Material {p['material_id']}: {p['param']} = {p['mlv']:.3f} ± σ={p['std']:.3f}")
+
+     # Step 3: Calculate F+ and F- for each parameter using TSPM
+     delta_F_values = []
+
+     for i, param in enumerate(param_info):
+         if debug_level >= 1:
+             print(f"\nProcessing parameter {i+1}/{len(param_info)}: Material {param['material_id']}, {param['param']}")
+
+         # Create modified slope_data copies
+         slope_data_plus = slope_data.copy()
+         slope_data_minus = slope_data.copy()
+         slope_data_plus['materials'] = [mat.copy() for mat in materials]
+         slope_data_minus['materials'] = [mat.copy() for mat in materials]
+
+         # Find the material and modify the parameter (use 0-based index)
+         mat_index = param['material_id'] - 1
+         if mat_index < len(slope_data_plus['materials']):
+             slope_data_plus['materials'][mat_index][param['param']] = param['mlv'] + param['std']
+
+         if mat_index < len(slope_data_minus['materials']):
+             slope_data_minus['materials'][mat_index][param['param']] = param['mlv'] - param['std']
+
+         # Calculate F+ and F-
+         if circular:
+             fs_cache_plus, _, _ = circular_search(slope_data_plus, method, rapid=rapid)
+             fs_cache_minus, _, _ = circular_search(slope_data_minus, method, rapid=rapid)
+         else:
+             fs_cache_plus, _, _ = noncircular_search(slope_data_plus, method, rapid=rapid)
+             fs_cache_minus, _, _ = noncircular_search(slope_data_minus, method, rapid=rapid)
+
+         if not fs_cache_plus or not fs_cache_minus:
+             return False, f"Failed to calculate F+ or F- for parameter {param['param']}"
+
+         F_plus = fs_cache_plus[0]["FS"]
+         F_minus = fs_cache_minus[0]["FS"]
+
+         # Store results for plotting
+         reliability_fs_cache.append({
+             "name": f"{param['param']}+",
+             "result": fs_cache_plus[0]
+         })
+         reliability_fs_cache.append({
+             "name": f"{param['param']}-",
+             "result": fs_cache_minus[0]
+         })
+
+         delta_F = abs(F_plus - F_minus)
+         delta_F_values.append(delta_F)
+
+         param['F_plus'] = F_plus
+         param['F_minus'] = F_minus
+         param['delta_F'] = delta_F
+
+         if debug_level >= 1:
+             print(f" F+ = {F_plus:.4f}, F- = {F_minus:.4f}, ΔF = {delta_F:.4f}")
+
+     # Step 4: Calculate sigma_F and COV_F
+     sigma_F = np.sqrt(sum([(df / 2)**2 for df in delta_F_values]))
+     COV_F = sigma_F / F_MLV
+
+     # Step 5: Calculate reliability index and probability of failure
+     if COV_F == 0:
+         return False, "COV_F is zero - no parameter variability"
+
+     beta_ln = np.log(F_MLV / np.sqrt(1 + COV_F**2)) / np.sqrt(np.log(1 + COV_F**2))
+     reliability = norm.cdf(beta_ln)
+     prob_failure = 1 - reliability
+
+     if debug_level >= 1:
+         print(f"\nσ_F = {sigma_F:.4f}")
+         print(f"COV_F = {COV_F:.4f}")
+         print(f"β_ln = {beta_ln:.4f}")
+         print(f"Reliability = {reliability*100:.2f}%")
+         print(f"Probability of failure = {prob_failure*100:.2f}%")
+
+     # Print summary table
+     if debug_level >= 0:
+         print("\n=== RELIABILITY ANALYSIS RESULTS ===")
+
+         # Parameter table
+         table_data = []
+         for param in param_info:
+             table_data.append([
+                 f"Mat {param['material_id']} {param['param']}",
+                 f"{param['mlv']:.3f}",
+                 f"{param['std']:.3f}",
+                 f"{param['mlv'] + param['std']:.3f}",
+                 f"{param['mlv'] - param['std']:.3f}",
+                 f"{param['F_plus']:.3f}",
+                 f"{param['F_minus']:.3f}",
+                 f"{param['delta_F']:.3f}"
+             ])
+
+         headers = ["Parameter", "MLV", "σ", "MLV+σ", "MLV-σ", "F+", "F-", "ΔF"]
+         colalign = ["left", "center", "center", "center", "center", "center", "center", "center"]
+         print(tabulate(table_data, headers=headers, tablefmt="grid", colalign=colalign))
+
+         # Summary statistics
+         print(f"\nSummary Statistics:")
+         print(f"F_MLV: {F_MLV:.3f}")
+         print(f"σ_F: {sigma_F:.3f}")
+         print(f"COV_F: {COV_F:.3f}")
+         print(f"β_ln: {beta_ln:.3f}")
+         print(f"Reliability: {reliability*100:.2f}%")
+         print(f"Probability of failure: {prob_failure*100:.2f}%")
+
+     # Prepare results
+     result = {
+         'method': f'{method}_reliability',
+         'F_MLV': F_MLV,
+         'sigma_F': sigma_F,
+         'COV_F': COV_F,
+         'beta_ln': beta_ln,
+         'reliability': reliability,
+         'prob_failure': prob_failure,
+         'param_info': param_info,
+         'fs_cache': reliability_fs_cache,
+         'critical_surface': critical_surface,
+         'critical_slices': critical_slices
+     }
+
+     return True, result
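
Editor's note: a minimal usage sketch for the reliability routine (illustrative only, not part of the packaged source). It assumes `slope_data` has been loaded elsewhere by xslope's input routines and that each varied material carries a `sigma_gamma`, `sigma_c`, or `sigma_phi` entry as checked above.

    # Hypothetical call, assuming `slope_data` was loaded by xslope's input routines
    success, rel = reliability(slope_data, 'spencer', rapid=False, circular=True, debug_level=1)
    if success:
        print(f"F_MLV = {rel['F_MLV']:.3f}, beta_ln = {rel['beta_ln']:.3f}, Pf = {rel['prob_failure']*100:.2f}%")
    else:
        print(f"Reliability analysis failed: {rel}")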