xslope 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xslope/__init__.py +1 -0
- xslope/_version.py +4 -0
- xslope/advanced.py +460 -0
- xslope/fem.py +2753 -0
- xslope/fileio.py +671 -0
- xslope/global_config.py +59 -0
- xslope/mesh.py +2719 -0
- xslope/plot.py +1484 -0
- xslope/plot_fem.py +1658 -0
- xslope/plot_seep.py +634 -0
- xslope/search.py +416 -0
- xslope/seep.py +2080 -0
- xslope/slice.py +1075 -0
- xslope/solve.py +1259 -0
- xslope-0.1.2.dist-info/LICENSE +196 -0
- xslope-0.1.2.dist-info/METADATA +56 -0
- xslope-0.1.2.dist-info/NOTICE +14 -0
- xslope-0.1.2.dist-info/RECORD +20 -0
- xslope-0.1.2.dist-info/WHEEL +5 -0
- xslope-0.1.2.dist-info/top_level.txt +1 -0
xslope/fem.py
ADDED
@@ -0,0 +1,2753 @@
# Copyright 2025 Norman L. Jones
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from math import radians, degrees, sin, cos, tan, sqrt, atan2

import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import eigh
from scipy.sparse import lil_matrix, csr_matrix, coo_matrix
from scipy.sparse.linalg import spsolve
from shapely.geometry import LineString, Point


def build_fem_data(slope_data, mesh=None):
    """
    Build a fem_data dictionary from slope_data and optional mesh.

    This function takes a slope_data dictionary (from load_slope_data) and optionally a mesh
    dictionary and constructs a fem_data dictionary suitable for finite element slope stability
    analysis using the Shear Strength Reduction Method (SSRM).

    The function:
    1. Extracts or loads mesh information (nodes, elements, element types, element materials)
    2. Builds material property arrays (c, phi, E, nu, gamma) from the materials table
    3. Computes pore pressure field if needed (piezo or seep options)
    4. Processes reinforcement lines into 1D truss elements with material properties
    5. Constructs boundary conditions (fixed, roller, force) based on mesh geometry
    6. Converts distributed loads to equivalent nodal forces

    Parameters:
        slope_data (dict): Data dictionary from load_slope_data containing:
            - materials: list of material dictionaries with c, phi, gamma, E, nu, pp_option, etc.
            - mesh: optional mesh data if mesh argument is None
            - gamma_water: unit weight of water
            - k_seismic: seismic coefficient
            - reinforcement_lines: list of reinforcement line definitions
            - distributed_loads: list of distributed load definitions
            - seepage_solution: pore pressure data if pp_option is 'seep'
            - max_depth: maximum depth for fixed boundary conditions
        mesh (dict, optional): Mesh dictionary from build_mesh_from_polygons containing:
            - nodes: np.ndarray (n_nodes, 2) of node coordinates
            - elements: np.ndarray (n_elements, 9) of element node indices
            - element_types: np.ndarray (n_elements,) indicating 3, 4, 6, 8, or 9 nodes per element
            - element_materials: np.ndarray (n_elements,) of material IDs (1-based)
            - elements_1d: np.ndarray (n_1d_elements, 3) of 1D element node indices
            - element_types_1d: np.ndarray (n_1d_elements,) indicating 2 or 3 nodes per 1D element
            - element_materials_1d: np.ndarray (n_1d_elements,) of reinforcement line IDs (1-based)

    Returns:
        dict: fem_data dictionary with the following structure:
            - nodes: np.ndarray (n_nodes, 2) of node coordinates
            - elements: np.ndarray (n_elements, 9) of element node indices
            - element_types: np.ndarray (n_elements,) indicating 3 for tri3 elements, 4 for quad4 elements, etc.
            - element_materials: np.ndarray (n_elements,) of material IDs (1-based)
            - bc_type: np.ndarray (n_nodes,) of boundary condition flags (0=free, 1=fixed, 2=x roller, 3=y roller, 4=force)
            - bc_values: np.ndarray (n_nodes, 2) of boundary condition values (f_x, f_y for type 4)
            - c_by_mat: np.ndarray (n_materials,) of cohesion values
            - phi_by_mat: np.ndarray (n_materials,) of friction angle values (degrees)
            - E_by_mat: np.ndarray (n_materials,) of Young's modulus values
            - nu_by_mat: np.ndarray (n_materials,) of Poisson's ratio values
            - gamma_by_mat: np.ndarray (n_materials,) of unit weight values
            - u: np.ndarray (n_nodes,) of pore pressures (if applicable)
            - elements_1d: np.ndarray (n_1d_elements, 3) of 1D element node indices
            - element_types_1d: np.ndarray (n_1d_elements,) indicating 2 for linear elements and 3 for quadratic elements
            - element_materials_1d: np.ndarray (n_1d_elements,) of material IDs (1-based) corresponding to reinforcement lines
            - t_allow_by_1d_elem: np.ndarray (n_1d_elements,) of maximum tensile forces for reinforcement lines
            - t_res_by_1d_elem: np.ndarray (n_1d_elements,) of residual tensile forces for reinforcement lines
            - k_by_1d_elem: np.ndarray (n_1d_elements,) of axial stiffness values for reinforcement lines
            - unit_weight: float, unit weight of water
            - k_seismic: float, seismic coefficient (horizontal acceleration / gravity)
    """

    # Get mesh data - either provided or from slope_data
    if mesh is None:
        if 'mesh' not in slope_data or slope_data['mesh'] is None:
            raise ValueError("No mesh provided and no mesh found in slope_data")
        mesh = slope_data['mesh']

    # Extract mesh data
    nodes = mesh["nodes"]
    elements = mesh["elements"]
    element_types = mesh["element_types"]
    element_materials = mesh["element_materials"]

    n_nodes = len(nodes)
    n_elements = len(elements)

    # Initialize boundary condition arrays
    bc_type = np.zeros(n_nodes, dtype=int)  # 0=free, 1=fixed, 2=x roller, 3=y roller, 4=force
    bc_values = np.zeros((n_nodes, 2))  # f_x, f_y values for type 4

    # Build material property arrays
    materials = slope_data["materials"]
    n_materials = len(materials)

    c_by_mat = np.zeros(n_materials)
    phi_by_mat = np.zeros(n_materials)
    E_by_mat = np.zeros(n_materials)
    nu_by_mat = np.zeros(n_materials)
    gamma_by_mat = np.zeros(n_materials)
    material_names = []

    # Check for consistent pore pressure options
    pp_options = [mat.get("pp_option", "none") for mat in materials]
    unique_pp_options = set([opt for opt in pp_options if opt != "none"])

    if len(unique_pp_options) > 1:
        raise ValueError(f"Mixed pore pressure options not allowed: {unique_pp_options}")

    pp_option = list(unique_pp_options)[0] if unique_pp_options else "none"

    for i, material in enumerate(materials):
        strength_option = material.get("strength_option", "mc")

        if strength_option == "mc":
            # Mohr-Coulomb: use c and phi directly
            c_by_mat[i] = material.get("c", 0.0)
            phi_by_mat[i] = material.get("phi", 0.0)
        elif strength_option == "cp":
            # c/p ratio: compute undrained strength based on depth
            cp_ratio = material.get("cp_ratio", 0.0)
            r_elev = material.get("r_elev", 0.0)

            # For c/p option, we need to assign strength per element based on element centroid
            # This will be handled when processing elements
            c_by_mat[i] = cp_ratio  # Store cp_ratio temporarily
            phi_by_mat[i] = 0.0  # Undrained analysis
        else:
            c_by_mat[i] = material.get("c", 0.0)
            phi_by_mat[i] = material.get("phi", 0.0)

        # Require critical material properties to be explicitly specified
        if "E" not in material:
            raise ValueError(f"Material {i+1} ({material.get('name', f'Material {i+1}')}): Young's modulus (E) is required but not specified")
        if "nu" not in material:
            raise ValueError(f"Material {i+1} ({material.get('name', f'Material {i+1}')}): Poisson's ratio (nu) is required but not specified")
        if "gamma" not in material:
            raise ValueError(f"Material {i+1} ({material.get('name', f'Material {i+1}')}): Unit weight (gamma) is required but not specified")

        E_by_mat[i] = material["E"]
        nu_by_mat[i] = material["nu"]
        gamma_by_mat[i] = material["gamma"]

        # Validate material property ranges
        if E_by_mat[i] <= 0:
            raise ValueError(f"Material {i+1} ({material.get('name', f'Material {i+1}')}): Young's modulus (E) must be positive, got {E_by_mat[i]}")
        if nu_by_mat[i] < 0 or nu_by_mat[i] >= 0.5:
            raise ValueError(f"Material {i+1} ({material.get('name', f'Material {i+1}')}): Poisson's ratio (nu) must be in range [0, 0.5), got {nu_by_mat[i]}")
        if gamma_by_mat[i] <= 0:
            raise ValueError(f"Material {i+1} ({material.get('name', f'Material {i+1}')}): Unit weight (gamma) must be positive, got {gamma_by_mat[i]}")
        material_names.append(material.get("name", f"Material {i+1}"))

    # Handle c/p strength option - compute actual cohesion per element
    c_by_elem = np.zeros(n_elements)
    phi_by_elem = np.zeros(n_elements)

    for elem_idx in range(n_elements):
        mat_id = element_materials[elem_idx] - 1  # Convert to 0-based
        material = materials[mat_id]
        strength_option = material.get("strength_option", "mc")

        if strength_option == "cp":
            cp_ratio = c_by_mat[mat_id]  # This is actually cp_ratio
            r_elev = material.get("r_elev", 0.0)

            # Compute element centroid
            elem_nodes = elements[elem_idx]
            elem_type = element_types[elem_idx]
            active_nodes = elem_nodes[:elem_type]  # Only use active nodes
            elem_coords = nodes[active_nodes]
            centroid_y = np.mean(elem_coords[:, 1])

            # Depth below reference elevation
            depth = max(0.0, r_elev - centroid_y)
            c_by_elem[elem_idx] = cp_ratio * depth
            phi_by_elem[elem_idx] = 0.0
        else:
            c_by_elem[elem_idx] = c_by_mat[mat_id]
            phi_by_elem[elem_idx] = phi_by_mat[mat_id]

    # Process pore pressures
    u = np.zeros(n_nodes)

    if pp_option == "piezo":
        # Find nodes and compute pore pressure from piezometric line
        # Assuming the piezometric line is stored in slope_data
        piezo_line_coords = None

        # Look for piezometric line in various possible locations
        if "piezo_line" in slope_data:
            piezo_line_coords = slope_data["piezo_line"]
        elif "profile_lines" in slope_data:
            # Check if one of the profile lines is designated as piezo
            for line in slope_data["profile_lines"]:
                if hasattr(line, 'type') and line.type == 'piezo':
                    piezo_line_coords = line
                    break

        if piezo_line_coords:
            piezo_line = LineString(piezo_line_coords)
            gamma_water = slope_data.get("gamma_water", 9.81)

            for i, node in enumerate(nodes):
                node_point = Point(node)

                # Find closest point on piezometric line
                closest_point = piezo_line.interpolate(piezo_line.project(node_point))
                piezo_elevation = closest_point.y

                # Compute pore pressure (only positive values)
                if node[1] < piezo_elevation:
                    u[i] = gamma_water * (piezo_elevation - node[1])
                else:
                    u[i] = 0.0

    elif pp_option == "seep":
        # Use existing seepage solution
        if "seepage_solution" in slope_data:
            seepage_solution = slope_data["seepage_solution"]
            if isinstance(seepage_solution, np.ndarray) and len(seepage_solution) == n_nodes:
                u = np.maximum(0.0, seepage_solution)  # Ensure non-negative
            else:
                print("Warning: Seepage solution dimensions don't match mesh nodes")

    # Process 1D reinforcement elements
    elements_1d = np.array([]).reshape(0, 3) if 'elements_1d' not in mesh else mesh['elements_1d']
    element_types_1d = np.array([]) if 'element_types_1d' not in mesh else mesh['element_types_1d']
    element_materials_1d = np.array([]) if 'element_materials_1d' not in mesh else mesh['element_materials_1d']

    n_1d_elements = len(elements_1d)

    t_allow_by_1d_elem = np.zeros(n_1d_elements)
    t_res_by_1d_elem = np.zeros(n_1d_elements)
    k_by_1d_elem = np.zeros(n_1d_elements)

    if n_1d_elements > 0 and "reinforcement_lines" in slope_data:
        reinforcement_lines = slope_data["reinforcement_lines"]

        for elem_idx in range(n_1d_elements):
            line_id = element_materials_1d[elem_idx] - 1  # Convert to 0-based

            if line_id < len(reinforcement_lines):
                line_data = reinforcement_lines[line_id]

                # Get element geometry
                elem_nodes = elements_1d[elem_idx]
                elem_type = element_types_1d[elem_idx]
                active_nodes = elem_nodes[:elem_type]
                elem_coords = nodes[active_nodes]

                # Compute element length and centroid
                if len(elem_coords) >= 2:
                    elem_length = np.linalg.norm(elem_coords[1] - elem_coords[0])
                    elem_centroid = np.mean(elem_coords, axis=0)

                    # Compute distance from element centroid to line ends
                    x1, y1 = line_data.get("x1", 0), line_data.get("y1", 0)
                    x2, y2 = line_data.get("x2", 0), line_data.get("y2", 0)

                    dist_to_left = np.linalg.norm(elem_centroid - [x1, y1])
                    dist_to_right = np.linalg.norm(elem_centroid - [x2, y2])
                    dist_to_nearest_end = min(dist_to_left, dist_to_right)

                    # Get reinforcement properties
                    t_max = line_data.get("t_max", 0.0)
                    t_res = line_data.get("t_res", 0.0)
                    lp1 = line_data.get("lp1", 0.0)  # Pullout length left end
                    lp2 = line_data.get("lp2", 0.0)  # Pullout length right end

                    # Use appropriate pullout length based on which end is closer
                    lp = lp1 if dist_to_left < dist_to_right else lp2

                    # Compute allowable and residual tensile forces
                    if dist_to_nearest_end < lp:
                        # Within pullout zone - linear variation
                        t_allow_by_1d_elem[elem_idx] = t_max * (dist_to_nearest_end / lp)
                        t_res_by_1d_elem[elem_idx] = 0.0  # Sudden pullout failure
                    else:
                        # Beyond pullout zone - full capacity
                        t_allow_by_1d_elem[elem_idx] = t_max
                        t_res_by_1d_elem[elem_idx] = t_res

                    # Compute axial stiffness
                    E = line_data.get("E", 2e11)  # Steel default
                    A = line_data.get("area", 1e-4)  # Default area
                    k_by_1d_elem[elem_idx] = E * A / elem_length

    # Set up boundary conditions

    # Step 1: Default to free (type 0)
    # Already initialized to zeros

    # Step 2: Fixed supports at bottom (type 1) - standard practice
    # Use global minimum y as bottom
    tolerance = 1e-6
    y_min = float(np.min(nodes[:, 1])) if len(nodes) > 0 else 0.0
    bottom_nodes = np.abs(nodes[:, 1] - y_min) < tolerance
    bc_type[bottom_nodes] = 1  # Fixed (u=0, v=0)

    # Step 3: X-roller supports at left and right sides (type 2) - standard practice
    # Use global min/max x to identify left/right boundaries
    if len(nodes) > 0:
        x_min = float(np.min(nodes[:, 0]))
        x_max = float(np.max(nodes[:, 0]))
        left_nodes = np.abs(nodes[:, 0] - x_min) < tolerance
        right_nodes = np.abs(nodes[:, 0] - x_max) < tolerance

        # Apply X-roller but preserve existing boundary conditions (fixed takes precedence at corners)
        left_not_fixed = left_nodes & (bc_type != 1)
        right_not_fixed = right_nodes & (bc_type != 1)

        bc_type[left_not_fixed] = 2  # X-roller (u=0, v=free)
        bc_type[right_not_fixed] = 2  # X-roller (u=0, v=free)

    # Step 4: Convert distributed loads to nodal forces (type 4)
    # Check for distributed loads (could be 'dloads', 'dloads2', or 'distributed_loads')
    distributed_loads = []
    if "dloads" in slope_data and slope_data["dloads"]:
        distributed_loads.extend(slope_data["dloads"])
    if "dloads2" in slope_data and slope_data["dloads2"]:
        distributed_loads.extend(slope_data["dloads2"])
    if "distributed_loads" in slope_data and slope_data["distributed_loads"]:
        distributed_loads.extend(slope_data["distributed_loads"])

    if distributed_loads:
        tolerance = 1e-1  # Tolerance for finding nodes on load lines (increased for better matching)

        for load_idx, load_line in enumerate(distributed_loads):
            # Handle different possible data structures
            if isinstance(load_line, dict) and "coords" in load_line:
                # Expected format: {"coords": [...], "loads": [...]}
                load_coords = load_line["coords"]
                load_values = load_line["loads"]
            elif isinstance(load_line, list):
                # Format from fileio: list of dicts with X, Y, Normal keys
                load_coords = [(pt["X"], pt["Y"]) for pt in load_line]
                load_values = [pt["Normal"] for pt in load_line]
            else:
                continue

            if len(load_coords) < 2 or len(load_values) < 2:
                continue

            load_linestring = LineString(load_coords)
            nodes_found = 0

            # Find nodes that lie on or near the load line
            for i, node in enumerate(nodes):
                node_point = Point(node)
                distance_to_line = load_linestring.distance(node_point)

                if distance_to_line <= tolerance:
                    # This node is on the load line
                    nodes_found += 1
                    # Find position along line and interpolate load
                    projected_distance = load_linestring.project(node_point)

                    # Get segments and interpolate load value
                    segment_lengths = []
                    cumulative_length = 0

                    for j in range(len(load_coords) - 1):
                        seg_length = np.linalg.norm(np.array(load_coords[j+1]) - np.array(load_coords[j]))
                        segment_lengths.append(seg_length)
                        cumulative_length += seg_length

                        if projected_distance <= cumulative_length:
                            # Interpolate within this segment
                            local_distance = projected_distance - (cumulative_length - seg_length)
                            ratio = local_distance / seg_length if seg_length > 0 else 0

                            load_at_node = load_values[j] * (1 - ratio) + load_values[j+1] * ratio
                            break
                    else:
                        # Use last load value if beyond end
                        load_at_node = load_values[-1]

                    # Convert to nodal force using tributary length
                    # For simplicity, use average of adjacent segment lengths
                    tributary_length = np.mean(segment_lengths) if segment_lengths else 1.0
                    nodal_force_magnitude = load_at_node * tributary_length

                    # Determine direction (perpendicular to ground surface)
                    # For now, assume vertical loading
                    bc_type[i] = 4  # Applied force
                    bc_values[i, 0] = 0.0  # No horizontal component
                    bc_values[i, 1] = -nodal_force_magnitude  # Downward

    # Print boundary condition summary
    bc_summary = np.bincount(bc_type, minlength=5)
    print(f"\nBoundary condition summary:")
    print(f"  Type 0 (free): {bc_summary[0]} nodes")
    print(f"  Type 1 (fixed): {bc_summary[1]} nodes")
    print(f"  Type 2 (x-roller): {bc_summary[2]} nodes")
    print(f"  Type 3 (y-roller): {bc_summary[3]} nodes")
    print(f"  Type 4 (force): {bc_summary[4]} nodes")

    # Count non-zero forces
    force_nodes = np.where(bc_type == 4)[0]
    if len(force_nodes) > 0:
        max_force = np.max(np.abs(bc_values[force_nodes]))
        print(f"  Maximum force magnitude: {max_force:.3f}")

    # Get other parameters
    unit_weight = slope_data.get("gamma_water", 9.81)
    k_seismic = slope_data.get("k_seismic", 0.0)

    # Construct fem_data dictionary
    fem_data = {
        "nodes": nodes,
        "elements": elements,
        "element_types": element_types,
        "element_materials": element_materials,
        "bc_type": bc_type,
        "bc_values": bc_values,
        "c_by_mat": c_by_mat,
        "phi_by_mat": phi_by_mat,
        "E_by_mat": E_by_mat,
        "nu_by_mat": nu_by_mat,
        "gamma_by_mat": gamma_by_mat,
        "material_names": material_names,
        "c_by_elem": c_by_elem,  # Element-wise cohesion (for c/p option)
        "phi_by_elem": phi_by_elem,  # Element-wise friction angle
        "u": u,
        "elements_1d": elements_1d,
        "element_types_1d": element_types_1d,
        "element_materials_1d": element_materials_1d,
        "t_allow_by_1d_elem": t_allow_by_1d_elem,
        "t_res_by_1d_elem": t_res_by_1d_elem,
        "k_by_1d_elem": k_by_1d_elem,
        "unit_weight": unit_weight,
        "k_seismic": k_seismic
    }

    return fem_data

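# Illustrative call pattern for build_fem_data (comments only, not executed). The import
# locations and file name below are assumptions made for this sketch; load_slope_data and
# build_mesh_from_polygons are the helpers referenced in the docstring above and live
# elsewhere in this package.
#
#   from xslope.fileio import load_slope_data            # assumed module path
#   from xslope.mesh import build_mesh_from_polygons     # assumed module path
#
#   slope_data = load_slope_data("slope_input.xlsx")     # hypothetical input file
#   mesh = build_mesh_from_polygons(slope_data)          # arguments may differ in practice
#   fem_data = build_fem_data(slope_data, mesh)
#   print(fem_data["nodes"].shape, int(np.sum(fem_data["bc_type"] == 1)), "fixed nodes")
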
def apply_boundary_conditions(K_global, F_global, bc_type, nodes):
    """
    Apply boundary conditions to global system using constraint elimination.

    This function applies boundary conditions by eliminating constrained degrees
    of freedom from the global stiffness matrix and load vector.

    Parameters:
        K_global: Global stiffness matrix (sparse or dense)
        F_global: Global load vector
        bc_type: Array of boundary condition types for each node:
            0 = free (both u and v free)
            1 = fixed (both u=0 and v=0)
            2 = x-roller (u=0, v free)
            3 = y-roller (u free, v=0)
            4 = force (both u and v free, external forces applied)
        nodes: Array of node coordinates (for reference)

    Returns:
        K_constrained: Constrained stiffness matrix (only free DOFs)
        F_constrained: Constrained load vector (only free DOFs)
        constraint_dofs: List of constrained DOF indices
    """

    n_nodes = len(nodes)
    n_dof = 2 * n_nodes

    # Identify constrained DOFs
    constraint_dofs = []

    for i in range(n_nodes):
        if bc_type[i] == 1:  # Fixed: both u and v constrained
            constraint_dofs.extend([2*i, 2*i+1])
        elif bc_type[i] == 2:  # X-roller: u constrained, v free
            constraint_dofs.append(2*i)
        elif bc_type[i] == 3:  # Y-roller: u free, v constrained
            constraint_dofs.append(2*i+1)
        # bc_type 0 and 4 are free DOFs - no constraints

    # Get free DOFs
    all_dofs = set(range(n_dof))
    constraint_dofs_set = set(constraint_dofs)
    free_dofs = sorted(all_dofs - constraint_dofs_set)

    # Extract free DOF submatrices
    if hasattr(K_global, 'toarray'):
        # Sparse matrix
        K_global_dense = K_global.toarray()
    else:
        K_global_dense = K_global

    # Extract submatrix for free DOFs only
    K_constrained = K_global_dense[np.ix_(free_dofs, free_dofs)]
    F_constrained = F_global[free_dofs]

    # Convert back to sparse if original was sparse and matrix is large
    if hasattr(K_global, 'toarray') and len(free_dofs) > 100:
        K_constrained = csr_matrix(K_constrained)

    return K_constrained, F_constrained, constraint_dofs

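# Minimal sketch (illustration only) of the constraint elimination above on a made-up
# two-node system: node 0 is fixed, node 1 is free, and the 4x4 matrix is an arbitrary
# symmetric example rather than a real element stiffness. Defined as a function so it is
# not executed on import.
def _sketch_constraint_elimination():
    nodes = np.array([[0.0, 0.0], [1.0, 0.0]])
    bc_type = np.array([1, 0])  # node 0 fixed, node 1 free
    K = np.array([[ 2.0,  0.0, -1.0,  0.0],
                  [ 0.0,  2.0,  0.0, -1.0],
                  [-1.0,  0.0,  2.0,  0.0],
                  [ 0.0, -1.0,  0.0,  2.0]])
    F = np.array([0.0, 0.0, 0.0, -1.0])  # downward load on node 1
    K_free, F_free, fixed_dofs = apply_boundary_conditions(K, F, bc_type, nodes)
    # Only the two DOFs of node 1 remain; fixed_dofs == [0, 1]
    return np.linalg.solve(K_free, F_free), fixed_dofs
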
# Implementation of Perzyna Visco-Plastic Algorithm for Slope Stability
#
# Based on:
# - Griffiths & Lane (1999) "Slope stability analysis by finite elements"
# - Perzyna (1966) "Fundamental problems in viscoplasticity"
# - Zienkiewicz & Cormeau (1974) visco-plastic algorithm
#
# Key features:
# - Pure non-convergence failure criterion
# - Perzyna stress redistribution algorithm
# - 8-node quadrilateral elements with reduced integration
# - No plastic stiffness reduction

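# Schematic sketch of the Perzyna update described above (illustration only; this is not
# the routine that solve_fem calls). For a Gauss point whose yield function value f is
# positive, the visco-plastic strain increment over a pseudo-time step dt is taken
# proportional to f and to a flow direction dq_dsigma. The unit fluidity and the optional
# cap (mirroring the plastic_strain_cap argument below) are simplifying assumptions here.
def _perzyna_increment_sketch(f_yield, dq_dsigma, dt, cap=None):
    if f_yield <= 0.0:
        return np.zeros_like(np.asarray(dq_dsigma, dtype=float))  # elastic: no visco-plastic flow
    d_eps_vp = dt * f_yield * np.asarray(dq_dsigma, dtype=float)  # rate times pseudo-time step
    if cap is not None:
        norm = np.linalg.norm(d_eps_vp)
        if norm > cap:
            d_eps_vp *= cap / norm  # limit the per-iteration increment magnitude
    return d_eps_vp
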
def solve_fem(fem_data, F=1.0, debug_level=0, abort_after=-1, iteration_print_frequency=5, dt_max=1e-5, max_iterations=60, tolerance=5e-4, damping_factor=0.9, plastic_strain_cap=0.005):
    """
    Solve FEM using Perzyna visco-plastic algorithm exactly as in Griffiths & Lane (1999).

    This implements the algorithm from the 1999 Geotechnique paper:
    - 8-node quadrilateral elements with reduced integration (4 Gauss points)
    - Perzyna visco-plastic stress redistribution
    - Non-convergence failure criterion (iteration limit set by max_iterations)
    - No plastic stiffness reduction

    Parameters:
        fem_data (dict): FEM data dictionary
        F (float): Shear strength reduction factor
        debug_level (int): Verbosity level
        abort_after (int): Abort after this many iterations. -1 = no abort (default)
            0 = abort after gravity loading (before plasticity check)
            1 = abort after first plasticity iteration
            etc.
        iteration_print_frequency (int): Print iteration info every N iterations (default=5)
        dt_max (float): Maximum pseudo-time step for Perzyna updates; dt = min(dt_base, dt_max).
            Defaults to 1e-5.
        max_iterations (int): Maximum Perzyna iterations. Defaults to 60.
        tolerance (float): Convergence tolerance on relative displacement change. Defaults to 5e-4.
        damping_factor (float): Under-relaxation factor for displacement update (0 < d <= 1).
            Lower for more damping (e.g., 0.8–0.9). Defaults to 0.9.
        plastic_strain_cap (float|None): Max per-Gauss plastic strain increment magnitude per
            iteration (cap on |erate*dt|). None disables capping.

    Returns:
        dict: Solution dictionary with convergence status
    """

    if debug_level >= 1:
        print(f"=== Perzyna Visco-Plastic FEM Analysis (F={F:.3f}) ===")

    # Extract data
    nodes = fem_data["nodes"]
    elements = fem_data["elements"]
    element_types = fem_data["element_types"]
    element_materials = fem_data["element_materials"]
    bc_type = fem_data["bc_type"]
    bc_values = fem_data["bc_values"]

    # Material properties
    c_by_elem = fem_data.get("c_by_elem", fem_data["c_by_mat"][element_materials - 1])
    phi_by_elem = fem_data.get("phi_by_elem", fem_data["phi_by_mat"][element_materials - 1])
    E_by_mat = fem_data["E_by_mat"]
    nu_by_mat = fem_data["nu_by_mat"]
    gamma_by_mat = fem_data["gamma_by_mat"]
    u_nodal = fem_data["u"]
    k_seismic = fem_data.get("k_seismic", 0.0)

    n_nodes = len(nodes)
    n_elements = len(elements)
    n_dof = 2 * n_nodes

    # Apply strength reduction (Griffiths & Lane 1999 approach)
    c_reduced = c_by_elem / F
    tan_phi_reduced = np.tan(np.radians(phi_by_elem)) / F
    phi_reduced = np.arctan(tan_phi_reduced)  # Keep in radians for yield functions

    if debug_level >= 2:
        print(f"Original c range: [{np.min(c_by_elem):.1f}, {np.max(c_by_elem):.1f}]")
        print(f"Reduced c range: [{np.min(c_reduced):.1f}, {np.max(c_reduced):.1f}]")
        print(f"Original phi: {phi_by_elem[0]:.1f}°")
        print(f"Reduced phi: {np.degrees(phi_reduced[0]):.1f}°")
        print(f"Original φ range: [{np.min(phi_by_elem):.1f}°, {np.max(phi_by_elem):.1f}°]")
        print(f"Reduced φ range: [{np.min(np.degrees(phi_reduced)):.1f}°, {np.max(np.degrees(phi_reduced)):.1f}°]")

    # Build global stiffness matrix (elastic, constant throughout)
    K_global = build_global_stiffness(nodes, elements, element_types,
                                      element_materials, E_by_mat, nu_by_mat)

    # Build gravity load vector
    F_gravity = build_gravity_loads(nodes, elements, element_types,
                                    element_materials, gamma_by_mat, k_seismic)

    # Boundary conditions will be applied in each iteration using apply_boundary_conditions

    # Pseudo-time step for numerical stability (not real time - this is steady-state)
    # Griffiths & Lane approach: start with large value, then calculate based on material properties
    dt_base = 1.0e15  # Large initial value as in p62.f90 line 19

    # Calculate time step based on material properties (p62.f90 lines 72-73)
    # Use the first material's properties for time step calculation
    E = E_by_mat[0]  # Young's modulus of first material
    nu = nu_by_mat[0]  # Poisson's ratio of first material
    ddt = 4.0 * (1.0 + nu) / (3.0 * E)  # d4*(one+prop(3))/(d3*prop(2))
    if ddt < dt_base:
        dt_base = ddt

    # Final time step control: dt is min of material-based dt_base and user dt_max
    dt = min(dt_base, dt_max)

    # Debug prints for dt calculation
    if debug_level >= 2:
        print(f"Time step estimate: E={E:.3g}, nu={nu:.3g}, ddt={ddt:.3e}")
        print(f"dt_base={dt_base:.3e}, dt_max={dt_max:.3e}, dt={dt:.3e}")

    # Phase 1: Establish K0 (gravity) stress state from elastic solution
    if debug_level >= 1:
        print("Phase 1: Establishing K0 stress state by elastic gravity loading...")
    initial_displacements, stress_state = establish_k0_stress_state(
        K_global, F_gravity, bc_type, nodes, elements, element_types,
        element_materials, E_by_mat, nu_by_mat, gamma_by_mat, u_nodal, debug_level=max(0, debug_level-1)
    )

    # Debug gravity loads and element areas
    if debug_level >= 1:
        print(f"  Debug: Checking gravity load calculation")
        sample_elem = 129  # should be close to shear zone for tri3 mesh
        if sample_elem < len(elements):
            elem_type = element_types[sample_elem]
            elem_nodes = elements[sample_elem][:elem_type]
            elem_coords = nodes[elem_nodes]
            if elem_type == 8:
                area = compute_quad_area(elem_coords)
            else:
                # Triangle area
                x1, y1 = elem_coords[0]
                x2, y2 = elem_coords[1]
                x3, y3 = elem_coords[2]
                area = 0.5 * abs((x2-x1)*(y3-y1) - (x3-x1)*(y2-y1))

            gamma = gamma_by_mat[element_materials[sample_elem] - 1]
            load_per_node = gamma * area / elem_type
            print(f"  Element {sample_elem}: area={area:.2f}, gamma={gamma}, load_per_node={load_per_node:.1f}")

    # Calculate yield function values for all elements after gravity loading
    yield_function_values = np.zeros(n_elements)
    # Collect all σyy values for diagnostics
    all_sigma_yy = []
    total_gauss_points = 0
    yielded_gauss_points = 0

    for elem_idx in range(n_elements):
        elem_type = element_types[elem_idx]
        # Use first Gauss point stress for yield function (or average for quads)
        if elem_type == 8:  # 8-node quad - average over Gauss points
            elem_stress_avg = np.mean(stress_state['element_stresses'][elem_idx, :4, :], axis=0)
            # Collect all σyy from Gauss points
            for gp in range(4):
                all_sigma_yy.append(stress_state['element_stresses'][elem_idx, gp, 1])  # σyy is index 1
                total_gauss_points += 1
                # Check if this Gauss point yields
                gp_yield = check_mohr_coulomb_cp(
                    stress_state['element_stresses'][elem_idx, gp, :],
                    c_reduced[elem_idx], phi_reduced[elem_idx])
                if gp_yield > 0:
                    yielded_gauss_points += 1
        else:  # Triangle or other - use first Gauss point
            elem_stress_avg = stress_state['element_stresses'][elem_idx, 0, :]
            all_sigma_yy.append(stress_state['element_stresses'][elem_idx, 0, 1])  # σyy
            total_gauss_points += 1
            # Check if yields
            gp_yield = check_mohr_coulomb_cp(
                stress_state['element_stresses'][elem_idx, 0, :],
                c_reduced[elem_idx], phi_reduced[elem_idx])
            if gp_yield > 0:
                yielded_gauss_points += 1

        # Calculate yield function with reduced strength parameters
        yield_function_values[elem_idx] = check_mohr_coulomb_cp(
            elem_stress_avg, c_reduced[elem_idx], phi_reduced[elem_idx])

    # Diagnostic 1: Min/max σyy after gravity
    all_sigma_yy = np.array(all_sigma_yy)
    min_sigma_yy = np.min(all_sigma_yy)
    max_sigma_yy = np.max(all_sigma_yy)

    if debug_level >= 1:
        print(f"\n=== After Gravity Loading ===")
        print(f"  Min σyy: {min_sigma_yy:.3f} kPa")
        print(f"  Max σyy: {max_sigma_yy:.3f} kPa")

    # Diagnostic 2: Fraction of yielded Gauss points
    yielding_fraction_initial = yielded_gauss_points / total_gauss_points if total_gauss_points > 0 else 0
    if debug_level >= 1:
        print(f"\n=== After Yield Check (F > 0) ===")
        print(f"  Gauss points meeting yield criterion (F>0): {yielded_gauss_points}/{total_gauss_points} ({yielding_fraction_initial*100:.1f}%)")
        print(f"  Note: These points satisfy F>0 but haven't developed plastic strains yet")

    # Check for early abort after gravity loading
    if abort_after == 0:
        if debug_level >= 1:
            print("Aborting after gravity loading (abort_after=0)")

        # Compute stresses and strains for output
        final_stresses, plastic_elements = compute_final_state_perzyna(
            nodes, elements, element_types, element_materials,
            initial_displacements, {}, c_reduced, phi_reduced,
            E_by_mat, nu_by_mat, u_nodal, stress_state)

        strains = compute_strains(nodes, elements, element_types, initial_displacements)

        return {
            "converged": True,
            "iterations": 0,
            "displacements": initial_displacements,
            "stresses": final_stresses,
            "strains": strains,
            "plastic_elements": plastic_elements,
            "yield_function": yield_function_values,
            "max_displacement": np.max(np.abs(initial_displacements)),
            "plastic_strains": {},
            "algorithm": "Perzyna Visco-Plastic (aborted after gravity)",
            "aborted": True,
            "abort_after": abort_after
        }

    if debug_level >= 1:
        print("Phase 2: Starting Perzyna strength reduction analysis...")

    displacements = initial_displacements.copy()
    displacements_prev = initial_displacements.copy()  # Track previous displacements
    plastic_strains = {}  # Store plastic strains at each Gauss point

    # Initialize total stress state from K₀ (this will be updated incrementally)
    current_stress_state = {
        'element_stresses': stress_state['element_stresses'].copy(),
        'plastic_state': np.zeros((n_elements, 4), dtype=bool)
    }

    # Initialize plastic strain storage
    for elem_idx in range(n_elements):
        elem_type = element_types[elem_idx]
        if elem_type == 8:  # 8-node quad
            n_gauss = 4  # Reduced integration
        elif elem_type == 3:  # 3-node triangle
            n_gauss = 1
        else:
            n_gauss = 1

        plastic_strains[elem_idx] = np.zeros((n_gauss, 3))  # [eps_x, eps_y, gamma_xy] plastic

    converged = False

    # Track data for CSV output
    csv_data = []

    for iteration in range(max_iterations):
        if debug_level >= 3:
            print(f"\n--- Iteration {iteration + 1} ---")

        # Build load vector: maintain gravity (constant external load) + plastic corrections
        F_total = F_gravity.copy()

        # Add plastic strain corrections (Perzyna stress redistribution)
        F_plastic_correction = compute_plastic_load_correction_perzyna(
            nodes, elements, element_types, element_materials,
            plastic_strains, E_by_mat, nu_by_mat, dt)

        if debug_level >= 2:
            print(f"Plastic correction norm: {np.linalg.norm(F_plastic_correction):.2e}")

        F_total += F_plastic_correction

        # DEBUG: Detailed analysis for first iteration only
        if iteration == 0 and debug_level >= 1:
            print(f"\n=== DEBUGGING FIRST ITERATION ===")
            print(f"Gravity load norm: {np.linalg.norm(F_gravity):.2e}")
            print(f"Total load norm: {np.linalg.norm(F_total):.2e}")

        # Add displacement and stress debugging after load application
        if iteration == 0 and debug_level >= 1:
            # Apply boundary conditions and solve to see what displacements result
            K_constrained, F_constrained, constraint_dofs = apply_boundary_conditions(
                K_global, F_total, bc_type, nodes)

            try:
                if hasattr(K_constrained, 'toarray'):
                    K_constrained = K_constrained.tocsr()
                displacements_free = spsolve(K_constrained, F_constrained)

                # Reconstruct full displacement vector
                n_dof = 2 * n_nodes
                displacements_new = np.zeros(n_dof)
                free_dofs = [i for i in range(n_dof) if i not in constraint_dofs]
                displacements_new[free_dofs] = displacements_free

                # Check displacement magnitudes
                max_disp = np.max(np.abs(displacements_new))
                print(f"\nFirst iteration displacement analysis:")
                print(f"  Max displacement magnitude: {max_disp:.4f}")
                print(f"  Max vertical displacement: {np.max(displacements_new[1::2]):.4f}")
                print(f"  Min vertical displacement: {np.min(displacements_new[1::2]):.4f}")
                print(f"  Max horizontal displacement: {np.max(displacements_new[0::2]):.4f}")
                print(f"  Min horizontal displacement: {np.min(displacements_new[0::2]):.4f}")

            except Exception as e:
                print(f"  Error in displacement analysis: {e}")

        # Add boundary condition loads
        for i in range(n_nodes):
            if bc_type[i] == 4:  # Force boundary condition
                F_total[2*i] += bc_values[i, 0]
                F_total[2*i+1] += bc_values[i, 1]

        # Apply constraints using proper free DOF extraction
        K_constrained, F_constrained, constraint_dofs = apply_boundary_conditions(
            K_global, F_total, bc_type, nodes)

        # Solve for displacement increment at equilibrium (incremental residual form)
        try:
            if hasattr(K_constrained, 'toarray'):
                K_constrained = K_constrained.tocsr()

            # Current free DOF vector
            n_dof = 2 * n_nodes
            free_dofs = [i for i in range(n_dof) if i not in constraint_dofs]
            u_free_curr = displacements[free_dofs]

            # Residual: r = F_free - K_free * u_free
            r_free = F_constrained - K_constrained @ u_free_curr

            # Solve K * delta_u = r
            delta_u_free = spsolve(K_constrained, r_free)

            # Classic update (no extra damping)
            u_free_new = u_free_curr + delta_u_free

            # Debug displacement increments
            if debug_level >= 2:
                max_delta_u = np.max(np.abs(delta_u_free))
                max_u_curr = np.max(np.abs(u_free_curr))
                max_u_new = np.max(np.abs(u_free_new))
                print(f"  Displacement increment: max_delta_u={max_delta_u:.2e}, max_u_curr={max_u_curr:.2e}, max_u_new={max_u_new:.2e}")

            # Reconstruct full displacement vector
            displacements_new = np.zeros(n_dof)
            displacements_new[free_dofs] = u_free_new
            # Constrained DOFs remain zero
        except Exception as e:
            if debug_level >= 1:
                print(f"Matrix solution failed: {e}")
            return {
                "converged": False,
                "error": f"Matrix solution failed: {e}",
                "iterations": iteration + 1,
                "displacements": displacements,
                "algorithm": "Perzyna"
            }

        # Update plastic strains using Perzyna algorithm with incremental approach
        plastic_strains_new, total_plastic_increment, current_stress_state = update_plastic_strains_perzyna_incremental(
            nodes, elements, element_types, element_materials,
            displacements_new, displacements_prev, plastic_strains, current_stress_state,
            c_reduced, phi_reduced, E_by_mat, nu_by_mat, dt, plastic_strain_cap, debug_level)

        # Debug plastic strain accumulation
        if debug_level >= 2:
            max_plastic_strain = 0.0
            total_plastic_strain = 0.0
            n_plastic_points = 0
            for elem_idx in plastic_strains_new:
                for gp in range(len(plastic_strains_new[elem_idx])):
                    plastic_magnitude = np.linalg.norm(plastic_strains_new[elem_idx][gp])
                    if plastic_magnitude > 1e-12:
                        max_plastic_strain = max(max_plastic_strain, plastic_magnitude)
                        total_plastic_strain += plastic_magnitude
                        n_plastic_points += 1
            print(f"  Plastic strain stats: max={max_plastic_strain:.2e}, total={total_plastic_strain:.2e}, n_points={n_plastic_points}")

        # Check convergence
        disp_change = np.linalg.norm(displacements_new - displacements)
        plastic_change = total_plastic_increment
        residual_norm = disp_change  # Using displacement change as residual

        # Calculate current plastic and yielding fractions
        n_plastic_gauss = 0
        n_yielding_gauss = 0
        total_gauss = 0
        for elem_idx in range(n_elements):
            elem_type = element_types[elem_idx]
            n_gauss = 4 if elem_type == 8 else 1
            total_gauss += n_gauss

            # Count plastic strains (use tolerance to avoid numerical precision issues)
            if elem_idx in plastic_strains:
                for gp in range(n_gauss):
                    strain_magnitude = np.linalg.norm(plastic_strains[elem_idx][gp])
                    if strain_magnitude > 1e-12:
                        n_plastic_gauss += 1

            # Count yielding points (F > 0)
            for gp in range(n_gauss):
                stress_gp = stress_state['element_stresses'][elem_idx, gp, :]
                f_yield = check_mohr_coulomb_cp(stress_gp, c_reduced[elem_idx], phi_reduced[elem_idx])
                if f_yield > 0:
                    n_yielding_gauss += 1

        plastic_fraction = n_plastic_gauss / total_gauss if total_gauss > 0 else 0
        yielding_fraction = n_yielding_gauss / total_gauss if total_gauss > 0 else 0

        # Diagnostic 3: Per-iteration output (controlled by frequency)
        if debug_level >= 2 and (iteration + 1) % iteration_print_frequency == 0:
            print(f"Iteration {iteration + 1}: F={F:.3f}, Residual={residual_norm:.3e}, Yielding={yielding_fraction*100:.1f}%, Plastic strains={plastic_fraction*100:.1f}%")
        elif debug_level >= 3 and (iteration + 1) % iteration_print_frequency == 0:
            print(f"Displacement change norm: {disp_change:.2e}")
            print(f"Plastic strain increment: {plastic_change:.2e}")
            print(f"Max displacement: {np.max(np.abs(displacements_new)):.6f}")

        # Griffiths convergence criterion - check for equilibrium
        # Converge if displacement change is small relative to current displacement magnitude
        max_current_disp = np.max(np.abs(displacements_new))
        relative_disp_change = disp_change / max(max_current_disp, 1e-6)

        # Don't converge too early - ensure at least some iterations for plastic development
        if iteration > 10 and relative_disp_change < tolerance and plastic_change < 0.01:
            converged = True
            if debug_level >= 2:
                print(f"Converged after {iteration + 1} iterations")
            break

        # Additional check: if displacements become very large, this indicates failure
        max_disp = np.max(np.abs(displacements_new))
        if max_disp > 100.0:  # Much higher threshold to allow expansive plastic zone development
            if debug_level >= 1:
                print(f"Large displacements detected ({max_disp:.3f}) - slope failure")
            converged = False
            break

        # Apply light numerical damping to control global instability
        displacements = damping_factor * displacements_new + (1 - damping_factor) * displacements
        displacements_prev = displacements.copy()  # Store previous displacements
        plastic_strains = plastic_strains_new

        # Check for early abort
        if abort_after > 0 and iteration + 1 >= abort_after:
            if debug_level >= 1:
                print(f"Aborting after iteration {iteration + 1} (abort_after={abort_after})")
            converged = True  # Mark as converged for early abort
            break

        # Check for excessive displacements (numerical instability indicator)
        max_disp = np.max(np.abs(displacements))
        if max_disp > 1e6:  # Much higher threshold to allow expansive plastic zone development
            if debug_level >= 1:
                print(f"Numerical instability detected: max displacement = {max_disp:.2e}")
            break

    # Compute final state
    final_stresses, plastic_elements = compute_final_state_perzyna(
        nodes, elements, element_types, element_materials,
        displacements, plastic_strains, c_reduced, phi_reduced,
        E_by_mat, nu_by_mat, u_nodal, current_stress_state)

    # Compute strains
    strains = compute_strains(nodes, elements, element_types, displacements)

    # Calculate final statistics
    final_sigma_yy = []
    for elem_idx in range(n_elements):
        elem_type = element_types[elem_idx]
        if elem_type == 8:
            for gp in range(4):
                final_sigma_yy.append(final_stresses[elem_idx, 1])  # Using element average stress
        else:
            final_sigma_yy.append(final_stresses[elem_idx, 1])

    final_min_sigma_yy = np.min(final_sigma_yy)
    final_max_sigma_yy = np.max(final_sigma_yy)

    # Final plastic fraction
    final_n_plastic_gauss = 0
    final_total_gauss = 0
    for elem_idx in range(n_elements):
        elem_type = element_types[elem_idx]
        n_gauss = 4 if elem_type == 8 else 1
        final_total_gauss += n_gauss
        if elem_idx in plastic_strains:
            for gp in range(n_gauss):
                strain_magnitude = np.linalg.norm(plastic_strains[elem_idx][gp])
                if strain_magnitude > 1e-12:
                    final_n_plastic_gauss += 1

    final_plastic_fraction = final_n_plastic_gauss / final_total_gauss if final_total_gauss > 0 else 0

    # Calculate final yield function values
    final_yield_function_values = np.zeros(n_elements)
    for elem_idx in range(n_elements):
        # Use the stress from final_stresses (which includes von Mises as 4th column)
        elem_stress = final_stresses[elem_idx, :3]  # [sig_x, sig_y, tau_xy]
        final_yield_function_values[elem_idx] = check_mohr_coulomb_cp(
            elem_stress, c_reduced[elem_idx], phi_reduced[elem_idx])

    # Diagnostic 4: Final summary
    if debug_level >= 1:
        n_yielding = np.sum(final_yield_function_values > 0)
        n_plastic = np.sum(plastic_elements)
        print(f"\n=== Final Summary ===")
        print(f"  F={F:.3f}, Iterations={iteration + 1}, Converged={'Yes' if converged else 'No'}")
        print(f"  Final residual: {residual_norm:.3e}")
        print(f"  Elements with F>0 (yielding): {n_yielding}/{n_elements}")
        print(f"  Elements with plastic strains: {n_plastic}/{n_elements} (based on final F>1e-8)")
        print(f"  Gauss points with accumulated plastic strains: {final_n_plastic_gauss}/{final_total_gauss} ({final_plastic_fraction*100:.1f}%)")
        print(f"  Min σyy: {final_min_sigma_yy:.3f} kPa, Max σyy: {final_max_sigma_yy:.3f} kPa")
    elif debug_level >= 2:
        n_plastic = np.sum(plastic_elements)
        print(f"Final: {n_plastic}/{n_elements} plastic elements")

    result = {
        "converged": converged,
        "iterations": iteration + 1,
        "displacements": displacements,
        "stresses": final_stresses,
        "strains": strains,
        "plastic_elements": plastic_elements,
        "yield_function": final_yield_function_values,
        "max_displacement": np.max(np.abs(displacements)),
        "plastic_strains": plastic_strains,
        "algorithm": "Perzyna Visco-Plastic",
        "F": F,
        "residual": residual_norm if 'residual_norm' in locals() else 0.0,
        "plastic_fraction": final_plastic_fraction,
        "min_sigma_yy": final_min_sigma_yy,
        "max_sigma_yy": final_max_sigma_yy
    }

    # Add abort information if applicable
    if abort_after > 0 and iteration + 1 >= abort_after:
        result["aborted"] = True
        result["abort_after"] = abort_after

    return result

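# Illustrative call pattern for a single trial reduction factor (comments only; requires a
# real fem_data dictionary produced by build_fem_data above):
#
#   result = solve_fem(fem_data, F=1.25, debug_level=1)
#   if result["converged"]:
#       print("F = 1.25 is stable; max displacement:", result["max_displacement"])
#   else:
#       print("F = 1.25 did not converge - treated as failure in the SSRM search")
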
1061
|
+
def solve_ssrm(fem_data, F_min=1.0, F_max=3.0, tolerance=0.01, debug_level=0):
|
|
1062
|
+
"""
|
|
1063
|
+
SSRM using Perzyna algorithm with pure non-convergence failure criterion.
|
|
1064
|
+
"""
|
|
1065
|
+
|
|
1066
|
+
if debug_level >= 1:
|
|
1067
|
+
print("=== Perzyna SSRM Analysis ===")
|
|
1068
|
+
print("Failure criterion: Pure non-convergence (Griffiths & Lane 1999)")
|
|
1069
|
+
|
|
1070
|
+
F_left = F_min
|
|
1071
|
+
F_right = F_max
|
|
1072
|
+
|
|
1073
|
+
# Verify bounds
|
|
1074
|
+
solution_min = solve_fem(fem_data, F=F_min, debug_level=max(0, debug_level-1))
|
|
1075
|
+
if not solution_min["converged"]:
|
|
1076
|
+
return {
|
|
1077
|
+
"converged": False,
|
|
1078
|
+
"error": f"F_min = {F_min} does not converge - slope unstable",
|
|
1079
|
+
"FS": None
|
|
1080
|
+
}
|
|
1081
|
+
|
|
1082
|
+
solution_max = solve_fem(fem_data, F=F_max, debug_level=max(0, debug_level-1))
|
|
1083
|
+
if solution_max["converged"]:
|
|
1084
|
+
if debug_level >= 1:
|
|
1085
|
+
print(f"Warning: F_max = {F_max} still converges - very stable slope")
|
|
1086
|
+
return {
|
|
1087
|
+
"converged": True,
|
|
1088
|
+
"FS": F_max,
|
|
1089
|
+
"last_solution": solution_max,
|
|
1090
|
+
"note": f"Slope stable up to F = {F_max}"
|
|
1091
|
+
}
|
|
1092
|
+
|
|
1093
|
+
iteration = 0
|
|
1094
|
+
max_iterations = 50
|
|
1095
|
+
last_converged_solution = solution_min # Initialize with F_min solution
|
|
1096
|
+
|
|
1097
|
+
# Bisection search for critical F
|
|
1098
|
+
while (F_right - F_left) > tolerance and iteration < max_iterations:
|
|
1099
|
+
F_mid = (F_left + F_right) / 2.0
|
|
1100
|
+
|
|
1101
|
+
if debug_level >= 1:
|
|
1102
|
+
print(f"\nSSRM Iteration {iteration + 1}: Testing F = {F_mid:.4f}")
|
|
1103
|
+
print(f"Current interval: [{F_left:.4f}, {F_right:.4f}]")
|
|
1104
|
+
|
|
1105
|
+
solution = solve_fem(fem_data, F=F_mid, debug_level=max(0, debug_level-1))
|
|
1106
|
+
|
|
1107
|
+
if solution["converged"]:
|
|
1108
|
+
# F_mid is stable, critical F is higher
|
|
1109
|
+
F_left = F_mid
|
|
1110
|
+
last_converged_solution = solution
|
|
1111
|
+
if debug_level >= 2:
|
|
1112
|
+
print(f"F = {F_mid:.4f} converged (stable)")
|
|
1113
|
+
else:
|
|
1114
|
+
# F_mid failed, critical F is lower
|
|
1115
|
+
F_right = F_mid
|
|
1116
|
+
if debug_level >= 2:
|
|
1117
|
+
print(f"F = {F_mid:.4f} failed to converge (unstable)")
|
|
1118
|
+
|
|
1119
|
+
iteration += 1
|
|
1120
|
+
|
|
1121
|
+
critical_FS = F_left
|
|
1122
|
+
|
|
1123
|
+
if debug_level >= 1:
|
|
1124
|
+
print(f"\nPerszyna SSRM completed: Critical FS = {critical_FS:.4f}")
|
|
1125
|
+
print(f"Final interval: [{F_left:.4f}, {F_right:.4f}]")
|
|
1126
|
+
print(f"Iterations: {iteration}")
|
|
1127
|
+
|
|
1128
|
+
return {
|
|
1129
|
+
"converged": True,
|
|
1130
|
+
"FS": critical_FS,
|
|
1131
|
+
"last_solution": last_converged_solution,
|
|
1132
|
+
"iterations_ssrm": iteration,
|
|
1133
|
+
"final_interval": (F_left, F_right),
|
|
1134
|
+
"interval_width": F_right - F_left,
|
|
1135
|
+
"method": "Perzyna Visco-Plastic (Griffiths & Lane 1999)"
|
|
1136
|
+
}
|
|
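To make the search logic above easier to follow, here is a minimal, self-contained sketch of the same bisection-on-nonconvergence idea, with a mock convergence test standing in for a full solve_fem call; the threshold value 1.37 and the helper names are purely illustrative.

def _demo_ssrm_bisection():
    # Mock stand-in for solve_fem: "converges" whenever the trial strength
    # reduction factor F is below the (hidden) true factor of safety.
    def converges(F, true_fs=1.37):
        return F < true_fs

    lo, hi = 1.0, 3.0
    while hi - lo > 0.01:
        mid = 0.5 * (lo + hi)
        if converges(mid):
            lo = mid   # stable -> critical F is higher
        else:
            hi = mid   # non-convergence -> critical F is lower
    print(f"Estimated FS bracket: [{lo:.3f}, {hi:.3f}]")

# _demo_ssrm_bisection()  # uncomment to run the illustration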
1137
|
+
|
|
1138
|
+
|
|
1139
|
+
def build_global_stiffness(nodes, elements, element_types, element_materials, E_by_mat, nu_by_mat):
|
|
1140
|
+
"""
|
|
1141
|
+
Build global stiffness matrix using existing FE implementation for proper 8-node quad support.
|
|
1142
|
+
"""
|
|
1143
|
+
# Use existing stiffness functions (now they are in this same file after consolidation)
|
|
1144
|
+
|
|
1145
|
+
n_nodes = len(nodes)
|
|
1146
|
+
n_dof = 2 * n_nodes
|
|
1147
|
+
|
|
1148
|
+
K_global = lil_matrix((n_dof, n_dof))
|
|
1149
|
+
|
|
1150
|
+
for elem_idx, element in enumerate(elements):
|
|
1151
|
+
elem_type = element_types[elem_idx]
|
|
1152
|
+
mat_id = element_materials[elem_idx] - 1
|
|
1153
|
+
|
|
1154
|
+
E = E_by_mat[mat_id]
|
|
1155
|
+
nu = nu_by_mat[mat_id]
|
|
1156
|
+
|
|
1157
|
+
# Get element coordinates
|
|
1158
|
+
elem_nodes = element[:elem_type]
|
|
1159
|
+
elem_coords = nodes[elem_nodes]
|
|
1160
|
+
|
|
1161
|
+
# Build element stiffness matrix using corrected implementation
|
|
1162
|
+
try:
|
|
1163
|
+
if elem_type == 3: # Triangular elements
|
|
1164
|
+
K_elem = build_triangle_stiffness_corrected(elem_coords, E, nu)
|
|
1165
|
+
elif elem_type == 8: # 8-node quadrilateral elements - use corrected Griffiths version
|
|
1166
|
+
K_elem = build_quad8_stiffness_reduced_integration_corrected(elem_coords, E, nu)
|
|
1167
|
+
elif elem_type in [4, 6, 9]: # Other elements - use simple triangle implementation
|
|
1168
|
+
K_elem = build_triangle_stiffness_corrected(elem_coords, E, nu)
|
|
1169
|
+
else:
|
|
1170
|
+
print(f"Warning: Element type {elem_type} not supported")
|
|
1171
|
+
continue
|
|
1172
|
+
except Exception as e:
|
|
1173
|
+
print(f"Error building stiffness for element {elem_idx}, type {elem_type}: {e}")
|
|
1174
|
+
continue
|
|
1175
|
+
|
|
1176
|
+
# Assemble into global matrix
|
|
1177
|
+
for i in range(elem_type):
|
|
1178
|
+
for j in range(elem_type):
|
|
1179
|
+
node_i = elem_nodes[i]
|
|
1180
|
+
node_j = elem_nodes[j]
|
|
1181
|
+
|
|
1182
|
+
for di in range(2):
|
|
1183
|
+
for dj in range(2):
|
|
1184
|
+
global_i = 2 * node_i + di
|
|
1185
|
+
global_j = 2 * node_j + dj
|
|
1186
|
+
local_i = 2 * i + di
|
|
1187
|
+
local_j = 2 * j + dj
|
|
1188
|
+
|
|
1189
|
+
if local_i < K_elem.shape[0] and local_j < K_elem.shape[1]:
|
|
1190
|
+
K_global[global_i, global_j] += K_elem[local_i, local_j]
|
|
1191
|
+
|
|
1192
|
+
return K_global.tocsr()
|
|
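The assembly loop above relies on the usual two-DOF-per-node mapping (global DOF = 2*node + component). Below is a small sketch of just that mapping, assuming this module's imports (numpy as np, scipy.sparse.lil_matrix); the identity element matrix and the 3-node connectivity are arbitrary placeholders so the effect on the global matrix is easy to see.

def _demo_dof_assembly():
    # Each local DOF (i, di) of an element maps to global DOF 2*elem_nodes[i] + di.
    K = lil_matrix((8, 8))              # 4 nodes x 2 DOF
    elem_nodes = [1, 2, 3]              # arbitrary 3-node element
    K_elem = np.eye(6)                  # placeholder element matrix
    for i in range(3):
        for j in range(3):
            for di in range(2):
                for dj in range(2):
                    K[2*elem_nodes[i] + di, 2*elem_nodes[j] + dj] += K_elem[2*i + di, 2*j + dj]
    print(K.toarray())

# _demo_dof_assembly()  # uncomment to run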
1193
|
+
|
|
1194
|
+
|
|
1195
|
+
def build_quad8_stiffness_reduced_integration(coords, E, nu):
|
|
1196
|
+
"""
|
|
1197
|
+
Build stiffness matrix for 8-node quadrilateral with reduced integration (4 Gauss points).
|
|
1198
|
+
This is the key element type used in Griffiths & Lane (1999).
|
|
1199
|
+
"""
|
|
1200
|
+
# For now, use a simplified approach - proper implementation would use isoparametric mapping
|
|
1201
|
+
# This is a placeholder that should be replaced with full 8-node quad implementation
|
|
1202
|
+
|
|
1203
|
+
# Simplified: use average coordinates to create an equivalent triangle
|
|
1204
|
+
if len(coords) >= 4:
|
|
1205
|
+
# Use first 4 corners for a quad approximation
|
|
1206
|
+
quad_coords = coords[:4]
|
|
1207
|
+
# Convert to equivalent triangle for now
|
|
1208
|
+
tri_coords = np.array([
|
|
1209
|
+
quad_coords[0],
|
|
1210
|
+
quad_coords[1],
|
|
1211
|
+
quad_coords[2]
|
|
1212
|
+
])
|
|
1213
|
+
return build_triangle_stiffness(tri_coords, E, nu)
|
|
1214
|
+
else:
|
|
1215
|
+
return build_triangle_stiffness(coords, E, nu)
|
|
1216
|
+
|
|
1217
|
+
|
|
1218
|
+
def build_triangle_stiffness(coords, E, nu):
|
|
1219
|
+
"""
|
|
1220
|
+
Build stiffness matrix for triangular element (plane strain).
|
|
1221
|
+
"""
|
|
1222
|
+
x1, y1 = coords[0]
|
|
1223
|
+
x2, y2 = coords[1]
|
|
1224
|
+
x3, y3 = coords[2]
|
|
1225
|
+
|
|
1226
|
+
# Area
|
|
1227
|
+
area = 0.5 * abs((x2-x1)*(y3-y1) - (x3-x1)*(y2-y1))
|
|
1228
|
+
|
|
1229
|
+
if area < 1e-12:
|
|
1230
|
+
print(f"Warning: Very small element area: {area}")
|
|
1231
|
+
return np.zeros((6, 6))
|
|
1232
|
+
|
|
1233
|
+
# Shape function derivatives
|
|
1234
|
+
b1 = y2 - y3
|
|
1235
|
+
b2 = y3 - y1
|
|
1236
|
+
b3 = y1 - y2
|
|
1237
|
+
c1 = x3 - x2
|
|
1238
|
+
c2 = x1 - x3
|
|
1239
|
+
c3 = x2 - x1
|
|
1240
|
+
|
|
1241
|
+
# B matrix (standard linear triangle)
|
|
1242
|
+
B = np.array([
|
|
1243
|
+
[b1, 0, b2, 0, b3, 0 ], # εx = ∂u/∂x
|
|
1244
|
+
[0, c1, 0, c2, 0, c3], # εy = ∂v/∂y
|
|
1245
|
+
[c1, b1, c2, b2, c3, b3] # γxy = ∂u/∂y + ∂v/∂x
|
|
1246
|
+
]) / (2 * area)
|
|
1247
|
+
|
|
1248
|
+
# Constitutive matrix (plane strain)
|
|
1249
|
+
factor = E / ((1 + nu) * (1 - 2*nu))
|
|
1250
|
+
D = factor * np.array([
|
|
1251
|
+
[1-nu, nu, 0 ],
|
|
1252
|
+
[nu, 1-nu, 0 ],
|
|
1253
|
+
[0, 0, (1-2*nu)/2]
|
|
1254
|
+
])
|
|
1255
|
+
|
|
1256
|
+
# Element stiffness matrix
|
|
1257
|
+
K_elem = area * B.T @ D @ B
|
|
1258
|
+
|
|
1259
|
+
return K_elem
|
|
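A quick sanity check on the CST stiffness above, assuming this module's namespace: a rigid-body translation should produce zero nodal forces, and the matrix should be symmetric. The material values are arbitrary.

def _check_triangle_stiffness():
    coords = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    K = build_triangle_stiffness(coords, E=1.0e4, nu=0.3)
    rigid_x = np.array([1.0, 0.0, 1.0, 0.0, 1.0, 0.0])   # unit translation in x
    assert np.allclose(K @ rigid_x, 0.0, atol=1e-6)      # no forces from rigid motion
    assert np.allclose(K, K.T)                           # symmetry of B^T D B
    print("triangle stiffness passes rigid-body and symmetry checks")

# _check_triangle_stiffness()  # uncomment to run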
1260
|
+
|
|
1261
|
+
|
|
1262
|
+
def build_gravity_loads(nodes, elements, element_types, element_materials, gamma_by_mat, k_seismic):
|
|
1263
|
+
"""
|
|
1264
|
+
Build gravity load vector using Griffiths & Lane (1999) approach.
|
|
1265
|
+
|
|
1266
|
+
Uses equation 3 from the paper: p(e) = γ ∫[Ve] N^T d(vol)
|
|
1267
|
+
This integrates shape functions over each element to properly distribute gravity loads.
|
|
1268
|
+
"""
|
|
1269
|
+
n_nodes = len(nodes)
|
|
1270
|
+
F_gravity = np.zeros(2 * n_nodes)
|
|
1271
|
+
|
|
1272
|
+
for elem_idx, element in enumerate(elements):
|
|
1273
|
+
elem_type = element_types[elem_idx]
|
|
1274
|
+
mat_id = element_materials[elem_idx] - 1
|
|
1275
|
+
gamma = gamma_by_mat[mat_id]
|
|
1276
|
+
|
|
1277
|
+
elem_nodes = element[:elem_type]
|
|
1278
|
+
elem_coords = nodes[elem_nodes]
|
|
1279
|
+
|
|
1280
|
+
if elem_type == 3: # 3-node triangle
|
|
1281
|
+
# For linear triangles, shape function integration gives equal distribution (1/3 each)
|
|
1282
|
+
x1, y1 = elem_coords[0]
|
|
1283
|
+
x2, y2 = elem_coords[1]
|
|
1284
|
+
x3, y3 = elem_coords[2]
|
|
1285
|
+
area = 0.5 * abs((x2-x1)*(y3-y1) - (x3-x1)*(y2-y1))
|
|
1286
|
+
|
|
1287
|
+
# Each node gets 1/3 of the element weight (exact for linear shape functions)
|
|
1288
|
+
for i, node in enumerate(elem_nodes):
|
|
1289
|
+
load = gamma * area / 3.0
|
|
1290
|
+
F_gravity[2*node + 1] -= load # Vertical (negative = downward)
|
|
1291
|
+
F_gravity[2*node] += k_seismic * load # Horizontal seismic
|
|
1292
|
+
|
|
1293
|
+
elif elem_type == 8: # 8-node quad
|
|
1294
|
+
# For 8-node quads, use 2x2 Gauss integration as in Griffiths
|
|
1295
|
+
# This properly weights corner vs midside nodes
|
|
1296
|
+
|
|
1297
|
+
# Gauss points for 2x2 integration
|
|
1298
|
+
gauss_coord = 1.0 / np.sqrt(3.0)
|
|
1299
|
+
xi_points = np.array([-gauss_coord, gauss_coord])
|
|
1300
|
+
eta_points = np.array([-gauss_coord, gauss_coord])
|
|
1301
|
+
weights = np.array([1.0, 1.0])
|
|
1302
|
+
|
|
1303
|
+
# Initialize element load vector
|
|
1304
|
+
elem_loads = np.zeros(2 * elem_type)
|
|
1305
|
+
|
|
1306
|
+
# Numerical integration over Gauss points
|
|
1307
|
+
for i in range(2):
|
|
1308
|
+
for j in range(2):
|
|
1309
|
+
xi = xi_points[i]
|
|
1310
|
+
eta = eta_points[j]
|
|
1311
|
+
w = weights[i] * weights[j]
|
|
1312
|
+
|
|
1313
|
+
# Shape functions for 8-node quad at (xi, eta)
|
|
1314
|
+
N = compute_quad8_shape_functions(xi, eta)
|
|
1315
|
+
|
|
1316
|
+
# Jacobian for coordinate transformation
|
|
1317
|
+
J = compute_quad8_jacobian(elem_coords, xi, eta)
|
|
1318
|
+
det_J = np.linalg.det(J)
|
|
1319
|
+
|
|
1320
|
+
# Accumulate load contribution: w * det(J) * γ * N
|
|
1321
|
+
for k in range(8):
|
|
1322
|
+
elem_loads[2*k + 1] -= w * det_J * gamma * N[k] # Vertical
|
|
1323
|
+
elem_loads[2*k] += w * det_J * gamma * k_seismic * N[k] # Horizontal
|
|
1324
|
+
|
|
1325
|
+
# Add element loads to global vector
|
|
1326
|
+
for i, node in enumerate(elem_nodes):
|
|
1327
|
+
F_gravity[2*node] += elem_loads[2*i]
|
|
1328
|
+
F_gravity[2*node + 1] += elem_loads[2*i + 1]
|
|
1329
|
+
|
|
1330
|
+
elif elem_type == 4: # 4-node quad (if used)
|
|
1331
|
+
# For 4-node quads, use 2x2 Gauss integration
|
|
1332
|
+
area = compute_quad_area(elem_coords)
|
|
1333
|
+
# Simple equal distribution for now (can be refined)
|
|
1334
|
+
load_per_node = gamma * area / 4.0
|
|
1335
|
+
|
|
1336
|
+
for i, node in enumerate(elem_nodes):
|
|
1337
|
+
F_gravity[2*node + 1] -= load_per_node
|
|
1338
|
+
F_gravity[2*node] += k_seismic * load_per_node
|
|
1339
|
+
else:
|
|
1340
|
+
# Fallback for other element types
|
|
1341
|
+
if elem_type >= 3:
|
|
1342
|
+
# Triangle area calculation
|
|
1343
|
+
x1, y1 = elem_coords[0]
|
|
1344
|
+
x2, y2 = elem_coords[1]
|
|
1345
|
+
x3, y3 = elem_coords[2]
|
|
1346
|
+
area = 0.5 * abs((x2-x1)*(y3-y1) - (x3-x1)*(y2-y1))
|
|
1347
|
+
|
|
1348
|
+
load_per_node = gamma * area / elem_type
|
|
1349
|
+
|
|
1350
|
+
for i, node in enumerate(elem_nodes):
|
|
1351
|
+
F_gravity[2*node + 1] -= load_per_node
|
|
1352
|
+
F_gravity[2*node] += k_seismic * load_per_node
|
|
1353
|
+
|
|
1354
|
+
return F_gravity
|
|
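A spot check of the gravity loading above, assuming this module's namespace: for two CST elements forming a unit square, the vertical components of the load vector should sum to -γ times the total area. The unit weight of 20 is an arbitrary illustrative value.

def _check_gravity_total():
    nodes = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    elements = np.array([[0, 1, 2], [0, 2, 3]])
    F = build_gravity_loads(nodes, elements, element_types=[3, 3],
                            element_materials=[1, 1], gamma_by_mat=[20.0],
                            k_seismic=0.0)
    assert np.isclose(F[1::2].sum(), -20.0 * 1.0)   # unit weight x unit area
    print("total vertical gravity load matches -gamma * area")

# _check_gravity_total()  # uncomment to run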
1355
|
+
|
|
1356
|
+
|
|
1357
|
+
def compute_quad8_shape_functions(xi, eta):
|
|
1358
|
+
"""
|
|
1359
|
+
Compute shape functions for 8-node serendipity quadrilateral at (xi, eta).
|
|
1360
|
+
|
|
1361
|
+
Node numbering:
|
|
1362
|
+
3---6---2
|
|
1363
|
+
|       |
|
|
1364
|
+
7       5
|
|
1365
|
+
|       |
|
|
1366
|
+
0---4---1
|
|
1367
|
+
"""
|
|
1368
|
+
N = np.zeros(8)
|
|
1369
|
+
|
|
1370
|
+
# Corner nodes
|
|
1371
|
+
N[0] = 0.25 * (1 - xi) * (1 - eta) * (-xi - eta - 1)
|
|
1372
|
+
N[1] = 0.25 * (1 + xi) * (1 - eta) * (xi - eta - 1)
|
|
1373
|
+
N[2] = 0.25 * (1 + xi) * (1 + eta) * (xi + eta - 1)
|
|
1374
|
+
N[3] = 0.25 * (1 - xi) * (1 + eta) * (-xi + eta - 1)
|
|
1375
|
+
|
|
1376
|
+
# Midside nodes
|
|
1377
|
+
N[4] = 0.5 * (1 - xi**2) * (1 - eta)
|
|
1378
|
+
N[5] = 0.5 * (1 + xi) * (1 - eta**2)
|
|
1379
|
+
N[6] = 0.5 * (1 - xi**2) * (1 + eta)
|
|
1380
|
+
N[7] = 0.5 * (1 - xi) * (1 - eta**2)
|
|
1381
|
+
|
|
1382
|
+
return N
|
|
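Two standard properties of the serendipity shape functions above, checked numerically: they form a partition of unity anywhere in the reference square, and each N[i] equals 1 at its own node and 0 at the others (node 0 sits at ξ = η = -1 in the numbering sketched in the docstring). A small check assuming this module's namespace:

def _check_quad8_shape_functions():
    assert np.isclose(compute_quad8_shape_functions(0.3, -0.7).sum(), 1.0)   # partition of unity
    N_at_node0 = compute_quad8_shape_functions(-1.0, -1.0)
    assert np.allclose(N_at_node0, np.eye(8)[0])                             # Kronecker-delta property
    print("quad8 shape functions pass partition-of-unity and nodal checks")

# _check_quad8_shape_functions()  # uncomment to run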
1383
|
+
|
|
1384
|
+
|
|
1385
|
+
def compute_quad8_jacobian(coords, xi, eta):
|
|
1386
|
+
"""
|
|
1387
|
+
Compute Jacobian matrix for 8-node quad at (xi, eta).
|
|
1388
|
+
"""
|
|
1389
|
+
# Shape function derivatives
|
|
1390
|
+
dN_dxi, dN_deta = compute_quad8_shape_derivatives(xi, eta)
|
|
1391
|
+
|
|
1392
|
+
# Jacobian matrix
|
|
1393
|
+
J = np.zeros((2, 2))
|
|
1394
|
+
for i in range(8):
|
|
1395
|
+
J[0, 0] += dN_dxi[i] * coords[i, 0] # dx/dxi
|
|
1396
|
+
J[0, 1] += dN_dxi[i] * coords[i, 1] # dy/dxi
|
|
1397
|
+
J[1, 0] += dN_deta[i] * coords[i, 0] # dx/deta
|
|
1398
|
+
J[1, 1] += dN_deta[i] * coords[i, 1] # dy/deta
|
|
1399
|
+
|
|
1400
|
+
return J
|
|
1401
|
+
|
|
1402
|
+
|
|
1403
|
+
def compute_quad_area(coords):
|
|
1404
|
+
"""
|
|
1405
|
+
Compute area of quadrilateral (approximate).
|
|
1406
|
+
"""
|
|
1407
|
+
if len(coords) >= 4:
|
|
1408
|
+
# Use shoelace formula for polygon area
|
|
1409
|
+
x = coords[:4, 0]
|
|
1410
|
+
y = coords[:4, 1]
|
|
1411
|
+
return 0.5 * abs(sum(x[i]*y[i+1] - x[i+1]*y[i] for i in range(-1, 3)))
|
|
1412
|
+
else:
|
|
1413
|
+
return 0.0
|
|
1414
|
+
|
|
1415
|
+
|
|
1416
|
+
def compute_plastic_load_correction_perzyna(nodes, elements, element_types, element_materials,
|
|
1417
|
+
plastic_strains, E_by_mat, nu_by_mat, dt):
|
|
1418
|
+
"""
|
|
1419
|
+
Compute plastic load correction vector using Perzyna algorithm.
|
|
1420
|
+
|
|
1421
|
+
This computes the internal force vector due to plastic strains:
|
|
1422
|
+
F_plastic = ∫ B^T D ε_plastic dV
|
|
1423
|
+
"""
|
|
1424
|
+
n_nodes = len(nodes)
|
|
1425
|
+
F_plastic = np.zeros(2 * n_nodes)
|
|
1426
|
+
|
|
1427
|
+
for elem_idx in range(len(elements)):
|
|
1428
|
+
elem_type = element_types[elem_idx]
|
|
1429
|
+
mat_id = element_materials[elem_idx] - 1
|
|
1430
|
+
|
|
1431
|
+
E = E_by_mat[mat_id]
|
|
1432
|
+
nu = nu_by_mat[mat_id]
|
|
1433
|
+
|
|
1434
|
+
# Get element data
|
|
1435
|
+
elem_nodes = elements[elem_idx][:elem_type]
|
|
1436
|
+
elem_coords = nodes[elem_nodes]
|
|
1437
|
+
|
|
1438
|
+
if elem_type == 8:
|
|
1439
|
+
# 8-node quad with 2x2 Gauss integration
|
|
1440
|
+
gauss_points, weights = get_gauss_points_2x2()
|
|
1441
|
+
n_gauss = 4
|
|
1442
|
+
else:
|
|
1443
|
+
n_gauss = 1 # Triangle - single Gauss point
|
|
1444
|
+
gauss_points = [(0.0, 0.0)]
|
|
1445
|
+
weights = [1.0]
|
|
1446
|
+
|
|
1447
|
+
# Element plastic force vector
|
|
1448
|
+
elem_f_plastic = np.zeros(2 * elem_type)
|
|
1449
|
+
|
|
1450
|
+
# For each Gauss point
|
|
1451
|
+
for gp in range(n_gauss):
|
|
1452
|
+
# Get plastic strains at this Gauss point
|
|
1453
|
+
plastic_strain = plastic_strains[elem_idx][gp, :]
|
|
1454
|
+
|
|
1455
|
+
# Skip if no plastic strain
|
|
1456
|
+
if np.linalg.norm(plastic_strain) < 1e-20:
|
|
1457
|
+
continue
|
|
1458
|
+
|
|
1459
|
+
# Constitutive matrix
|
|
1460
|
+
D = build_constitutive_matrix(E, nu)
|
|
1461
|
+
plastic_stress = D @ plastic_strain # Tension-positive convention
|
|
1462
|
+
|
|
1463
|
+
# Compute B and weight at this Gauss point
|
|
1464
|
+
if elem_type == 3: # Triangle
|
|
1465
|
+
B, area = compute_B_matrix_triangle(elem_coords)
|
|
1466
|
+
weight = area
|
|
1467
|
+
elif elem_type == 8: # 8-node quad
|
|
1468
|
+
xi, eta_local = gauss_points[gp]
|
|
1469
|
+
# Build B and detJ at Gauss point
|
|
1470
|
+
dN_dxi, dN_deta = compute_quad8_shape_derivatives(xi, eta_local)
|
|
1471
|
+
# Jacobian
|
|
1472
|
+
J = np.zeros((2, 2))
|
|
1473
|
+
for a in range(8):
|
|
1474
|
+
J[0,0] += dN_dxi[a] * elem_coords[a,0]
|
|
1475
|
+
J[0,1] += dN_dxi[a] * elem_coords[a,1]
|
|
1476
|
+
J[1,0] += dN_deta[a] * elem_coords[a,0]
|
|
1477
|
+
J[1,1] += dN_deta[a] * elem_coords[a,1]
|
|
1478
|
+
det_J = J[0,0]*J[1,1] - J[0,1]*J[1,0]
|
|
1479
|
+
if abs(det_J) < 1e-14:
|
|
1480
|
+
continue
|
|
1481
|
+
J_inv = np.array([[J[1,1], -J[0,1]], [-J[1,0], J[0,0]]]) / det_J
|
|
1482
|
+
dN_dx = np.zeros(8)
|
|
1483
|
+
dN_dy = np.zeros(8)
|
|
1484
|
+
for a in range(8):
|
|
1485
|
+
dN_dx[a] = J_inv[0,0]*dN_dxi[a] + J_inv[0,1]*dN_deta[a]
|
|
1486
|
+
dN_dy[a] = J_inv[1,0]*dN_dxi[a] + J_inv[1,1]*dN_deta[a]
|
|
1487
|
+
B = np.zeros((3, 16))
|
|
1488
|
+
for a in range(8):
|
|
1489
|
+
B[0, 2*a] = dN_dx[a]
|
|
1490
|
+
B[1, 2*a+1] = dN_dy[a]
|
|
1491
|
+
B[2, 2*a] = dN_dy[a]
|
|
1492
|
+
B[2, 2*a+1] = dN_dx[a]
|
|
1493
|
+
weight = weights[gp] * abs(det_J)
|
|
1494
|
+
else:
|
|
1495
|
+
# Simplified for other elements
|
|
1496
|
+
B = np.zeros((3, 2 * elem_type))
|
|
1497
|
+
weight = 1.0
|
|
1498
|
+
|
|
1499
|
+
# Add contribution to element force vector
|
|
1500
|
+
if B.size > 0:
|
|
1501
|
+
elem_f_plastic += B.T @ plastic_stress * weight
|
|
1502
|
+
|
|
1503
|
+
# Assemble into global force vector
|
|
1504
|
+
for i in range(elem_type):
|
|
1505
|
+
node = elem_nodes[i]
|
|
1506
|
+
F_plastic[2*node] += elem_f_plastic[2*i]
|
|
1507
|
+
F_plastic[2*node + 1] += elem_f_plastic[2*i + 1]
|
|
1508
|
+
|
|
1509
|
+
return F_plastic
|
|
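For a single linear triangle with one integration point, the correction above reduces to F_p = Bᵀ (D εᵖ) · A. Here is a one-element sketch of that product, assuming this module's namespace; the material values and plastic strain components are invented for illustration.

def _demo_plastic_correction_single_tri():
    coords = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    B, area = compute_B_matrix_triangle(coords)
    D = build_constitutive_matrix(E=1.0e4, nu=0.3)
    eps_p = np.array([1.0e-4, -1.0e-4, 0.0])      # illustrative plastic strain
    F_p = B.T @ (D @ eps_p) * area                # equivalent nodal force vector (6 entries)
    print("plastic load correction for one CST:", F_p)

# _demo_plastic_correction_single_tri()  # uncomment to run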
1510
|
+
|
|
1511
|
+
|
|
1512
|
+
def compute_B_matrix_quad8_centroid(coords):
|
|
1513
|
+
"""Compute B matrix and determinant of Jacobian for 8-node quad at centroid."""
|
|
1514
|
+
# Evaluate at centroid (xi=0, eta=0)
|
|
1515
|
+
xi, eta = 0.0, 0.0
|
|
1516
|
+
|
|
1517
|
+
# Shape function derivatives
|
|
1518
|
+
dN_dxi, dN_deta = compute_quad8_shape_derivatives(xi, eta)
|
|
1519
|
+
|
|
1520
|
+
# Jacobian matrix
|
|
1521
|
+
J = np.zeros((2, 2))
|
|
1522
|
+
for i in range(8):
|
|
1523
|
+
x, y = coords[i]
|
|
1524
|
+
J[0, 0] += dN_dxi[i] * x # dx/dxi
|
|
1525
|
+
J[0, 1] += dN_dxi[i] * y # dy/dxi
|
|
1526
|
+
J[1, 0] += dN_deta[i] * x # dx/deta
|
|
1527
|
+
J[1, 1] += dN_deta[i] * y # dy/deta
|
|
1528
|
+
|
|
1529
|
+
det_J = J[0, 0] * J[1, 1] - J[0, 1] * J[1, 0]
|
|
1530
|
+
|
|
1531
|
+
if abs(det_J) < 1e-12:
|
|
1532
|
+
return np.zeros((3, 16)), 0.0
|
|
1533
|
+
|
|
1534
|
+
# Inverse Jacobian
|
|
1535
|
+
J_inv = np.array([[J[1, 1], -J[0, 1]], [-J[1, 0], J[0, 0]]]) / det_J
|
|
1536
|
+
|
|
1537
|
+
# Shape function derivatives in physical coordinates
|
|
1538
|
+
dN_dx = np.zeros(8)
|
|
1539
|
+
dN_dy = np.zeros(8)
|
|
1540
|
+
for i in range(8):
|
|
1541
|
+
dN_dx[i] = J_inv[0, 0] * dN_dxi[i] + J_inv[0, 1] * dN_deta[i]
|
|
1542
|
+
dN_dy[i] = J_inv[1, 0] * dN_dxi[i] + J_inv[1, 1] * dN_deta[i]
|
|
1543
|
+
|
|
1544
|
+
# B matrix (standard tension positive)
|
|
1545
|
+
B = np.zeros((3, 16)) # 3 strains x 16 DOFs (8 nodes x 2 DOFs)
|
|
1546
|
+
for i in range(8):
|
|
1547
|
+
B[0, 2*i] = dN_dx[i] # εx = ∂u/∂x
|
|
1548
|
+
B[1, 2*i+1] = dN_dy[i] # εy = ∂v/∂y
|
|
1549
|
+
B[2, 2*i] = dN_dy[i] # γxy = ∂u/∂y + ∂v/∂x
|
|
1550
|
+
B[2, 2*i+1] = dN_dx[i] # γxy = ∂u/∂y + ∂v/∂x
|
|
1551
|
+
|
|
1552
|
+
return B, abs(det_J)
|
|
1553
|
+
|
|
1554
|
+
|
|
1555
|
+
def compute_B_matrix_triangle(coords):
|
|
1556
|
+
"""Compute B matrix and area for triangle element."""
|
|
1557
|
+
|
|
1558
|
+
x1, y1 = coords[0]
|
|
1559
|
+
x2, y2 = coords[1]
|
|
1560
|
+
x3, y3 = coords[2]
|
|
1561
|
+
|
|
1562
|
+
area = 0.5 * abs((x2-x1)*(y3-y1) - (x3-x1)*(y2-y1))
|
|
1563
|
+
|
|
1564
|
+
if area < 1e-12:
|
|
1565
|
+
return np.zeros((3, 6)), 0.0
|
|
1566
|
+
|
|
1567
|
+
# Shape function derivatives
|
|
1568
|
+
b1 = y2 - y3
|
|
1569
|
+
b2 = y3 - y1
|
|
1570
|
+
b3 = y1 - y2
|
|
1571
|
+
c1 = x3 - x2
|
|
1572
|
+
c2 = x1 - x3
|
|
1573
|
+
c3 = x2 - x1
|
|
1574
|
+
|
|
1575
|
+
# B matrix (standard linear triangle)
|
|
1576
|
+
B = np.array([
|
|
1577
|
+
[b1, 0, b2, 0, b3, 0 ], # εx = ∂u/∂x
|
|
1578
|
+
[0, c1, 0, c2, 0, c3], # εy = ∂v/∂y
|
|
1579
|
+
[c1, b1, c2, b2, c3, b3] # γxy = ∂u/∂y + ∂v/∂x
|
|
1580
|
+
]) / (2 * area)
|
|
1581
|
+
|
|
1582
|
+
return B, area
|
|
1583
|
+
|
|
1584
|
+
|
|
1585
|
+
def get_gauss_points_2x2():
|
|
1586
|
+
"""Get 2x2 Gauss quadrature points and weights for reduced integration."""
|
|
1587
|
+
# 2x2 Gauss points in natural coordinates
|
|
1588
|
+
gp = 1.0 / np.sqrt(3.0)
|
|
1589
|
+
gauss_points = [
|
|
1590
|
+
(-gp, -gp), # Gauss point 0
|
|
1591
|
+
( gp, -gp), # Gauss point 1
|
|
1592
|
+
( gp, gp), # Gauss point 2
|
|
1593
|
+
(-gp, gp), # Gauss point 3
|
|
1594
|
+
]
|
|
1595
|
+
weights = [1.0, 1.0, 1.0, 1.0] # Equal weights for 2x2
|
|
1596
|
+
return gauss_points, weights
|
|
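The 2x2 rule above integrates polynomials up to cubic order exactly on the reference square. A quick numeric confirmation, assuming this module's namespace: the constant 1 integrates to the reference area 4, and ξ²η² integrates to 4/9.

def _check_gauss_2x2():
    pts, wts = get_gauss_points_2x2()
    total = sum(w * 1.0 for w in wts)                               # integral of 1 over [-1,1]^2
    moment = sum(w * xi**2 * eta**2 for (xi, eta), w in zip(pts, wts))
    assert np.isclose(total, 4.0)
    assert np.isclose(moment, 4.0 / 9.0)                            # exact for terms up to cubic
    print("2x2 Gauss rule reproduces reference-square integrals")

# _check_gauss_2x2()  # uncomment to run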
1597
|
+
|
|
1598
|
+
|
|
1599
|
+
def compute_gauss_point_coordinates_quad8(elem_coords, xi, eta):
|
|
1600
|
+
"""
|
|
1601
|
+
Compute physical coordinates of a Gauss point in an 8-node quadrilateral.
|
|
1602
|
+
|
|
1603
|
+
Args:
|
|
1604
|
+
elem_coords: Array of element node coordinates (8x2)
|
|
1605
|
+
xi, eta: Natural coordinates of Gauss point
|
|
1606
|
+
|
|
1607
|
+
Returns:
|
|
1608
|
+
Physical coordinates [x, y] of the Gauss point
|
|
1609
|
+
"""
|
|
1610
|
+
# 8-node quadrilateral shape functions
|
|
1611
|
+
N = np.zeros(8)
|
|
1612
|
+
N[0] = 0.25 * (1 - xi) * (1 - eta) * (-xi - eta - 1)
|
|
1613
|
+
N[1] = 0.25 * (1 + xi) * (1 - eta) * (xi - eta - 1)
|
|
1614
|
+
N[2] = 0.25 * (1 + xi) * (1 + eta) * (xi + eta - 1)
|
|
1615
|
+
N[3] = 0.25 * (1 - xi) * (1 + eta) * (-xi + eta - 1)
|
|
1616
|
+
N[4] = 0.5 * (1 - xi*xi) * (1 - eta)
|
|
1617
|
+
N[5] = 0.5 * (1 + xi) * (1 - eta*eta)
|
|
1618
|
+
N[6] = 0.5 * (1 - xi*xi) * (1 + eta)
|
|
1619
|
+
N[7] = 0.5 * (1 - xi) * (1 - eta*eta)
|
|
1620
|
+
|
|
1621
|
+
# Compute physical coordinates
|
|
1622
|
+
gauss_coords = np.zeros(2)
|
|
1623
|
+
for i in range(8):
|
|
1624
|
+
gauss_coords += N[i] * elem_coords[i]
|
|
1625
|
+
|
|
1626
|
+
return gauss_coords
|
|
1627
|
+
|
|
1628
|
+
|
|
1629
|
+
def check_initial_yield_state(stress_state, c_values, phi_values):
|
|
1630
|
+
"""
|
|
1631
|
+
Check how many elements are yielding at the initial stress state.
|
|
1632
|
+
|
|
1633
|
+
Args:
|
|
1634
|
+
stress_state: Dictionary with 'element_stresses' array
|
|
1635
|
+
c_values: Cohesion values for each element
|
|
1636
|
+
phi_values: Friction angle values for each element (in radians)
|
|
1637
|
+
|
|
1638
|
+
Returns:
|
|
1639
|
+
Number of elements that are yielding
|
|
1640
|
+
"""
|
|
1641
|
+
element_stresses = stress_state['element_stresses']
|
|
1642
|
+
n_elements = element_stresses.shape[0]
|
|
1643
|
+
yield_count = 0
|
|
1644
|
+
|
|
1645
|
+
for elem_idx in range(n_elements):
|
|
1646
|
+
# Check first Gauss point of each element
|
|
1647
|
+
sig_x = element_stresses[elem_idx, 0, 0]
|
|
1648
|
+
sig_y = element_stresses[elem_idx, 0, 1]
|
|
1649
|
+
tau_xy = element_stresses[elem_idx, 0, 2]
|
|
1650
|
+
|
|
1651
|
+
c = c_values[elem_idx]
|
|
1652
|
+
phi = phi_values[elem_idx]
|
|
1653
|
+
|
|
1654
|
+
# Check Mohr-Coulomb yield criterion
|
|
1655
|
+
stress = np.array([sig_x, sig_y, tau_xy])
|
|
1656
|
+
F_yield = check_mohr_coulomb_cp_from_tp(stress, c, phi)
|
|
1657
|
+
if F_yield > 0:
|
|
1658
|
+
yield_count += 1
|
|
1659
|
+
|
|
1660
|
+
return yield_count
|
|
1661
|
+
|
|
1662
|
+
|
|
1663
|
+
def update_plastic_strains_perzyna(nodes, elements, element_types, element_materials,
|
|
1664
|
+
displacements, plastic_strains, c_reduced, phi_reduced,
|
|
1665
|
+
E_by_mat, nu_by_mat, u_nodal, eta, initial_stresses=None):
|
|
1666
|
+
"""
|
|
1667
|
+
Update plastic strains using Perzyna visco-plastic algorithm with proper Gauss integration.
|
|
1668
|
+
"""
|
|
1669
|
+
plastic_strains_new = {}
|
|
1670
|
+
total_increment = 0.0
|
|
1671
|
+
|
|
1672
|
+
# Get Gauss points for 8-node quads
|
|
1673
|
+
gauss_points_2x2, weights_2x2 = get_gauss_points_2x2()
|
|
1674
|
+
|
|
1675
|
+
for elem_idx in range(len(elements)):
|
|
1676
|
+
elem_type = element_types[elem_idx]
|
|
1677
|
+
mat_id = element_materials[elem_idx] - 1
|
|
1678
|
+
|
|
1679
|
+
E = E_by_mat[mat_id]
|
|
1680
|
+
nu = nu_by_mat[mat_id]
|
|
1681
|
+
c = c_reduced[elem_idx]
|
|
1682
|
+
phi = phi_reduced[elem_idx]
|
|
1683
|
+
|
|
1684
|
+
if elem_type == 8:
|
|
1685
|
+
n_gauss = 4 # 8-node quad with reduced integration
|
|
1686
|
+
else:
|
|
1687
|
+
n_gauss = 1 # Triangle - single Gauss point
|
|
1688
|
+
|
|
1689
|
+
plastic_strains_new[elem_idx] = plastic_strains[elem_idx].copy()
|
|
1690
|
+
|
|
1691
|
+
# Get element data
|
|
1692
|
+
elem_nodes = elements[elem_idx][:elem_type]
|
|
1693
|
+
elem_coords = nodes[elem_nodes]
|
|
1694
|
+
|
|
1695
|
+
# Get element displacements
|
|
1696
|
+
elem_disp = np.zeros(2 * elem_type)
|
|
1697
|
+
for i, node in enumerate(elem_nodes):
|
|
1698
|
+
elem_disp[2*i] = displacements[2*node]
|
|
1699
|
+
elem_disp[2*i+1] = displacements[2*node+1]
|
|
1700
|
+
|
|
1701
|
+
# For each Gauss point
|
|
1702
|
+
for gp in range(n_gauss):
|
|
1703
|
+
# Compute total strains at this Gauss point
|
|
1704
|
+
if elem_type == 3: # Triangle
|
|
1705
|
+
total_strains = compute_triangle_strains_manual(elem_coords, elem_disp)
|
|
1706
|
+
elif elem_type == 8: # 8-node quad with proper Gauss points
|
|
1707
|
+
xi, eta_local = gauss_points_2x2[gp]
|
|
1708
|
+
total_strains = compute_quad8_strains_at_xi_eta(elem_coords, elem_disp, xi, eta_local)
|
|
1709
|
+
else:
|
|
1710
|
+
# Simplified for other element types
|
|
1711
|
+
total_strains = np.array([0.0, 0.0, 0.0])
|
|
1712
|
+
|
|
1713
|
+
# Elastic trial strains
|
|
1714
|
+
plastic_strain_old = plastic_strains[elem_idx][gp, :]
|
|
1715
|
+
elastic_strains = total_strains - plastic_strain_old
|
|
1716
|
+
|
|
1717
|
+
# Elastic trial stress = initial stress + incremental stress
|
|
1718
|
+
D = build_constitutive_matrix(E, nu)
|
|
1719
|
+
incremental_stress = D @ elastic_strains # Tension-positive convention
|
|
1720
|
+
|
|
1721
|
+
# Add initial stress if provided
|
|
1722
|
+
if initial_stresses is not None:
|
|
1723
|
+
initial_stress = initial_stresses['element_stresses'][elem_idx, gp, :]
|
|
1724
|
+
trial_stress = initial_stress + incremental_stress
|
|
1725
|
+
else:
|
|
1726
|
+
trial_stress = incremental_stress
|
|
1727
|
+
|
|
1728
|
+
# Check yield criterion with total stress
|
|
1729
|
+
f_yield = check_mohr_coulomb_cp_from_tp(trial_stress, c, phi)
|
|
1730
|
+
|
|
1731
|
+
if f_yield > 1e-6: # Plastic loading - higher threshold to reduce initial yielding
|
|
1732
|
+
# Perzyna visco-plastic flow as per Griffiths & Lane (1999)
|
|
1733
|
+
# Δλ = η * <f> where <f> = max(0, f)
|
|
1734
|
+
# Use appropriate viscosity parameter from paper
|
|
1735
|
+
delta_lambda = eta * f_yield # Remove artificial cap
|
|
1736
|
+
|
|
1737
|
+
# Flow vector (non-associated: ψ = 0)
|
|
1738
|
+
flow_vector = compute_plastic_flow_vector_cp_return_tp(trial_stress, 0.0) # ψ = 0
|
|
1739
|
+
|
|
1740
|
+
# Plastic strain increment - controlled flow to prevent instability
|
|
1741
|
+
plastic_increment = delta_lambda * flow_vector
|
|
1742
|
+
|
|
1743
|
+
# Apply reasonable limit to prevent numerical explosion
|
|
1744
|
+
increment_norm = np.linalg.norm(plastic_increment)
|
|
1745
|
+
if increment_norm > 1e-5: # Much smaller limit for very controlled plastic development
|
|
1746
|
+
plastic_increment *= 1e-5 / increment_norm
|
|
1747
|
+
|
|
1748
|
+
# Update plastic strains
|
|
1749
|
+
plastic_strains_new[elem_idx][gp, :] += plastic_increment
|
|
1750
|
+
|
|
1751
|
+
# Track total plastic increment
|
|
1752
|
+
total_increment += np.linalg.norm(plastic_increment)
|
|
1753
|
+
|
|
1754
|
+
return plastic_strains_new, total_increment
|
|
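The flow step above boils down to Δεᵖ = η·⟨f⟩·n followed by a hard cap on the increment norm. Here is a standalone numeric sketch of that update; the overstress f, fluidity η, flow direction n and cap are invented values, not taken from any analysis.

def _demo_perzyna_increment(eta=1.0e-7, cap=1.0e-5):
    f_yield = 25.0                                  # illustrative overstress (f > 0 means yielding)
    n_flow = np.array([0.5, -0.5, 0.7])             # illustrative flow direction
    increment = eta * f_yield * n_flow              # delta_lambda * flow_vector
    norm = np.linalg.norm(increment)
    if norm > cap:                                  # same capping rule as the routine above
        increment *= cap / norm
    print("plastic strain increment:", increment)

# _demo_perzyna_increment()  # uncomment to run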
1755
|
+
|
|
1756
|
+
|
|
1757
|
+
def update_plastic_strains_perzyna_incremental(nodes, elements, element_types, element_materials,
|
|
1758
|
+
displacements_new, displacements_prev, plastic_strains,
|
|
1759
|
+
current_stress_state, c_reduced, phi_reduced,
|
|
1760
|
+
E_by_mat, nu_by_mat, dt, plastic_strain_cap, debug_level=0):
|
|
1761
|
+
"""
|
|
1762
|
+
Update plastic strains using incremental Perzyna algorithm with compression-positive stress storage.
|
|
1763
|
+
"""
|
|
1764
|
+
plastic_strains_new = {}
|
|
1765
|
+
total_increment = 0.0
|
|
1766
|
+
|
|
1767
|
+
# New stress state (compression-positive)
|
|
1768
|
+
new_stress_state = {
|
|
1769
|
+
'element_stresses': current_stress_state['element_stresses'].copy(),
|
|
1770
|
+
'plastic_state': current_stress_state['plastic_state'].copy()
|
|
1771
|
+
}
|
|
1772
|
+
|
|
1773
|
+
# Displacement increment
|
|
1774
|
+
displacement_increment = displacements_new - displacements_prev
|
|
1775
|
+
|
|
1776
|
+
for elem_idx in range(len(elements)):
|
|
1777
|
+
elem_type = element_types[elem_idx]
|
|
1778
|
+
mat_id = element_materials[elem_idx] - 1
|
|
1779
|
+
|
|
1780
|
+
E = E_by_mat[mat_id]
|
|
1781
|
+
nu = nu_by_mat[mat_id]
|
|
1782
|
+
c = c_reduced[elem_idx]
|
|
1783
|
+
phi = phi_reduced[elem_idx]
|
|
1784
|
+
|
|
1785
|
+
# Element nodes/coords
|
|
1786
|
+
elem_nodes = elements[elem_idx][:elem_type]
|
|
1787
|
+
elem_coords = nodes[elem_nodes]
|
|
1788
|
+
|
|
1789
|
+
# Incremental displacements for element
|
|
1790
|
+
elem_disp_increment = np.zeros(2 * elem_type)
|
|
1791
|
+
for i, node_idx in enumerate(elem_nodes):
|
|
1792
|
+
elem_disp_increment[2*i] = displacement_increment[2*node_idx]
|
|
1793
|
+
elem_disp_increment[2*i+1] = displacement_increment[2*node_idx+1]
|
|
1794
|
+
|
|
1795
|
+
plastic_strains_new[elem_idx] = plastic_strains[elem_idx].copy()
|
|
1796
|
+
|
|
1797
|
+
# Gauss points
|
|
1798
|
+
if elem_type == 8:
|
|
1799
|
+
gauss_points_2x2, _ = get_gauss_points_2x2()
|
|
1800
|
+
n_gauss = 4
|
|
1801
|
+
else:
|
|
1802
|
+
n_gauss = 1
|
|
1803
|
+
|
|
1804
|
+
for gp in range(n_gauss):
|
|
1805
|
+
# Incremental strains
|
|
1806
|
+
if elem_type == 3:
|
|
1807
|
+
incremental_strains = compute_triangle_strains_manual(elem_coords, elem_disp_increment)
|
|
1808
|
+
elif elem_type == 8:
|
|
1809
|
+
xi, eta_local = gauss_points_2x2[gp]
|
|
1810
|
+
incremental_strains = compute_quad8_strains_at_xi_eta(elem_coords, elem_disp_increment, xi, eta_local)
|
|
1811
|
+
else:
|
|
1812
|
+
incremental_strains = np.array([0.0, 0.0, 0.0])
|
|
1813
|
+
|
|
1814
|
+
D = build_constitutive_matrix(E, nu)
|
|
1815
|
+
incremental_stress_tp = D @ incremental_strains
|
|
1816
|
+
incremental_stress_cp = -incremental_stress_tp
|
|
1817
|
+
|
|
1818
|
+
# Current total stress at GP (compression-positive)
|
|
1819
|
+
current_stress_cp = current_stress_state['element_stresses'][elem_idx, gp, :]
|
|
1820
|
+
|
|
1821
|
+
# Trial stress (compression-positive)
|
|
1822
|
+
trial_stress_cp = current_stress_cp + incremental_stress_cp
|
|
1823
|
+
|
|
1824
|
+
# Yield check (compression-positive)
|
|
1825
|
+
f_yield = check_mohr_coulomb_cp(trial_stress_cp, c, phi)
|
|
1826
|
+
|
|
1827
|
+
if f_yield > 1e-12:
|
|
1828
|
+
# Flow direction in tp from cp stress
|
|
1829
|
+
n_flow_tp = compute_plastic_flow_vector_cp_return_tp(-trial_stress_cp, 0.0)
|
|
1830
|
+
|
|
1831
|
+
# Simple stress return: use Perzyna approach with controlled plastic multiplier
|
|
1832
|
+
D = build_constitutive_matrix(E, nu)
|
|
1833
|
+
|
|
1834
|
+
# Griffiths & Lane viscoplastic strain method with strain softening
|
|
1835
|
+
# Calculate plastic strain rate using their approach
|
|
1836
|
+
if f_yield > 0:
|
|
1837
|
+
# Non-associated flow with ψ=0; Perzyna rate: erate ∝ f * n
|
|
1838
|
+
flow_vector = f_yield * n_flow_tp
|
|
1839
|
+
erate = flow_vector
|
|
1840
|
+
# Plastic strain increment: Δεp = erate * dt
|
|
1841
|
+
plastic_increment = erate * dt
|
|
1842
|
+
else:
|
|
1843
|
+
plastic_increment = np.zeros(3)
|
|
1844
|
+
inc_norm = float(np.linalg.norm(plastic_increment))
|
|
1845
|
+
|
|
1846
|
+
# Debug stress return
|
|
1847
|
+
# if debug_level >= 3 and inc_norm > 1e-3:
|
|
1848
|
+
# print(f" GP {gp}: inc_norm={inc_norm:.2e}, f_yield={f_yield:.2e}")
|
|
1849
|
+
|
|
1850
|
+
# Enforce plastic_strain_cap per Gauss point
|
|
1851
|
+
if plastic_strain_cap is not None and inc_norm > plastic_strain_cap:
|
|
1852
|
+
plastic_increment *= plastic_strain_cap / max(inc_norm, 1e-20)
|
|
1853
|
+
inc_norm = plastic_strain_cap
|
|
1854
|
+
|
|
1855
|
+
# Update plastic strains
|
|
1856
|
+
plastic_strains_new[elem_idx][gp, :] += plastic_increment
|
|
1857
|
+
|
|
1858
|
+
# Update stress state (remove plastic stress contribution)
|
|
1859
|
+
plastic_stress_tp = D @ plastic_increment
|
|
1860
|
+
plastic_stress_cp = -plastic_stress_tp
|
|
1861
|
+
new_stress_state['element_stresses'][elem_idx, gp, :] = trial_stress_cp + plastic_stress_cp
|
|
1862
|
+
total_increment += inc_norm
|
|
1863
|
+
else:
|
|
1864
|
+
new_stress_state['element_stresses'][elem_idx, gp, :] = trial_stress_cp
|
|
1865
|
+
|
|
1866
|
+
return plastic_strains_new, total_increment, new_stress_state
|
|
1867
|
+
|
|
1868
|
+
|
|
1869
|
+
def compute_final_state_perzyna(nodes, elements, element_types, element_materials,
|
|
1870
|
+
displacements, plastic_strains, c_reduced, phi_reduced,
|
|
1871
|
+
E_by_mat, nu_by_mat, u_nodal, stress_state):
|
|
1872
|
+
"""
|
|
1873
|
+
Compute final stress state (compression-positive) and identify plastic elements.
|
|
1874
|
+
"""
|
|
1875
|
+
n_elements = len(elements)
|
|
1876
|
+
final_stresses = np.zeros((n_elements, 4))
|
|
1877
|
+
plastic_elements = np.zeros(n_elements, dtype=bool)
|
|
1878
|
+
|
|
1879
|
+
for elem_idx in range(n_elements):
|
|
1880
|
+
elem_type = element_types[elem_idx]
|
|
1881
|
+
mat_id = element_materials[elem_idx] - 1
|
|
1882
|
+
|
|
1883
|
+
E = E_by_mat[mat_id]
|
|
1884
|
+
nu = nu_by_mat[mat_id]
|
|
1885
|
+
c = c_reduced[elem_idx]
|
|
1886
|
+
phi = phi_reduced[elem_idx]
|
|
1887
|
+
|
|
1888
|
+
elem_nodes = elements[elem_idx][:elem_type]
|
|
1889
|
+
elem_coords = nodes[elem_nodes]
|
|
1890
|
+
|
|
1891
|
+
elem_disp = np.zeros(2 * elem_type)
|
|
1892
|
+
for i, node_idx in enumerate(elem_nodes):
|
|
1893
|
+
elem_disp[2*i] = displacements[2*node_idx]
|
|
1894
|
+
elem_disp[2*i+1] = displacements[2*node_idx+1]
|
|
1895
|
+
|
|
1896
|
+
if elem_type == 3:
|
|
1897
|
+
total_strains = compute_triangle_strains_manual(elem_coords, elem_disp)
|
|
1898
|
+
plastic_strain = plastic_strains[elem_idx][0, :] if elem_idx in plastic_strains else np.zeros(3)
|
|
1899
|
+
elastic_strains = total_strains - plastic_strain
|
|
1900
|
+
D = build_constitutive_matrix(E, nu)
|
|
1901
|
+
stress_tp = D @ elastic_strains
|
|
1902
|
+
if stress_state is not None:
|
|
1903
|
+
initial_cp = stress_state['element_stresses'][elem_idx, 0, :]
|
|
1904
|
+
stress_cp = initial_cp - stress_tp # cp = initial_cp + (-tp)
|
|
1905
|
+
else:
|
|
1906
|
+
stress_cp = -stress_tp
|
|
1907
|
+
sig_x, sig_y, tau_xy = stress_cp
|
|
1908
|
+
sig_vm = np.sqrt(sig_x**2 + sig_y**2 - sig_x*sig_y + 3*tau_xy**2)
|
|
1909
|
+
final_stresses[elem_idx] = [sig_x, sig_y, tau_xy, sig_vm]
|
|
1910
|
+
f_yield = check_mohr_coulomb_cp(stress_cp, c, phi)
|
|
1911
|
+
plastic_elements[elem_idx] = f_yield > 1e-8
|
|
1912
|
+
else:
|
|
1913
|
+
# 8-node quad: average cp stress over Gauss points from stress_state
|
|
1914
|
+
elem_stress_avg_cp = np.zeros(3)
|
|
1915
|
+
n_gauss = 4
|
|
1916
|
+
for gp in range(n_gauss):
|
|
1917
|
+
elem_stress_avg_cp += stress_state['element_stresses'][elem_idx, gp, :]
|
|
1918
|
+
elem_stress_avg_cp /= n_gauss
|
|
1919
|
+
sig_x, sig_y, tau_xy = elem_stress_avg_cp
|
|
1920
|
+
sig_vm = np.sqrt(sig_x**2 + sig_y**2 - sig_x*sig_y + 3*tau_xy**2)
|
|
1921
|
+
final_stresses[elem_idx] = [sig_x, sig_y, tau_xy, sig_vm]
|
|
1922
|
+
f_yield = check_mohr_coulomb_cp(elem_stress_avg_cp, c, phi)
|
|
1923
|
+
plastic_elements[elem_idx] = f_yield > 1e-8
|
|
1924
|
+
|
|
1925
|
+
return final_stresses, plastic_elements
|
|
1926
|
+
|
|
1927
|
+
|
|
1928
|
+
def compute_triangle_strains_manual(coords, displacements):
|
|
1929
|
+
"""Manually compute triangle strains from displacements."""
|
|
1930
|
+
|
|
1931
|
+
x1, y1 = coords[0]
|
|
1932
|
+
x2, y2 = coords[1]
|
|
1933
|
+
x3, y3 = coords[2]
|
|
1934
|
+
|
|
1935
|
+
area = 0.5 * abs((x2-x1)*(y3-y1) - (x3-x1)*(y2-y1))
|
|
1936
|
+
|
|
1937
|
+
if area < 1e-12:
|
|
1938
|
+
return np.array([0.0, 0.0, 0.0])
|
|
1939
|
+
|
|
1940
|
+
# Shape function derivatives
|
|
1941
|
+
b1 = y2 - y3
|
|
1942
|
+
b2 = y3 - y1
|
|
1943
|
+
b3 = y1 - y2
|
|
1944
|
+
c1 = x3 - x2
|
|
1945
|
+
c2 = x1 - x3
|
|
1946
|
+
c3 = x2 - x1
|
|
1947
|
+
|
|
1948
|
+
# B matrix (standard linear triangle)
|
|
1949
|
+
B = np.array([
|
|
1950
|
+
[b1, 0, b2, 0, b3, 0 ], # εx = ∂u/∂x
|
|
1951
|
+
[0, c1, 0, c2, 0, c3], # εy = ∂v/∂y
|
|
1952
|
+
[c1, b1, c2, b2, c3, b3] # γxy = ∂u/∂y + ∂v/∂x
|
|
1953
|
+
]) / (2 * area)
|
|
1954
|
+
|
|
1955
|
+
# Strains
|
|
1956
|
+
strains = B @ displacements
|
|
1957
|
+
return strains
|
|
1958
|
+
|
|
1959
|
+
|
|
1960
|
+
def build_constitutive_matrix(E, nu):
|
|
1961
|
+
"""Build constitutive matrix for plane strain - standard tension-positive convention."""
|
|
1962
|
+
# Add numerical stability check for near-incompressible materials
|
|
1963
|
+
if nu >= 0.45:
|
|
1964
|
+
print(f"Warning: Poisson's ratio {nu:.3f} is close to incompressible limit (0.5)")
|
|
1965
|
+
print("Consider using nu <= 0.4 for better numerical stability")
|
|
1966
|
+
|
|
1967
|
+
# Optional: Add small regularization to prevent singularity
|
|
1968
|
+
# nu_effective = min(nu, 0.495) # Cap at safe value if needed
|
|
1969
|
+
|
|
1970
|
+
factor = E / ((1 + nu) * (1 - 2*nu))
|
|
1971
|
+
D = factor * np.array([
|
|
1972
|
+
[1-nu, nu, 0 ],
|
|
1973
|
+
[nu, 1-nu, 0 ],
|
|
1974
|
+
[0, 0, (1-2*nu)/2]
|
|
1975
|
+
])
|
|
1976
|
+
# Standard tension-positive convention (σ > 0 in tension, σ < 0 in compression)
|
|
1977
|
+
return D
|
|
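Two quick properties of the plane-strain matrix returned above, checked numerically assuming this module's namespace: the shear term D[2,2] equals the shear modulus G = E/(2(1+ν)), and for a pure εx strain the lateral stress ratio σy/σx equals ν/(1-ν).

def _check_plane_strain_D(E=1.0e5, nu=0.3):
    D = build_constitutive_matrix(E, nu)
    assert np.isclose(D[2, 2], E / (2.0 * (1.0 + nu)))         # engineering shear term = G
    sig = D @ np.array([1.0e-4, 0.0, 0.0])                     # pure axial strain
    assert np.isclose(sig[1] / sig[0], nu / (1.0 - nu))        # lateral stress ratio
    print("plane-strain D matrix passes shear-modulus and Poisson checks")

# _check_plane_strain_D()  # uncomment to run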
1978
|
+
|
|
1979
|
+
|
|
1980
|
+
def check_shear_convention_consistency(strain_vec, D_matrix, E, nu, element_id=None, debug=True):
|
|
1981
|
+
"""
|
|
1982
|
+
Diagnostic check for engineering vs tensor shear strain convention consistency.
|
|
1983
|
+
|
|
1984
|
+
This catches the common bug where B-matrix computes εxy (tensor shear) but
|
|
1985
|
+
D-matrix expects γxy (engineering shear), causing τxy to be off by factor of 2.
|
|
1986
|
+
|
|
1987
|
+
Args:
|
|
1988
|
+
strain_vec: [εx, εy, γxy] strain vector from B-matrix
|
|
1989
|
+
D_matrix: Constitutive matrix
|
|
1990
|
+
E, nu: Material properties
|
|
1991
|
+
element_id: For debugging output
|
|
1992
|
+
debug: Whether to print warnings
|
|
1993
|
+
"""
|
|
1994
|
+
if len(strain_vec) < 3:
|
|
1995
|
+
return True # Skip if not enough components
|
|
1996
|
+
|
|
1997
|
+
ex, ey, gxy = strain_vec[:3] # what your code uses
|
|
1998
|
+
|
|
1999
|
+
# Expected shear modulus for engineering shear
|
|
2000
|
+
mu = E / (2 * (1 + nu))
|
|
2001
|
+
|
|
2002
|
+
# What D-matrix actually computes for shear stress
|
|
2003
|
+
tau_from_D = (D_matrix @ strain_vec)[2]
|
|
2004
|
+
|
|
2005
|
+
# What we expect for engineering shear: τxy = μ * γxy
|
|
2006
|
+
tau_expected = mu * gxy
|
|
2007
|
+
|
|
2008
|
+
# Check consistency
|
|
2009
|
+
tolerance = 1e-6 * max(1.0, abs(tau_expected))
|
|
2010
|
+
is_consistent = abs(tau_from_D - tau_expected) <= tolerance
|
|
2011
|
+
|
|
2012
|
+
if not is_consistent and debug:
|
|
2013
|
+
elem_str = f" (element {element_id})" if element_id is not None else ""
|
|
2014
|
+
print(f"WARNING: Shear convention mismatch{elem_str}!")
|
|
2015
|
+
print(f" γxy from B-matrix: {gxy:.6f}")
|
|
2016
|
+
print(f" τxy from D-matrix: {tau_from_D:.2f}")
|
|
2017
|
+
print(f" τxy expected (μ*γxy): {tau_expected:.2f}")
|
|
2018
|
+
print(f" Ratio (actual/expected): {tau_from_D/tau_expected if abs(tau_expected) > 1e-12 else 'inf':.3f}")
|
|
2019
|
+
print(f" D[2,2] = {D_matrix[2,2]:.1f}, expected μ = {mu:.1f}")
|
|
2020
|
+
print(" Either B builds [εx, εy, εxy] but labels it γxy, or")
|
|
2021
|
+
print(" D[2,2] isn't correct for engineering shear convention")
|
|
2022
|
+
|
|
2023
|
+
return is_consistent
|
|
2024
|
+
|
|
2025
|
+
|
|
2026
|
+
def validate_bd_matrices_simple_test():
|
|
2027
|
+
"""
|
|
2028
|
+
Simple validation test for B and D matrix consistency using pure shear.
|
|
2029
|
+
Apply known displacement field and check if resulting stresses are correct.
|
|
2030
|
+
"""
|
|
2031
|
+
print("\n=== B/D Matrix Validation Test ===")
|
|
2032
|
+
|
|
2033
|
+
# Simple triangle for testing
|
|
2034
|
+
coords = np.array([[0, 0], [1, 0], [0, 1]]) # Right triangle
|
|
2035
|
+
E, nu = 30000, 0.3
|
|
2036
|
+
|
|
2037
|
+
# Test case 1: Pure shear displacement field
|
|
2038
|
+
# u = 0.001*y, v = 0.001*x (creates γxy = 0.002)
|
|
2039
|
+
displacements = np.array([
|
|
2040
|
+
0.0, 0.0, # Node 1: u1=0, v1=0
|
|
2041
|
+
0.0, 0.001, # Node 2: u2=0, v2=0.001
|
|
2042
|
+
0.001, 0.0 # Node 3: u3=0.001, v3=0
|
|
2043
|
+
])
|
|
2044
|
+
|
|
2045
|
+
# Compute strains using triangle B-matrix
|
|
2046
|
+
strains = compute_triangle_strains_manual(coords, displacements)
|
|
2047
|
+
print(f"Computed strains: εx={strains[0]:.6f}, εy={strains[1]:.6f}, γxy={strains[2]:.6f}")
|
|
2048
|
+
|
|
2049
|
+
# Expected: εx=0, εy=0, γxy=0.002 (engineering shear)
|
|
2050
|
+
expected_gxy = 0.002
|
|
2051
|
+
print(f"Expected γxy: {expected_gxy:.6f}")
|
|
2052
|
+
|
|
2053
|
+
# Compute stresses
|
|
2054
|
+
D = build_constitutive_matrix(E, nu)
|
|
2055
|
+
stresses = D @ strains
|
|
2056
|
+
print(f"Computed stresses: σx={stresses[0]:.2f}, σy={stresses[1]:.2f}, τxy={stresses[2]:.2f}")
|
|
2057
|
+
|
|
2058
|
+
# Expected: σx=0, σy=0, τxy = G*γxy = E/(2*(1+ν))*γxy
|
|
2059
|
+
G = E / (2 * (1 + nu))
|
|
2060
|
+
expected_tau_xy = G * expected_gxy
|
|
2061
|
+
print(f"Expected τxy: G*γxy = {G:.1f} * {expected_gxy:.6f} = {expected_tau_xy:.2f}")
|
|
2062
|
+
|
|
2063
|
+
# Check consistency
|
|
2064
|
+
is_consistent = check_shear_convention_consistency(strains, D, E, nu, debug=False)
|
|
2065
|
+
|
|
2066
|
+
if is_consistent:
|
|
2067
|
+
print("✓ B/D matrices are consistent (engineering shear convention)")
|
|
2068
|
+
else:
|
|
2069
|
+
print("✗ B/D matrices have shear convention mismatch!")
|
|
2070
|
+
|
|
2071
|
+
print("=================================\n")
|
|
2072
|
+
return is_consistent
|
|
2073
|
+
|
|
2074
|
+
# Uncomment this line to run the validation test:
|
|
2075
|
+
# validate_bd_matrices_simple_test()
|
|
2076
|
+
|
|
2077
|
+
|
|
2078
|
+
def test_constitutive_matrix_sanity():
|
|
2079
|
+
"""
|
|
2080
|
+
Basic sanity checks for constitutive matrix behavior.
|
|
2081
|
+
Tests fundamental stress-strain relationships.
|
|
2082
|
+
"""
|
|
2083
|
+
print("\n=== Constitutive Matrix Sanity Tests ===")
|
|
2084
|
+
|
|
2085
|
+
E, nu = 1e5, 0.3
|
|
2086
|
+
D = build_constitutive_matrix(E, nu)
|
|
2087
|
+
|
|
2088
|
+
# Test (A) Pure vertical shortening → σyy negative
|
|
2089
|
+
print("Test A: Pure vertical shortening")
|
|
2090
|
+
eps = np.array([0.0, -1e-4, 0.0]) # [εx, εy, γxy]
|
|
2091
|
+
sig = D @ eps
|
|
2092
|
+
print(f" Strain: εx={eps[0]:.1e}, εy={eps[1]:.1e}, γxy={eps[2]:.1e}")
|
|
2093
|
+
print(f" Stress: σx={sig[0]:.1f}, σy={sig[1]:.1f}, τxy={sig[2]:.1f}")
|
|
2094
|
+
|
|
2095
|
+
try:
|
|
2096
|
+
assert sig[1] < 0, "σyy must be negative under vertical shortening (tension-positive)."
|
|
2097
|
+
print(" ✓ PASS: σyy is negative (compression) as expected")
|
|
2098
|
+
except AssertionError as e:
|
|
2099
|
+
print(f" ✗ FAIL: {e}")
|
|
2100
|
+
print(f" σyy = {sig[1]:.3f}, expected < 0")
|
|
2101
|
+
|
|
2102
|
+
# Test (B) Pure shear → τxy = μ * γxy
|
|
2103
|
+
print("\nTest B: Pure shear stress")
|
|
2104
|
+
mu = E / (2*(1+nu))
|
|
2105
|
+
eps = np.array([0.0, 0.0, 1e-3]) # γxy = 1e-3
|
|
2106
|
+
sig = D @ eps
|
|
2107
|
+
expected_tau = mu * eps[2]
|
|
2108
|
+
|
|
2109
|
+
print(f" Strain: εx={eps[0]:.1e}, εy={eps[1]:.1e}, γxy={eps[2]:.1e}")
|
|
2110
|
+
print(f" Stress: σx={sig[0]:.1f}, σy={sig[1]:.1f}, τxy={sig[2]:.1f}")
|
|
2111
|
+
print(f" Expected τxy = μ*γxy = {mu:.1f} * {eps[2]:.1e} = {expected_tau:.1f}")
|
|
2112
|
+
|
|
2113
|
+
try:
|
|
2114
|
+
assert abs(sig[2] - mu*eps[2]) < 1e-8*max(1.0, abs(mu*eps[2])), "τxy must equal μ*γxy."
|
|
2115
|
+
print(" ✓ PASS: τxy = μ*γxy exactly")
|
|
2116
|
+
except AssertionError as e:
|
|
2117
|
+
print(f" ✗ FAIL: {e}")
|
|
2118
|
+
print(f" τxy = {sig[2]:.6f}, expected = {expected_tau:.6f}")
|
|
2119
|
+
print(f" Error = {abs(sig[2] - expected_tau):.2e}")
|
|
2120
|
+
|
|
2121
|
+
# Test (C) Check Poisson effect - verify D-matrix formulation
|
|
2122
|
+
print("\nTest C: Poisson effect under uniaxial tension")
|
|
2123
|
+
eps = np.array([1e-4, 0.0, 0.0]) # Pure εx
|
|
2124
|
+
sig = D @ eps
|
|
2125
|
+
|
|
2126
|
+
print(f" Strain: εx={eps[0]:.1e}, εy={eps[1]:.1e}, γxy={eps[2]:.1e}")
|
|
2127
|
+
print(f" Stress: σx={sig[0]:.1f}, σy={sig[1]:.1f}, τxy={sig[2]:.1f}")
|
|
2128
|
+
|
|
2129
|
+
# Debug: Check what D-matrix actually looks like
|
|
2130
|
+
print(f" D-matrix:")
|
|
2131
|
+
print(f" [{D[0,0]:.1f} {D[0,1]:.1f} {D[0,2]:.1f}]")
|
|
2132
|
+
print(f" [{D[1,0]:.1f} {D[1,1]:.1f} {D[1,2]:.1f}]")
|
|
2133
|
+
print(f" [{D[2,0]:.1f} {D[2,1]:.1f} {D[2,2]:.1f}]")
|
|
2134
|
+
|
|
2135
|
+
# What we expect from your D-matrix formulation
|
|
2136
|
+
factor = E / ((1 + nu) * (1 - 2*nu))
|
|
2137
|
+
expected_d11 = factor * (1 - nu)
|
|
2138
|
+
expected_d12 = factor * nu
|
|
2139
|
+
expected_d22 = factor * (1 - nu)
|
|
2140
|
+
expected_d33 = factor * (1 - 2*nu) / 2
|
|
2141
|
+
|
|
2142
|
+
print(f" Expected D-matrix (your formulation):")
|
|
2143
|
+
print(f" [{expected_d11:.1f} {expected_d12:.1f} 0.0]")
|
|
2144
|
+
print(f" [{expected_d12:.1f} {expected_d22:.1f} 0.0]")
|
|
2145
|
+
print(f" [0.0 0.0 {expected_d33:.1f}]")
|
|
2146
|
+
|
|
2147
|
+
# For your D-matrix, under pure εx strain:
|
|
2148
|
+
expected_sigx_your = expected_d11 * eps[0] # D[0,0] * εx
|
|
2149
|
+
expected_sigy_your = expected_d12 * eps[0] # D[1,0] * εx
|
|
2150
|
+
|
|
2151
|
+
print(f" Your D gives: σx = {expected_sigx_your:.1f}, σy = {expected_sigy_your:.1f}")
|
|
2152
|
+
|
|
2153
|
+
# Verify this matches what D actually computed
|
|
2154
|
+
if abs(sig[0] - expected_sigx_your) < 1e-10 and abs(sig[1] - expected_sigy_your) < 1e-10:
|
|
2155
|
+
print(" ✓ PASS: D-matrix working as designed")
|
|
2156
|
+
else:
|
|
2157
|
+
print(" ✗ FAIL: D-matrix computation error")
|
|
2158
|
+
|
|
2159
|
+
print("==========================================\n")
|
|
2160
|
+
|
|
2161
|
+
|
|
2162
|
+
# Run the tests
|
|
2163
|
+
# test_constitutive_matrix_sanity()
|
|
2164
|
+
|
|
2165
|
+
|
|
2166
|
+
def check_mohr_coulomb_cp_from_tp(stress_tp, c, phi):
|
|
2167
|
+
"""Mohr-Coulomb yield using compression-positive convention, input stress in tension-positive.
|
|
2168
|
+
F = tau_max - sigma_mean_cp * sin(phi) - c * cos(phi)
|
|
2169
|
+
Positive F => yield.
|
|
2170
|
+
"""
|
|
2171
|
+
sig_x_cp, sig_y_cp, tau_xy = stress_tp_to_cp(stress_tp)
|
|
2172
|
+
sig_mean_cp = (sig_x_cp + sig_y_cp) / 2.0
|
|
2173
|
+
tau_max = sqrt(((sig_x_cp - sig_y_cp) / 2.0)**2 + tau_xy**2)
|
|
2174
|
+
cos_phi = cos(phi)
|
|
2175
|
+
sin_phi = sin(phi)
|
|
2176
|
+
F = tau_max - sig_mean_cp * sin_phi - c * cos_phi
|
|
2177
|
+
return F
|
|
2178
|
+
|
|
2179
|
+
|
|
2180
|
+
def check_mohr_coulomb_cp(stress_cp, c, phi):
|
|
2181
|
+
"""Mohr-Coulomb yield function for compression-positive stresses.
|
|
2182
|
+
|
|
2183
|
+
For compression-positive convention (compression > 0, tension < 0):
|
|
2184
|
+
F = tau_max - sigma_mean * sin(phi) - c * cos(phi)
|
|
2185
|
+
|
|
2186
|
+
Where:
|
|
2187
|
+
- tau_max = maximum shear stress = sqrt((sig_x - sig_y)^2/4 + tau_xy^2)
|
|
2188
|
+
- sigma_mean = mean normal stress = (sig_x + sig_y)/2
|
|
2189
|
+
- Positive F indicates yielding
|
|
2190
|
+
|
|
2191
|
+
Args:
|
|
2192
|
+
stress_cp: Array [sig_x, sig_y, tau_xy] in compression-positive convention
|
|
2193
|
+
c: Cohesion
|
|
2194
|
+
phi: Friction angle in radians
|
|
2195
|
+
|
|
2196
|
+
Returns:
|
|
2197
|
+
F: Yield function value (F > 0 means yielding)
|
|
2198
|
+
"""
|
|
2199
|
+
sig_x, sig_y, tau_xy = stress_cp
|
|
2200
|
+
sig_mean = (sig_x + sig_y) / 2.0
|
|
2201
|
+
tau_max = sqrt(((sig_x - sig_y) / 2.0)**2 + tau_xy**2)
|
|
2202
|
+
cos_phi = cos(phi)
|
|
2203
|
+
sin_phi = sin(phi)
|
|
2204
|
+
F = tau_max - sig_mean * sin_phi - c * cos_phi
|
|
2205
|
+
return F
|
|
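A worked number for the compression-positive yield check above: with σx = 100, σy = 50, τxy = 20 (compression-positive), c = 10 and φ = 30°, τ_max = √(25² + 20²) ≈ 32.0 and σ_mean = 75, so F ≈ 32.0 - 75·0.5 - 10·0.866 ≈ -14.1, i.e. no yield. The same check in code, assuming this module's namespace (the stress values are illustrative):

def _demo_mc_yield_check():
    stress_cp = np.array([100.0, 50.0, 20.0])    # compression-positive [sig_x, sig_y, tau_xy]
    F = check_mohr_coulomb_cp(stress_cp, c=10.0, phi=radians(30.0))
    print(f"F = {F:.2f} (negative -> elastic, positive -> yielding)")

# _demo_mc_yield_check()  # uncomment to run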
2206
|
+
|
|
2207
|
+
|
|
2208
|
+
def compute_mohr_coulomb_potential_derivatives(psi_deg, dsbar, theta):
|
|
2209
|
+
"""
|
|
2210
|
+
Compute derivatives of Mohr-Coulomb potential function with respect to invariants.
|
|
2211
|
+
Based on mocouq.f90 from Griffiths & Lane implementation.
|
|
2212
|
+
|
|
2213
|
+
Args:
|
|
2214
|
+
psi_deg: Dilation angle in degrees
|
|
2215
|
+
dsbar: Second deviatoric stress invariant
|
|
2216
|
+
theta: Lode angle in radians
|
|
2217
|
+
|
|
2218
|
+
Returns:
|
|
2219
|
+
dq1, dq2, dq3: Derivatives with respect to I1, sqrt(J2), theta
|
|
2220
|
+
"""
|
|
2221
|
+
psi_rad = np.radians(psi_deg)
|
|
2222
|
+
sin_theta = np.sin(theta)
|
|
2223
|
+
sin_psi = np.sin(psi_rad)
|
|
2224
|
+
|
|
2225
|
+
dq1 = sin_psi
|
|
2226
|
+
|
|
2227
|
+
if abs(sin_theta) > 0.49:
|
|
2228
|
+
c1 = 1.0 if sin_theta >= 0 else -1.0
|
|
2229
|
+
dq2 = (np.sqrt(3) * 0.5 - c1 * sin_psi * 0.5 / np.sqrt(3)) * np.sqrt(3) * 0.5 / dsbar
|
|
2230
|
+
else:
|
|
2231
|
+
cos_theta = np.cos(theta)
|
|
2232
|
+
cos_3theta = np.cos(3 * theta)
|
|
2233
|
+
tan_3theta = np.tan(3 * theta)
|
|
2234
|
+
dq2 = (np.sqrt(3) * 0.5 * cos_theta + sin_psi * (sin_theta + sin_theta * cos_3theta / (3 * cos_3theta))) / (dsbar * cos_theta)
|
|
2235
|
+
|
|
2236
|
+
dq3 = 0.0 # Simplified - could include more complex terms
|
|
2237
|
+
|
|
2238
|
+
return dq1, dq2, dq3
|
|
2239
|
+
|
|
2240
|
+
def compute_plastic_flow_vector_cp_return_tp(stress_tp, psi):
|
|
2241
|
+
"""Compute flow direction for compression-positive potential and return vector in tension-positive axes.
|
|
2242
|
+
Uses g = tau_max - sigma_mean_cp * sin(psi).
|
|
2243
|
+
Maps derivatives back to tension-positive components: d/dsig_tp = - d/dsig_cp for normal components.
|
|
2244
|
+
"""
|
|
2245
|
+
sig_x_cp, sig_y_cp, tau_xy = stress_tp_to_cp(stress_tp)
|
|
2246
|
+
sig_mean_cp = (sig_x_cp + sig_y_cp) / 2.0
|
|
2247
|
+
tau_max = sqrt(((sig_x_cp - sig_y_cp) / 2.0)**2 + tau_xy**2)
|
|
2248
|
+
if tau_max < 1e-20:
|
|
2249
|
+
return np.array([0.0, 0.0, 0.0])
|
|
2250
|
+
# Derivatives in cp convention
|
|
2251
|
+
dsig_mean_dsigx_cp = 0.5
|
|
2252
|
+
dsig_mean_dsigy_cp = 0.5
|
|
2253
|
+
dsig_mean_dtau_cp = 0.0
|
|
2254
|
+
dtau_dsigx_cp = (sig_x_cp - sig_y_cp) / (4.0 * tau_max)
|
|
2255
|
+
dtau_dsigy_cp = -(sig_x_cp - sig_y_cp) / (4.0 * tau_max)
|
|
2256
|
+
dtau_dtau_cp = tau_xy / tau_max
|
|
2257
|
+
sin_psi = sin(psi)
|
|
2258
|
+
# ∂g/∂σ_cp = ∂τ_max/∂σ_cp - sin(ψ) * ∂σ_mean/∂σ_cp
|
|
2259
|
+
flow_x_cp = dtau_dsigx_cp - sin_psi * dsig_mean_dsigx_cp
|
|
2260
|
+
flow_y_cp = dtau_dsigy_cp - sin_psi * dsig_mean_dsigy_cp
|
|
2261
|
+
flow_xy_cp = dtau_dtau_cp - sin_psi * dsig_mean_dtau_cp
|
|
2262
|
+
# Map back to tension-positive: d/dsig_tp = - d/dsig_cp for normals; shear unchanged
|
|
2263
|
+
flow_x_tp = -flow_x_cp
|
|
2264
|
+
flow_y_tp = -flow_y_cp
|
|
2265
|
+
flow_xy_tp = flow_xy_cp
|
|
2266
|
+
return np.array([flow_x_tp, flow_y_tp, flow_xy_tp])
|
|
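One consequence of ψ = 0 that the flow direction above should reproduce: the plastic flow is volume-preserving, so the two normal components of the returned vector sum to zero. A quick check with an arbitrary tension-positive stress, assuming this module's namespace:

def _check_zero_dilation_flow():
    stress_tp = np.array([-120.0, -60.0, 15.0])                   # arbitrary tension-positive stress
    n = compute_plastic_flow_vector_cp_return_tp(stress_tp, 0.0)  # psi = 0 (non-associated)
    assert np.isclose(n[0] + n[1], 0.0)                           # no plastic volume change
    print("flow direction with psi=0 produces no volumetric strain")

# _check_zero_dilation_flow()  # uncomment to run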
2267
|
+
|
|
2268
|
+
|
|
2269
|
+
def stress_tp_to_cp(stress_tp):
|
|
2270
|
+
"""Convert tension-positive stress [sigx, sigy, tau_xy] to compression-positive."""
|
|
2271
|
+
sig_x, sig_y, tau_xy = stress_tp
|
|
2272
|
+
return np.array([-sig_x, -sig_y, tau_xy])
|
|
2273
|
+
|
|
2274
|
+
|
|
2275
|
+
def establish_k0_stress_state(K_global, F_gravity, bc_type, nodes, elements, element_types,
|
|
2276
|
+
element_materials, E_by_mat, nu_by_mat, gamma_by_mat, u_nodal, debug_level=0):
|
|
2277
|
+
"""
|
|
2278
|
+
Establish K₀ initial stress state through elastic gravity loading.
|
|
2279
|
+
|
|
2280
|
+
This creates the geostatic stress field that exists before applying strength reduction.
|
|
2281
|
+
Critical for developing proper rotational failure modes in slopes.
|
|
2282
|
+
"""
|
|
2283
|
+
|
|
2284
|
+
# Apply boundary conditions to gravity loading system
|
|
2285
|
+
# apply_boundary_conditions is now defined in this same file
|
|
2286
|
+
K_constrained, F_constrained, constraint_dofs = apply_boundary_conditions(
|
|
2287
|
+
K_global, F_gravity, bc_type, nodes)
|
|
2288
|
+
|
|
2289
|
+
# Solve elastic system under gravity
|
|
2290
|
+
try:
|
|
2291
|
+
if hasattr(K_constrained, 'toarray'):
|
|
2292
|
+
K_constrained = K_constrained.tocsr()
|
|
2293
|
+
displacements_free = spsolve(K_constrained, F_constrained)
|
|
2294
|
+
|
|
2295
|
+
# Reconstruct full displacement vector
|
|
2296
|
+
n_dof = 2 * len(nodes)
|
|
2297
|
+
displacements = np.zeros(n_dof)
|
|
2298
|
+
free_dofs = [i for i in range(n_dof) if i not in constraint_dofs]
|
|
2299
|
+
displacements[free_dofs] = displacements_free
|
|
2300
|
+
|
|
2301
|
+
except Exception as e:
|
|
2302
|
+
print(f"K₀ stress establishment failed: {e}")
|
|
2303
|
+
# Fall back to zero displacement
|
|
2304
|
+
displacements = np.zeros(2 * len(nodes))
|
|
2305
|
+
|
|
2306
|
+
# Compute stress state from elastic solution
|
|
2307
|
+
stress_state = compute_k0_stress_state(
|
|
2308
|
+
nodes, elements, element_types, element_materials, displacements,
|
|
2309
|
+
E_by_mat, nu_by_mat, gamma_by_mat, u_nodal)
|
|
2310
|
+
|
|
2311
|
+
if debug_level >= 2:
|
|
2312
|
+
max_disp = np.max(np.abs(displacements))
|
|
2313
|
+
print(f" K₀ solution: max displacement = {max_disp:.6f}")
|
|
2314
|
+
|
|
2315
|
+
# Debug: Check actual displacement at a specific node
|
|
2316
|
+
node_near_top = nodes[:, 1].argmax() # Node with highest y coordinate
|
|
2317
|
+
disp_x = displacements[2*node_near_top]
|
|
2318
|
+
disp_y = displacements[2*node_near_top+1]
|
|
2319
|
+
print(f" Top node {node_near_top} at y={nodes[node_near_top, 1]:.1f}: disp_x={disp_x:.6f}, disp_y={disp_y:.6f}")
|
|
2320
|
+
|
|
2321
|
+
n_stress_elements = len(stress_state.get('element_stresses', []))
|
|
2322
|
+
print(f" Stress state established for {n_stress_elements} elements")
|
|
2323
|
+
|
|
2324
|
+
return displacements, stress_state
|
|
2325
|
+
|
|
2326
|
+
|
|
2327
|
+
def compute_k0_stress_state(nodes, elements, element_types, element_materials, displacements,
|
|
2328
|
+
E_by_mat, nu_by_mat, gamma_by_mat, u_nodal):
|
|
2329
|
+
"""
|
|
2330
|
+
Compute initial stress state from elastic FEM gravity solution.
|
|
2331
|
+
|
|
2332
|
+
Following Griffiths & Lane (1999): "The present work applies gravity in a single
|
|
2333
|
+
increment to an initially stress-free slope" - this means:
|
|
2334
|
+
1. Start with zero stress everywhere
|
|
2335
|
+
2. Apply gravity loads via FEM
|
|
2336
|
+
3. Compute strains from resulting displacements
|
|
2337
|
+
4. Compute stresses from elastic strains: σ = D·ε
|
|
2338
|
+
|
|
2339
|
+
Store stresses as compression-positive throughout the codebase.
|
|
2340
|
+
"""
|
|
2341
|
+
n_elements = len(elements)
|
|
2342
|
+
max_gauss_points = 4
|
|
2343
|
+
element_stresses = np.zeros((n_elements, max_gauss_points, 3)) # [sig_x, sig_y, tau_xy] (compression+)
|
|
2344
|
+
|
|
2345
|
+
for elem_idx in range(n_elements):
|
|
2346
|
+
elem_type = element_types[elem_idx]
|
|
2347
|
+
mat_id = element_materials[elem_idx] - 1
|
|
2348
|
+
|
|
2349
|
+
E = E_by_mat[mat_id]
|
|
2350
|
+
nu = nu_by_mat[mat_id]
|
|
2351
|
+
|
|
2352
|
+
# Get element data
|
|
2353
|
+
elem_nodes = elements[elem_idx][:elem_type]
|
|
2354
|
+
elem_coords = nodes[elem_nodes]
|
|
2355
|
+
|
|
2356
|
+
# Get Gauss points for proper integration
|
|
2357
|
+
if elem_type == 8:
|
|
2358
|
+
gauss_points_2x2, _ = get_gauss_points_2x2()
|
|
2359
|
+
n_gauss = 4
|
|
2360
|
+
else:
|
|
2361
|
+
n_gauss = 1
|
|
2362
|
+
|
|
2363
|
+
# Get element displacements from FEM gravity solution
|
|
2364
|
+
elem_disp = np.zeros(2 * elem_type)
|
|
2365
|
+
for i, node in enumerate(elem_nodes):
|
|
2366
|
+
elem_disp[2*i] = displacements[2*node]
|
|
2367
|
+
elem_disp[2*i+1] = displacements[2*node+1]
|
|
2368
|
+
|
|
2369
|
+
# Compute stresses at each Gauss point from FEM strains
|
|
2370
|
+
for gp in range(n_gauss):
|
|
2371
|
+
if elem_type == 3: # Triangle
|
|
2372
|
+
strains = compute_triangle_strains_manual(elem_coords, elem_disp)
|
|
2373
|
+
elif elem_type == 8: # 8-node quad
|
|
2374
|
+
xi, eta_local = gauss_points_2x2[gp]
|
|
2375
|
+
strains = compute_quad8_strains_at_xi_eta(elem_coords, elem_disp, xi, eta_local)
|
|
2376
|
+
else:
|
|
2377
|
+
strains = np.array([0.0, 0.0, 0.0])
|
|
2378
|
+
|
|
2379
|
+
# Compute stresses from strains using elastic constitutive matrix
|
|
2380
|
+
D = build_constitutive_matrix(E, nu)
|
|
2381
|
+
stresses_tp = D @ strains # tension-positive
|
|
2382
|
+
stresses_cp = -stresses_tp # store compression-positive
|
|
2383
|
+
|
|
2384
|
+
element_stresses[elem_idx, gp, :] = stresses_cp
|
|
2385
|
+
|
|
2386
|
+
# Debug: Check stress state statistics (compression-positive)
|
|
2387
|
+
stress_stats = {
|
|
2388
|
+
'sigma_x': {'min': np.min(element_stresses[:, :, 0]), 'max': np.max(element_stresses[:, :, 0]), 'mean': np.mean(element_stresses[:, :, 0])},
|
|
2389
|
+
'sigma_y': {'min': np.min(element_stresses[:, :, 1]), 'max': np.max(element_stresses[:, :, 1]), 'mean': np.mean(element_stresses[:, :, 1])},
|
|
2390
|
+
'tau_xy': {'min': np.min(element_stresses[:, :, 2]), 'max': np.max(element_stresses[:, :, 2]), 'mean': np.mean(element_stresses[:, :, 2])}
|
|
2391
|
+
}
|
|
2392
|
+
|
|
2393
|
+
print(f" Initial stress state statistics (compression+):")
|
|
2394
|
+
print(f" σ_x: min={stress_stats['sigma_x']['min']:.1f}, max={stress_stats['sigma_x']['max']:.1f}, mean={stress_stats['sigma_x']['mean']:.1f}")
|
|
2395
|
+
print(f" σ_y: min={stress_stats['sigma_y']['min']:.1f}, max={stress_stats['sigma_y']['max']:.1f}, mean={stress_stats['sigma_y']['mean']:.1f}")
|
|
2396
|
+
print(f" τ_xy: min={stress_stats['tau_xy']['min']:.1f}, max={stress_stats['tau_xy']['max']:.1f}, mean={stress_stats['tau_xy']['mean']:.1f}")
|
|
2397
|
+
|
|
2398
|
+
return {
|
|
2399
|
+
'element_stresses': element_stresses,
|
|
2400
|
+
'plastic_state': np.zeros((n_elements, max_gauss_points), dtype=bool)
|
|
2401
|
+
}
|
|
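For a level ground surface under one-dimensional elastic compression, the gravity turn-on described above should approximately reproduce the textbook geostatic result: σ_v = γ·z and σ_h = K₀·σ_v with K₀ = ν/(1-ν) in plane strain. A small hand-check of that relationship (the unit weight, depth and Poisson's ratio are illustrative, not taken from any particular mesh):

def _demo_k0_hand_check(gamma=20.0, depth=5.0, nu=0.3):
    sigma_v = gamma * depth                   # vertical stress at depth z (compression-positive)
    K0 = nu / (1.0 - nu)                      # elastic at-rest coefficient under 1-D compression
    sigma_h = K0 * sigma_v
    print(f"sigma_v = {sigma_v:.1f}, K0 = {K0:.3f}, sigma_h = {sigma_h:.1f}")

# _demo_k0_hand_check()  # uncomment to run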
2402
|
+
|
|
2403
|
+
def compute_strains(nodes, elements, element_types, displacements):
    """
    Compute element strains for visualization.
    """
    n_elements = len(elements)
    strains = np.zeros((n_elements, 4))  # [eps_x, eps_y, gamma_xy, max_shear_strain]

    for elem_idx, element in enumerate(elements):
        elem_type = element_types[elem_idx]
        elem_nodes = element[:elem_type]
        elem_coords = nodes[elem_nodes]

        # Get element displacements
        elem_disp = np.zeros(2 * elem_type)
        for i, node in enumerate(elem_nodes):
            elem_disp[2*i] = displacements[2*node]
            elem_disp[2*i+1] = displacements[2*node+1]

        # Compute strains
        if elem_type == 3:
            element_strains = compute_triangle_strains_manual(elem_coords, elem_disp)
        elif elem_type == 8:
            # For 8-node quad, compute strain at the centroid
            xi, eta = 0.0, 0.0  # Centroid
            element_strains = compute_quad8_strains_at_xi_eta(elem_coords, elem_disp, xi, eta)
        else:
            element_strains = np.array([0.0, 0.0, 0.0])

        eps_x = element_strains[0]
        eps_y = element_strains[1]
        gamma_xy = element_strains[2]

        # Maximum shear strain (radius of the Mohr's circle of strain)
        max_shear_strain = sqrt(((eps_x - eps_y) / 2)**2 + (gamma_xy / 2)**2)

        strains[elem_idx] = [eps_x, eps_y, gamma_xy, max_shear_strain]

    return strains

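The fourth column returned by compute_strains is the Mohr's-circle radius in strain space. A quick stand-alone check of that formula (illustrative only): for pure shear the maximum shear strain is γxy/2.

    from math import sqrt

    # Pure shear: eps_x = eps_y = 0, gamma_xy = 0.002, so the radius is 0.001.
    eps_x, eps_y, gamma_xy = 0.0, 0.0, 0.002
    max_shear_strain = sqrt(((eps_x - eps_y) / 2)**2 + (gamma_xy / 2)**2)
    assert abs(max_shear_strain - 0.001) < 1e-12
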
def compute_quad8_strains_at_xi_eta(coords, displacements, xi, eta):
    """
    Compute strains for an 8-node quadrilateral at specific (xi, eta) coordinates.
    """
    # 8-node quad shape function derivatives at (xi, eta)
    dN_dxi, dN_deta = compute_quad8_shape_derivatives(xi, eta)

    # Jacobian matrix and its inverse
    J = np.zeros((2, 2))
    for i in range(8):
        x, y = coords[i]
        J[0, 0] += dN_dxi[i] * x   # dx/dxi
        J[0, 1] += dN_dxi[i] * y   # dy/dxi
        J[1, 0] += dN_deta[i] * x  # dx/deta
        J[1, 1] += dN_deta[i] * y  # dy/deta

    det_J = J[0, 0] * J[1, 1] - J[0, 1] * J[1, 0]

    if abs(det_J) < 1e-12:
        return np.array([0.0, 0.0, 0.0])

    # Inverse Jacobian
    J_inv = np.array([[J[1, 1], -J[0, 1]], [-J[1, 0], J[0, 0]]]) / det_J

    # Shape function derivatives in physical coordinates
    dN_dx = np.zeros(8)
    dN_dy = np.zeros(8)
    for i in range(8):
        dN_dx[i] = J_inv[0, 0] * dN_dxi[i] + J_inv[0, 1] * dN_deta[i]
        dN_dy[i] = J_inv[1, 0] * dN_dxi[i] + J_inv[1, 1] * dN_deta[i]

    # B matrix for strain calculation (standard tension positive)
    B = np.zeros((3, 16))  # 3 strains x 16 DOFs (8 nodes x 2 DOFs)
    for i in range(8):
        B[0, 2*i] = dN_dx[i]    # εx = ∂u/∂x
        B[1, 2*i+1] = dN_dy[i]  # εy = ∂v/∂y
        B[2, 2*i] = dN_dy[i]    # γxy = ∂u/∂y + ∂v/∂x
        B[2, 2*i+1] = dN_dx[i]  # γxy = ∂u/∂y + ∂v/∂x

    # Compute strains
    strains = B @ displacements
    return strains

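A patch-test style sketch for compute_quad8_strains_at_xi_eta (assumes the xslope.fem import path and the CCW corner/mid-edge node ordering documented in compute_quad8_shape_derivatives below): a linear displacement field should be recovered exactly at any (xi, eta).

    import numpy as np
    from xslope.fem import compute_quad8_strains_at_xi_eta  # assumed import path

    # Unit-square quad8: corners 0-3 CCW, then mid-edge nodes 4-7 (bottom, right, top, left)
    coords = np.array([[0, 0], [1, 0], [1, 1], [0, 1],
                       [0.5, 0], [1, 0.5], [0.5, 1], [0, 0.5]], dtype=float)

    # Impose u = 1e-3 x, v = -5e-4 y; the recovered strains should be
    # [1e-3, -5e-4, 0] regardless of the sampling point.
    disp = np.zeros(16)
    disp[0::2] = 1e-3 * coords[:, 0]
    disp[1::2] = -5e-4 * coords[:, 1]

    print(compute_quad8_strains_at_xi_eta(coords, disp, 0.3, -0.6))
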
def compute_simple_quad4_strains(coords, displacements):
    """
    Simple strain calculation for a 4-node quad using bilinear interpolation.

    Diagnostic helper: evaluates centroid strains with a plain bilinear element
    to check whether a problem lies in the 8-node isoparametric formulation.
    """
    # Use the center point (xi=0, eta=0) for simplicity
    xi, eta = 0.0, 0.0

    # 4-node bilinear shape function derivatives
    dN_dxi = 0.25 * np.array([-(1-eta), (1-eta), (1+eta), -(1+eta)])
    dN_deta = 0.25 * np.array([-(1-xi), -(1+xi), (1+xi), (1-xi)])

    # Jacobian matrix
    J = np.zeros((2, 2))
    for i in range(4):
        x, y = coords[i]
        J[0, 0] += dN_dxi[i] * x   # dx/dxi
        J[0, 1] += dN_dxi[i] * y   # dy/dxi
        J[1, 0] += dN_deta[i] * x  # dx/deta
        J[1, 1] += dN_deta[i] * y  # dy/deta

    det_J = J[0, 0] * J[1, 1] - J[0, 1] * J[1, 0]

    if abs(det_J) < 1e-12:
        return np.array([0.0, 0.0, 0.0])

    # Inverse Jacobian
    J_inv = np.array([[J[1, 1], -J[0, 1]], [-J[1, 0], J[0, 0]]]) / det_J

    # Shape function derivatives in physical coordinates
    dN_dx = np.zeros(4)
    dN_dy = np.zeros(4)
    for i in range(4):
        dN_dx[i] = J_inv[0, 0] * dN_dxi[i] + J_inv[0, 1] * dN_deta[i]
        dN_dy[i] = J_inv[1, 0] * dN_dxi[i] + J_inv[1, 1] * dN_deta[i]

    # B matrix (standard tension positive, 3 strains x 8 DOFs for 4 nodes)
    B = np.zeros((3, 8))
    for i in range(4):
        B[0, 2*i] = dN_dx[i]    # εx = ∂u/∂x
        B[1, 2*i+1] = dN_dy[i]  # εy = ∂v/∂y
        B[2, 2*i] = dN_dy[i]    # γxy = ∂u/∂y + ∂v/∂x
        B[2, 2*i+1] = dN_dx[i]  # γxy = ∂u/∂y + ∂v/∂x

    # Compute strains
    strains = B @ displacements
    return strains

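A similar spot check for the bilinear diagnostic helper (again assuming the xslope.fem import path and CCW corner ordering): a uniform-stretch displacement field should give constant strains at the centroid.

    import numpy as np
    from xslope.fem import compute_simple_quad4_strains  # assumed import path

    coords = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
    disp = np.zeros(8)
    disp[0::2] = 2e-3 * coords[:, 0]   # u = 2e-3 * x, so eps_x should be 2e-3
    print(compute_simple_quad4_strains(coords, disp))   # ~[2e-3, 0, 0]
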
def compute_quad8_strains_at_gauss_point(coords, displacements, gauss_point):
    """
    Compute strains for an 8-node quadrilateral element at a specific Gauss point.

    This implements the formulation used in Griffiths & Lane (1999).
    Uses reduced integration with 4 Gauss points (2x2 rule).
    """
    # 2x2 Gauss points for reduced integration (as per the Griffiths paper)
    gauss_coords = [
        (-0.5773502692, -0.5773502692),  # Point 0
        ( 0.5773502692, -0.5773502692),  # Point 1
        ( 0.5773502692,  0.5773502692),  # Point 2
        (-0.5773502692,  0.5773502692)   # Point 3
    ]

    if gauss_point >= len(gauss_coords):
        return np.array([0.0, 0.0, 0.0])

    xi, eta = gauss_coords[gauss_point]

    # 8-node quad shape function derivatives at (xi, eta)
    dN_dxi, dN_deta = compute_quad8_shape_derivatives(xi, eta)

    # Jacobian matrix and its inverse
    J = np.zeros((2, 2))
    for i in range(8):
        x, y = coords[i]
        J[0, 0] += dN_dxi[i] * x   # dx/dxi
        J[0, 1] += dN_dxi[i] * y   # dy/dxi
        J[1, 0] += dN_deta[i] * x  # dx/deta
        J[1, 1] += dN_deta[i] * y  # dy/deta

    det_J = J[0, 0] * J[1, 1] - J[0, 1] * J[1, 0]

    if abs(det_J) < 1e-12:
        return np.array([0.0, 0.0, 0.0])

    # Inverse Jacobian
    J_inv = np.array([[J[1, 1], -J[0, 1]],
                      [-J[1, 0], J[0, 0]]]) / det_J

    # Shape function derivatives in physical coordinates
    dN_dx = np.zeros(8)
    dN_dy = np.zeros(8)
    for i in range(8):
        dN_dx[i] = J_inv[0, 0] * dN_dxi[i] + J_inv[0, 1] * dN_deta[i]
        dN_dy[i] = J_inv[1, 0] * dN_dxi[i] + J_inv[1, 1] * dN_deta[i]

    # B matrix for strain calculation
    B = np.zeros((3, 16))  # 3 strains x 16 DOFs (8 nodes x 2 DOFs)
    for i in range(8):
        B[0, 2*i] = dN_dx[i]    # ∂u/∂x
        B[1, 2*i+1] = dN_dy[i]  # ∂v/∂y
        B[2, 2*i] = dN_dy[i]    # ∂u/∂y
        B[2, 2*i+1] = dN_dx[i]  # ∂v/∂x

    # Compute strains: ε = B · u
    strains = B @ displacements

    return strains

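One way to use the Gauss-point variant (a sketch; averaging the four sampling points into a single element value is an assumption here, not necessarily how fem.py reports strains):

    import numpy as np
    from xslope.fem import compute_quad8_strains_at_gauss_point  # assumed import path

    coords = np.array([[0, 0], [2, 0], [2, 1], [0, 1],
                       [1, 0], [2, 0.5], [1, 1], [0, 0.5]], dtype=float)
    disp = np.random.default_rng(0).normal(scale=1e-3, size=16)

    # Average the four reduced-integration sampling points for an element-level estimate.
    avg = np.mean([compute_quad8_strains_at_gauss_point(coords, disp, gp)
                   for gp in range(4)], axis=0)
    print(avg)
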
def compute_quad8_shape_derivatives(xi, eta):
    """
    Compute shape function derivatives for the 8-node quadrilateral at (xi, eta).

    Uses the serendipity formulation with CCW node ordering:

        3 --- 6 --- 2
        |           |
        7     +     5
        |           |
        0 --- 4 --- 1

    Corner nodes: 0(-1,-1), 1(1,-1), 2(1,1), 3(-1,1)
    Edge nodes:   4(0,-1), 5(1,0), 6(0,1), 7(-1,0)
    """

    # Serendipity shape function derivatives for CCW node ordering
    # (from the working implementation in seep.py)
    dN_dxi = np.array([
        -0.25*(1-eta)*(-xi-eta-1) - 0.25*(1-xi)*(1-eta),  # Node 0: corner (-1,-1)
         0.25*(1-eta)*(xi-eta-1) + 0.25*(1+xi)*(1-eta),   # Node 1: corner (1,-1)
         0.25*(1+eta)*(xi+eta-1) + 0.25*(1+xi)*(1+eta),   # Node 2: corner (1,1)
        -0.25*(1+eta)*(-xi+eta-1) - 0.25*(1-xi)*(1+eta),  # Node 3: corner (-1,1)
        -xi*(1-eta),                                      # Node 4: edge (0,-1)
         0.5*(1-eta*eta),                                 # Node 5: edge (1,0)
        -xi*(1+eta),                                      # Node 6: edge (0,1)
        -0.5*(1-eta*eta)                                  # Node 7: edge (-1,0)
    ])

    dN_deta = np.array([
        -0.25*(1-xi)*(-xi-eta-1) - 0.25*(1-xi)*(1-eta),   # Node 0: corner (-1,-1)
        -0.25*(1+xi)*(xi-eta-1) - 0.25*(1+xi)*(1-eta),    # Node 1: corner (1,-1)
         0.25*(1+xi)*(xi+eta-1) + 0.25*(1+xi)*(1+eta),    # Node 2: corner (1,1)
         0.25*(1-xi)*(-xi+eta-1) + 0.25*(1-xi)*(1+eta),   # Node 3: corner (-1,1)
        -0.5*(1-xi*xi),                                   # Node 4: edge (0,-1)
        -eta*(1+xi),                                      # Node 5: edge (1,0)
         0.5*(1-xi*xi),                                   # Node 6: edge (0,1)
        -eta*(1-xi)                                       # Node 7: edge (-1,0)
    ])

    return dN_dxi, dN_deta

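Because the eight serendipity shape functions sum to one everywhere, their derivatives must sum to zero at any (xi, eta). A small self-check sketch (assumes the xslope.fem import path):

    import numpy as np
    from xslope.fem import compute_quad8_shape_derivatives  # assumed import path

    # Partition of unity implies the derivative arrays each sum to zero.
    rng = np.random.default_rng(42)
    for xi, eta in rng.uniform(-1, 1, size=(5, 2)):
        dN_dxi, dN_deta = compute_quad8_shape_derivatives(xi, eta)
        assert abs(dN_dxi.sum()) < 1e-12 and abs(dN_deta.sum()) < 1e-12
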
def build_quad8_stiffness_reduced_integration_corrected(coords, E, nu):
    """
    Build the stiffness matrix for an 8-node quadrilateral with 2x2 reduced integration.

    This follows the Griffiths & Lane (1999) implementation:
    - 8-node serendipity quadrilateral elements
    - 2x2 reduced integration (4 Gauss points)
    - Prevents volumetric locking in nearly incompressible materials
    """
    # Constitutive matrix for plane strain
    factor = E / ((1 + nu) * (1 - 2 * nu))
    D = factor * np.array([
        [1 - nu, nu, 0],
        [nu, 1 - nu, 0],
        [0, 0, (1 - 2 * nu) / 2]
    ])

    # 2x2 Gauss points for reduced integration (exactly as in the Griffiths paper)
    gauss_coord = 1.0 / np.sqrt(3.0)  # = 0.5773502692
    xi_points = np.array([-gauss_coord, gauss_coord])
    eta_points = np.array([-gauss_coord, gauss_coord])
    weights = np.array([1.0, 1.0, 1.0, 1.0])  # 2D weights = 1 * 1

    K = np.zeros((16, 16))  # 8 nodes x 2 DOF = 16x16 matrix

    gp_idx = 0
    for i in range(2):
        for j in range(2):
            xi, eta = xi_points[i], eta_points[j]
            w = weights[gp_idx]
            gp_idx += 1

            # Use the existing correct shape function derivatives
            dN_dxi, dN_deta = compute_quad8_shape_derivatives(xi, eta)

            # Jacobian matrix
            J = np.zeros((2, 2))
            for a in range(8):
                J[0, 0] += dN_dxi[a] * coords[a, 0]   # dx/dxi
                J[0, 1] += dN_dxi[a] * coords[a, 1]   # dy/dxi
                J[1, 0] += dN_deta[a] * coords[a, 0]  # dx/deta
                J[1, 1] += dN_deta[a] * coords[a, 1]  # dy/deta

            det_J = J[0, 0] * J[1, 1] - J[0, 1] * J[1, 0]

            if abs(det_J) < 1e-12:
                print(f"Warning: Nearly singular Jacobian in quad8 element: det(J) = {det_J}")
                continue

            # Inverse Jacobian
            J_inv = np.array([[J[1, 1], -J[0, 1]], [-J[1, 0], J[0, 0]]]) / det_J

            # Shape function derivatives in physical coordinates
            dN_dx = np.zeros(8)
            dN_dy = np.zeros(8)
            for a in range(8):
                dN_dx[a] = J_inv[0, 0]*dN_dxi[a] + J_inv[0, 1]*dN_deta[a]
                dN_dy[a] = J_inv[1, 0]*dN_dxi[a] + J_inv[1, 1]*dN_deta[a]

            # B matrix (strain-displacement, standard tension positive)
            B = np.zeros((3, 16))  # 3 strains x 16 DOF
            for a in range(8):
                B[0, 2*a] = dN_dx[a]    # εx = ∂u/∂x
                B[1, 2*a+1] = dN_dy[a]  # εy = ∂v/∂y
                B[2, 2*a] = dN_dy[a]    # γxy = ∂u/∂y + ∂v/∂x
                B[2, 2*a+1] = dN_dx[a]  # γxy = ∂u/∂y + ∂v/∂x

            # Element stiffness matrix contribution
            K += w * det_J * (B.T @ D @ B)

    return K

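A sanity-check sketch for the element matrix (assumes the xslope.fem import path): it should be symmetric and positive semi-definite, with at least the three rigid-body zero-energy modes; 2x2 reduced integration can add a spurious zero-energy mode on top of those.

    import numpy as np
    from scipy.linalg import eigh
    from xslope.fem import build_quad8_stiffness_reduced_integration_corrected  # assumed path

    coords = np.array([[0, 0], [2, 0], [2, 1], [0, 1],
                       [1, 0], [2, 0.5], [1, 1], [0, 0.5]], dtype=float)
    K = build_quad8_stiffness_reduced_integration_corrected(coords, E=1.0e5, nu=0.3)

    assert np.allclose(K, K.T)
    eigvals = eigh(K, eigvals_only=True)
    print(np.sort(np.abs(eigvals))[:4])   # smallest eigenvalues, expect >= 3 near zero
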
def build_triangle_stiffness_corrected(coords, E, nu):
    """
    Build the corrected stiffness matrix for a linear triangular element (plane strain).
    """
    x1, y1 = coords[0]
    x2, y2 = coords[1]
    x3, y3 = coords[2]

    # Area
    area = 0.5 * abs((x2-x1)*(y3-y1) - (x3-x1)*(y2-y1))

    if area < 1e-12:
        print(f"Warning: Very small triangle area: {area}")
        return np.zeros((6, 6))

    # Shape function derivatives
    b1 = y2 - y3
    b2 = y3 - y1
    b3 = y1 - y2
    c1 = x3 - x2
    c2 = x1 - x3
    c3 = x2 - x1

    # B matrix (standard linear triangle)
    B = np.array([
        [b1, 0,  b2, 0,  b3, 0 ],  # εx = ∂u/∂x
        [0,  c1, 0,  c2, 0,  c3],  # εy = ∂v/∂y
        [c1, b1, c2, b2, c3, b3]   # γxy = ∂u/∂y + ∂v/∂x
    ]) / (2 * area)

    # Constitutive matrix (plane strain)
    factor = E / ((1 + nu) * (1 - 2*nu))
    D = factor * np.array([
        [1-nu, nu,   0],
        [nu,   1-nu, 0],
        [0,    0,    (1-2*nu)/2]
    ])

    # Element stiffness matrix (unit thickness)
    K_elem = area * B.T @ D @ B

    return K_elem
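
A corresponding check for the triangle matrix (assumes the xslope.fem import path): rigid-body translation produces no nodal forces, and the matrix is symmetric.

    import numpy as np
    from xslope.fem import build_triangle_stiffness_corrected  # assumed path

    coords = np.array([[0.0, 0.0], [3.0, 0.0], [0.0, 2.0]])
    K = build_triangle_stiffness_corrected(coords, E=2.0e4, nu=0.25)

    # Uniform x-translation is a zero-energy mode of the element.
    u_rigid = np.array([1.0, 0.0, 1.0, 0.0, 1.0, 0.0])
    assert np.allclose(K @ u_rigid, 0.0)
    assert np.allclose(K, K.T)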