xslope 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
xslope/fileio.py CHANGED
@@ -29,8 +29,8 @@ def build_ground_surface(profile_lines):
  which represents the true ground surface.
 
  Parameters:
- profile_lines (list of list of tuple): A list of profile lines, each represented
- as a list of (x, y) coordinate tuples.
+ profile_lines (list of dict): A list of profile lines, each represented
+ as a dict with 'coords' key containing a list of (x, y) coordinate tuples.
 
  Returns:
  shapely.geometry.LineString: A LineString of the top surface, or an empty LineString
@@ -40,9 +40,12 @@ def build_ground_surface(profile_lines):
  if not profile_lines:
  return LineString([])
 
+ # Extract coordinate lists from profile line dicts
+ coord_lists = [line['coords'] for line in profile_lines]
+
  # Step 1: Gather all points from all profile lines
  all_points = []
- for line in profile_lines:
+ for line in coord_lists:
  all_points.extend(line)
 
  # Step 2: Group points by x-coordinate and find the highest y for each x
@@ -61,7 +64,7 @@ def build_ground_surface(profile_lines):
 
  # Check intersections with all profile lines
  is_topmost = True
- for profile_line in profile_lines:
+ for profile_line in coord_lists:
  line = LineString(profile_line)
  if line.length == 0:
  continue
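
For orientation, here is a minimal sketch of the dict-based profile_lines structure that build_ground_surface now expects; the coordinates and mat_id values are illustrative, not taken from the package.

    from xslope.fileio import build_ground_surface

    # Hypothetical input: each profile line is a dict with 'coords' and 'mat_id'
    # (mat_id is stored 0-based after conversion from the 1-based Excel value).
    profile_lines = [
        {"coords": [(0.0, 30.0), (40.0, 30.0), (80.0, 10.0)], "mat_id": 0},
        {"coords": [(0.0, 20.0), (80.0, 5.0)], "mat_id": 1},
    ]

    # Returns a shapely LineString tracing the topmost surface across all lines.
    ground_surface = build_ground_surface(profile_lines)
    print(list(ground_surface.coords))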
@@ -125,10 +128,10 @@ def load_slope_data(filepath):
 
  try:
  template_version = main_df.iloc[4, 3] # Excel row 5, column D
- gamma_water = float(main_df.iloc[18, 3]) # Excel row 19, column D
- tcrack_depth = float(main_df.iloc[19, 3]) # Excel row 20, column D
- tcrack_water = float(main_df.iloc[20, 3]) # Excel row 21, column D
- k_seismic = float(main_df.iloc[21, 3]) # Excel row 22, column D
+ gamma_water = float(main_df.iloc[7, 3]) # Excel row 8, column D
+ tcrack_depth = float(main_df.iloc[8, 3]) # Excel row 9, column D
+ tcrack_water = float(main_df.iloc[9, 3]) # Excel row 10, column D
+ k_seismic = float(main_df.iloc[10, 3]) # Excel row 11, column D
  except Exception as e:
  raise ValueError(f"Error reading static global values from 'main' tab: {e}")
 
@@ -139,37 +142,76 @@ def load_slope_data(filepath):
  max_depth = float(profile_df.iloc[1, 1]) # Excel B2 = row 1, column 1
 
  profile_lines = []
-
- profile_data_blocks = [
- {"header_row": 4, "data_start": 5, "data_end": 20},
- {"header_row": 22, "data_start": 23, "data_end": 38}
- ]
- profile_block_width = 3
-
- for block in profile_data_blocks:
- for col in range(0, profile_df.shape[1], profile_block_width):
- x_col, y_col = col, col + 1
+
+ # New format: single data block, profile lines arranged horizontally
+ # First profile line: columns A:B, second: D:E, third: G:H, etc.
+ # Header row is row 4 (index 3), mat_id is in B5 (row 4, column 1)
+ # XY coordinates start in row 7 (index 6)
+ header_row = 3 # Excel row 4 (0-indexed)
+ mat_id_row = 4 # Excel row 5 (0-indexed)
+ coords_start_row = 7 # Excel row 8 (0-indexed)
+
+ col = 0 # Start with column A (index 0)
+ while col < profile_df.shape[1]:
+ x_col = col
+ y_col = col + 1
+
+ # Check if header row is empty (stop reading if empty)
+ try:
+ header_val = str(profile_df.iloc[header_row, x_col]).strip()
+ if not header_val or header_val.lower() == 'nan':
+ break # No more profile lines
+ except:
+ break # No more profile lines
+
+ # Read mat_id from B5 (row 4, column 1) for this profile line
+ # Convert from 1-based to 0-based for internal use
+ try:
+ mat_id_val = profile_df.iloc[mat_id_row, y_col]
+ if pd.isna(mat_id_val):
+ mat_id = None
+ else:
+ # Convert to integer and subtract 1 to make it 0-based
+ mat_id = int(float(mat_id_val)) - 1
+ if mat_id < 0:
+ mat_id = None # Invalid mat_id
+ except (ValueError, TypeError):
+ mat_id = None
+
+ # Read XY coordinates starting from row 7, stop at first empty row
+ coords = []
+ row = coords_start_row
+ while row < profile_df.shape[0]:
  try:
- x_header = str(profile_df.iloc[block["header_row"], x_col]).strip().lower()
- y_header = str(profile_df.iloc[block["header_row"], y_col]).strip().lower()
+ x_val = profile_df.iloc[row, x_col]
+ y_val = profile_df.iloc[row, y_col]
+
+ # Stop at first empty row (both x and y are empty)
+ if pd.isna(x_val) and pd.isna(y_val):
+ break
+
+ # If at least one coordinate is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ coords.append((float(x_val), float(y_val)))
  except:
- continue
- if x_header != 'x' or y_header != 'y':
- continue
- data = profile_df.iloc[block["data_start"]:block["data_end"], [x_col, y_col]]
- data = data.dropna(how='all')
- if data.empty:
- continue
- if data.iloc[0].isna().any():
- continue
- coords = data.dropna().apply(lambda r: (float(r.iloc[0]), float(r.iloc[1])), axis=1).tolist()
- if len(coords) == 1:
- raise ValueError("Each profile line must contain at least two points.")
- if coords:
- profile_lines.append(coords)
+ break
+ row += 1
+
+ # Validate that we have at least 2 points
+ if len(coords) == 1:
+ raise ValueError(f"Each profile line must contain at least two points. Profile line starting at column {chr(65 + col)} has only one point.")
+
+ if len(coords) >= 2:
+ # Store as dict with coords and mat_id
+ profile_lines.append({
+ 'coords': coords,
+ 'mat_id': mat_id
+ })
+
+ # Move to next profile line (skip 3 columns: A->D, D->G, etc.)
+ col += 3
 
  # === BUILD GROUND SURFACE FROM PROFILE LINES ===
-
  ground_surface = build_ground_surface(profile_lines)
 
  # === BUILD TENSILE CRACK LINE ===
@@ -179,42 +221,65 @@ def load_slope_data(filepath):
  tcrack_surface = LineString([(x, y - tcrack_depth) for (x, y) in ground_surface.coords])
 
  # === MATERIALS (Optimized Parsing) ===
- mat_df = xls.parse('mat', header=2) # header=2 because the header row is row 3 in Excel
+ mat_df = xls.parse('mat', header=7) # header=7 because the header row is row 8 in Excel (0-indexed row 7)
  materials = []
 
- # Only process rows 4-15 (Excel), which are 0-indexed 0-11 in pandas
- for _, row in mat_df.iloc[0:12].iterrows():
- # Check if the row is blank (columns 2-22, which are indices 1-21)
- if row.iloc[1:22].isna().all():
- continue
+ def _num(x):
+ v = pd.to_numeric(x, errors="coerce")
+ return float(v) if pd.notna(v) else 0.0
+
+ # Read materials row by row until we encounter an empty material name (Column B)
+ # Data starts at Excel row 9 (0-indexed row 0 after header=7)
+ for i in range(len(mat_df)):
+ row = mat_df.iloc[i]
+
+ # Check if material name (Column B) is empty - stop reading if empty
+ material_name = row.get('name', '')
+ if pd.isna(material_name) or str(material_name).strip() == '':
+ break # Stop reading when we encounter an empty material name
+
+ # For seep workflows, 'g' (unit weight) and shear strength properties are not required.
+ # A material row is considered "missing" only if Excel columns C:X are empty.
+ # (Excel A:B are number and name; C:X contain the actual property fields.)
+ start_col = 2 # C
+ end_col = min(mat_df.shape[1], 24) # X is column 24 (1-based) -> index 23, so slice end is 24
+ c_to_x_empty = True if start_col >= end_col else row.iloc[start_col:end_col].isna().all()
+ if c_to_x_empty:
+ # Excel row number: header is on row 8, first data row is row 9
+ excel_row = i + 9
+ raise ValueError(
+ "CRITICAL ERROR: Material row has empty property fields. "
+ f"Material '{material_name}' (Excel row {excel_row}) is blank in columns C:X."
+ )
+
  materials.append({
- "name": row.get('name', ''),
- "gamma": float(row.get('g', 0) or 0),
+ "name": str(material_name).strip(),
+ "gamma": _num(row.get("g", 0)),
  "option": str(row.get('option', '')).strip().lower(),
- "c": float(row.get('c', 0) or 0),
- "phi": float(row.get('f', 0) or 0),
- "cp": float(row.get('cp', 0) or 0),
- "r_elev": float(row.get('r-elev', 0) or 0),
- "d": float(row.get('d', 0)) if pd.notna(row.get('d')) else 0,
- "psi": float(row.get('ψ', 0)) if pd.notna(row.get('ψ')) else 0,
+ "c": _num(row.get('c', 0)),
+ "phi": _num(row.get('f', 0)),
+ "cp": _num(row.get('cp', 0)),
+ "r_elev": _num(row.get('r-elev', 0)),
+ "d": _num(row.get('d', 0)) if pd.notna(row.get('d')) else 0,
+ "psi": _num(row.get('ψ', 0)) if pd.notna(row.get('ψ')) else 0,
  "u": str(row.get('u', 'none')).strip().lower(),
- "sigma_gamma": float(row.get('s(g)', 0) or 0),
- "sigma_c": float(row.get('s(c)', 0) or 0),
- "sigma_phi": float(row.get('s(f)', 0) or 0),
- "sigma_cp": float(row.get('s(cp)', 0) or 0),
- "sigma_d": float(row.get('s(d)', 0) or 0),
- "sigma_psi": float(row.get('s(ψ)', 0) or 0),
- "k1": float(row.get('k1', 0) or 0),
- "k2": float(row.get('k2', 0) or 0),
- "alpha": float(row.get('alpha', 0) or 0),
- "kr0" : float(row.get('kr0', 0) or 0),
- "h0" : float(row.get('h0', 0) or 0),
- "E": float(row.get('E', 0) or 0),
- "nu": float(row.get('n', 0) or 0)
+ "sigma_gamma": _num(row.get('s(g)', 0)),
+ "sigma_c": _num(row.get('s(c)', 0)),
+ "sigma_phi": _num(row.get('s(f)', 0)),
+ "sigma_cp": _num(row.get('s(cp)', 0)),
+ "sigma_d": _num(row.get('s(d)', 0)),
+ "sigma_psi": _num(row.get('s(ψ)', 0)),
+ "k1": _num(row.get('k1', 0)),
+ "k2": _num(row.get('k2', 0)),
+ "alpha": _num(row.get('alpha', 0)),
+ "kr0" : _num(row.get('kr0', 0)),
+ "h0" : _num(row.get('h0', 0)),
+ "E": _num(row.get('E', 0)),
+ "nu": _num(row.get('n', 0))
  })
 
  # === SEEPAGE ANALYSIS FILES ===
- # Check if any materials use seepage analysis for pore pressure
+ # Check if any materials use seep analysis for pore pressure
  has_seep_materials = any(material["u"] == "seep" for material in materials)
 
  seep_mesh = None
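
As a quick illustration of the _num() coercion helper introduced in the materials block above (a standalone sketch; in the diff the helper is defined inside load_slope_data), non-numeric or missing cells fall back to 0.0 instead of raising:

    import pandas as pd

    def _num(x):
        # Coerce a spreadsheet cell to float; blanks, text, and NaN become 0.0.
        v = pd.to_numeric(x, errors="coerce")
        return float(v) if pd.notna(v) else 0.0

    print(_num("3.5"))         # 3.5
    print(_num("n/a"))         # 0.0
    print(_num(float("nan")))  # 0.0

This replaces the previous float(row.get(..., 0) or 0) pattern, which raised on non-numeric text entries.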
@@ -222,111 +287,213 @@ def load_slope_data(filepath):
  seep_u2 = None
 
  if has_seep_materials:
- # Read seepage file names directly from Excel cells L19, L20, L21
  try:
- # Read the 'mat' sheet directly without header parsing
- mat_raw_df = xls.parse('mat', header=None)
-
- # L19 = row 18, column 11 (0-indexed)
- mesh_filename = str(mat_raw_df.iloc[18, 11]).strip() # L19
- solution1_filename = str(mat_raw_df.iloc[19, 11]).strip() # L20
- solution2_filename = str(mat_raw_df.iloc[20, 11]).strip() # L21
-
- # Validate required files
- if not mesh_filename or mesh_filename.lower() == 'nan':
- raise ValueError("CRITICAL ERROR: Mesh filename is required when using 'seep' pore pressure option but is blank in cell L19.")
- if not solution1_filename or solution1_filename.lower() == 'nan':
- raise ValueError("CRITICAL ERROR: Solution1 filename is required when using 'seep' pore pressure option but is blank in cell L20.")
-
- # Load mesh file
+ base, _ = os.path.splitext(filepath)
+ mesh_filename = f"{base}_mesh.json"
+ solution1_filename = f"{base}_seep.csv"
+ solution2_filename = f"{base}_seep2.csv"
+
+ missing_required = []
  if not os.path.exists(mesh_filename):
- raise ValueError(f"CRITICAL ERROR: Mesh file '{mesh_filename}' not found.")
- seep_mesh = import_mesh_from_json(mesh_filename)
-
- # Load solution1 file
+ missing_required.append(mesh_filename)
  if not os.path.exists(solution1_filename):
- raise ValueError(f"CRITICAL ERROR: Solution1 file '{solution1_filename}' not found.")
- solution1_df = pd.read_csv(solution1_filename)
- # Skip the last row which contains the total flowrate comment
- solution1_df = solution1_df.iloc[:-1]
- seep_u = solution1_df["u"].to_numpy()
-
- # Load solution2 file if provided
- if solution2_filename and solution2_filename.lower() != 'nan':
- if not os.path.exists(solution2_filename):
- raise ValueError(f"CRITICAL ERROR: Solution2 file '{solution2_filename}' not found.")
- solution2_df = pd.read_csv(solution2_filename)
- # Skip the last row which contains the total flowrate comment
- solution2_df = solution2_df.iloc[:-1]
- seep_u2 = solution2_df["u"].to_numpy()
-
- except Exception as e:
- if "CRITICAL ERROR" in str(e):
- raise e
+ missing_required.append(solution1_filename)
+
+ if missing_required:
+ missing_list = ", ".join(f"'{path}'" for path in missing_required)
+ print(
+ "WARNING: Seep pore pressure option selected but required seep files "
+ f"were not found: {missing_list}. Continuing without seep data."
+ )
  else:
- raise ValueError(f"Error reading seepage files: {e}")
+ seep_mesh = import_mesh_from_json(mesh_filename)
+ solution1_df = pd.read_csv(solution1_filename)
+ solution1_df = solution1_df.iloc[:-1]
+ seep_u = solution1_df["u"].to_numpy()
+
+ if os.path.exists(solution2_filename):
+ solution2_df = pd.read_csv(solution2_filename)
+ solution2_df = solution2_df.iloc[:-1]
+ seep_u2 = solution2_df["u"].to_numpy()
+
+ except Exception as e:
+ print(f"WARNING: Error reading seepage files: {e}. Continuing without seep data.")
 
  # === PIEZOMETRIC LINE ===
- piezo_df = xls.parse('piezo')
+ piezo_df = xls.parse('piezo', header=None)
  piezo_line = []
  piezo_line2 = []
 
- # Read all data once (rows 4-18)
- piezo_data = piezo_df.iloc[2:18].dropna(how='all')
+ # Read first piezometric line (columns A:B, starting at row 4, Excel row 4 = index 3)
+ # Keep reading until we encounter an empty row
+ start_row = 3 # Excel row 4 (0-indexed row 3)
+ x_col = 0 # Column A
+ y_col = 1 # Column B
 
- if len(piezo_data) >= 2:
- # Extract first table (A4:B18) - columns 0 and 1
+ row = start_row
+ while row < piezo_df.shape[0]:
  try:
- piezo_data1 = piezo_data.dropna(subset=[piezo_data.columns[0], piezo_data.columns[1]], how='all')
- if len(piezo_data1) < 2:
- raise ValueError("First piezometric line must contain at least two points.")
- piezo_line = piezo_data1.apply(lambda row: (float(row.iloc[0]), float(row.iloc[1])), axis=1).tolist()
- except Exception:
- raise ValueError("Invalid first piezometric line format.")
-
- # Extract second table (D4:E18) - columns 3 and 4
+ x_val = piezo_df.iloc[row, x_col]
+ y_val = piezo_df.iloc[row, y_col]
+
+ # Stop at first empty row (both x and y are empty)
+ if pd.isna(x_val) and pd.isna(y_val):
+ break
+
+ # If at least one coordinate is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ piezo_line.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ # Validate first piezometric line
+ if len(piezo_line) == 1:
+ raise ValueError("First piezometric line must contain at least two points.")
+
+ # Read second piezometric line (columns D:E, starting at row 4, Excel row 4 = index 3)
+ # Keep reading until we encounter an empty row
+ x_col2 = 3 # Column D
+ y_col2 = 4 # Column E
+
+ row = start_row
+ while row < piezo_df.shape[0]:
  try:
- piezo_data2 = piezo_data.dropna(subset=[piezo_data.columns[3], piezo_data.columns[4]], how='all')
- if len(piezo_data2) < 2:
- raise ValueError("Second piezometric line must contain at least two points.")
- piezo_line2 = piezo_data2.apply(lambda row: (float(row.iloc[3]), float(row.iloc[4])), axis=1).tolist()
- except Exception:
- # If second table reading fails, just leave piezo_line2 as empty list
- piezo_line2 = []
- elif len(piezo_data) == 1:
- raise ValueError("Piezometric line must contain at least two points.")
+ x_val = piezo_df.iloc[row, x_col2]
+ y_val = piezo_df.iloc[row, y_col2]
+
+ # Stop at first empty row (both x and y are empty)
+ if pd.isna(x_val) and pd.isna(y_val):
+ break
+
+ # If at least one coordinate is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ piezo_line2.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ # Validate second piezometric line (only if it has data)
+ if len(piezo_line2) == 1:
+ raise ValueError("Second piezometric line must contain at least two points if provided.")
 
  # === DISTRIBUTED LOADS ===
+ # Read first set from "dloads" tab
  dload_df = xls.parse('dloads', header=None)
  dloads = []
+
+ # Start reading from column B (index 1), each distributed load uses 3 columns (X, Y, Normal)
+ # Keep reading to the right until we encounter an empty distributed load
+ start_row = 3 # Excel row 4 (0-indexed row 3)
+ col = 1 # Start with column B (index 1)
+
+ while col < dload_df.shape[1]:
+ x_col = col
+ y_col = col + 1
+ normal_col = col + 2
+
+ # Check if dataframe has enough rows before accessing start_row
+ if dload_df.shape[0] <= start_row:
+ break # Not enough rows, stop reading
+
+ # Check if this distributed load block is empty (check first row for X coordinate)
+ if pd.isna(dload_df.iloc[start_row, x_col]):
+ break # Stop reading when we encounter an empty distributed load
+
+ # Read points for this distributed load, keep reading down until empty row
+ block_points = []
+ row = start_row
+ while row < dload_df.shape[0]:
+ try:
+ x_val = dload_df.iloc[row, x_col]
+ y_val = dload_df.iloc[row, y_col]
+ normal_val = dload_df.iloc[row, normal_col]
+
+ # Stop at first empty row (all three values are empty)
+ if pd.isna(x_val) and pd.isna(y_val) and pd.isna(normal_val):
+ break
+
+ # If at least coordinates are present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ normal = float(normal_val) if pd.notna(normal_val) else 0.0
+ block_points.append({
+ "X": float(x_val),
+ "Y": float(y_val),
+ "Normal": normal
+ })
+ except:
+ break
+ row += 1
+
+ # Validate that we have at least 2 points
+ if len(block_points) == 1:
+ raise ValueError(f"Each distributed load must contain at least two points. Distributed load starting at column {chr(65 + col)} has only one point.")
+
+ if len(block_points) >= 2:
+ dloads.append(block_points)
+
+ # Move to next distributed load (skip 4 columns: 3 for the dload + 1 empty column)
+ col += 4
+
+ # Read second set from "dloads (2)" tab
  dloads2 = []
- dload_data_blocks = [
- {"start_row": 3, "end_row": 13},
- {"start_row": 16, "end_row": 26}
- ]
- dload_block_starts = [1, 5, 9, 13]
-
- for block_idx, block in enumerate(dload_data_blocks):
- for col in dload_block_starts:
- section = dload_df.iloc[block["start_row"]:block["end_row"], col:col + 3]
- section = section.dropna(how='all')
- section = section.dropna(subset=[col, col + 1], how='any')
- if len(section) >= 2:
+ try:
+ dload_df2 = xls.parse('dloads (2)', header=None)
+
+ # Start reading from column B (index 1), each distributed load uses 3 columns (X, Y, Normal)
+ # Keep reading to the right until we encounter an empty distributed load
+ col = 1 # Start with column B (index 1)
+
+ while col < dload_df2.shape[1]:
+ x_col = col
+ y_col = col + 1
+ normal_col = col + 2
+
+ # Check if dataframe has enough rows before accessing start_row
+ if dload_df2.shape[0] <= start_row:
+ break # Not enough rows, stop reading
+
+ # Check if this distributed load block is empty (check first row for X coordinate)
+ if pd.isna(dload_df2.iloc[start_row, x_col]):
+ break # Stop reading when we encounter an empty distributed load
+
+ # Read points for this distributed load, keep reading down until empty row
+ block_points = []
+ row = start_row
+ while row < dload_df2.shape[0]:
  try:
- block_points = section.apply(
- lambda row: {
- "X": float(row.iloc[0]),
- "Y": float(row.iloc[1]),
- "Normal": float(row.iloc[2])
- }, axis=1).tolist()
- if block_idx == 0:
- dloads.append(block_points)
- else:
- dloads2.append(block_points)
+ x_val = dload_df2.iloc[row, x_col]
+ y_val = dload_df2.iloc[row, y_col]
+ normal_val = dload_df2.iloc[row, normal_col]
+
+ # Stop at first empty row (all three values are empty)
+ if pd.isna(x_val) and pd.isna(y_val) and pd.isna(normal_val):
+ break
+
+ # If at least coordinates are present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ normal = float(normal_val) if pd.notna(normal_val) else 0.0
+ block_points.append({
+ "X": float(x_val),
+ "Y": float(y_val),
+ "Normal": normal
+ })
  except:
- raise ValueError("Invalid data format in distributed load block.")
- elif len(section) == 1:
- raise ValueError("Each distributed load block must contain at least two points.")
+ break
+ row += 1
+
+ # Validate that we have at least 2 points
+ if len(block_points) == 1:
+ raise ValueError(f"Each distributed load must contain at least two points. Distributed load starting at column {chr(65 + col)} has only one point.")
+
+ if len(block_points) >= 2:
+ dloads2.append(block_points)
+
+ # Move to next distributed load (skip 4 columns: 3 for the dload + 1 empty column)
+ col += 4
+ except (ValueError, KeyError):
+ # If "dloads (2)" tab doesn't exist, just leave dloads2 as empty list
+ pass
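
For reference, a sketch (illustrative values only) of the dloads structure produced by the new column-block reader: a list of load blocks, each a list of point dicts, read three columns at a time and stepping four columns to the right per block; dloads2 from the optional "dloads (2)" tab has the same shape.

    # Hypothetical result of parsing two distributed-load blocks from the 'dloads' tab.
    dloads = [
        [  # first block, columns B:D (X, Y, Normal)
            {"X": 10.0, "Y": 25.0, "Normal": 500.0},
            {"X": 20.0, "Y": 25.0, "Normal": 500.0},
        ],
        [  # second block, columns F:H
            {"X": 35.0, "Y": 18.0, "Normal": 250.0},
            {"X": 45.0, "Y": 18.0, "Normal": 0.0},  # Normal defaults to 0.0 when blank
        ],
    ]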
 
  # === CIRCLES ===
 
@@ -376,11 +543,16 @@ def load_slope_data(filepath):
  reinforce_df = xls.parse('reinforce', header=1) # Header in row 2 (0-indexed row 1)
  reinforce_lines = []
 
- # Process rows 3-22 (Excel) which are 0-indexed rows 0-19 in pandas after header=1
- for i, row in reinforce_df.iloc[0:20].iterrows():
- # Check if the row has coordinate data (x1, y1, x2, y2)
- if pd.isna(row.iloc[1]) or pd.isna(row.iloc[2]) or pd.isna(row.iloc[3]) or pd.isna(row.iloc[4]):
- continue # Skip empty rows
+ # Process rows starting from row 3 (Excel) which is 0-indexed row 0 in pandas after header=1
+ # Keep reading until we encounter an empty value in column B
+ for i, row in reinforce_df.iterrows():
+ # Check if column B (x1 coordinate) is empty - stop reading if empty
+ if pd.isna(row.iloc[1]):
+ break # Stop reading when column B is empty
+
+ # Check if other required coordinates are present
+ if pd.isna(row.iloc[2]) or pd.isna(row.iloc[3]) or pd.isna(row.iloc[4]):
+ continue # Skip rows with incomplete coordinate data
 
  # If coordinates are present, check for required parameters (Tmax, Lp1, Lp2)
  if pd.isna(row.iloc[5]) or pd.isna(row.iloc[7]) or pd.isna(row.iloc[8]):
@@ -508,76 +680,160 @@ def load_slope_data(filepath):
 
 
  # === SEEPAGE ANALYSIS BOUNDARY CONDITIONS ===
+ # Read first set from "seep bc" sheet
  seep_df = xls.parse('seep bc', header=None)
  seepage_bc = {"specified_heads": [], "exit_face": []}
-
- # Specified Head #1
- head1 = seep_df.iloc[2, 2] if seep_df.shape[1] > 2 and seep_df.shape[0] > 2 else None
- coords1 = []
- for i in range(4, 12): # rows 5-12 (0-indexed 4-11)
- if i >= seep_df.shape[0]:
- break
- x = seep_df.iloc[i, 1] if seep_df.shape[1] > 1 else None
- y = seep_df.iloc[i, 2] if seep_df.shape[1] > 2 else None
- if pd.notna(x) and pd.notna(y):
- coords1.append((float(x), float(y)))
- if head1 is not None and coords1:
- seepage_bc["specified_heads"].append({"head": float(head1), "coords": coords1})
-
- # Specified Head #2
- head2 = seep_df.iloc[2, 5] if seep_df.shape[1] > 5 and seep_df.shape[0] > 2 else None
- coords2 = []
- for i in range(4, 12):
- if i >= seep_df.shape[0]:
- break
- x = seep_df.iloc[i, 4] if seep_df.shape[1] > 4 else None
- y = seep_df.iloc[i, 5] if seep_df.shape[1] > 5 else None
- if pd.notna(x) and pd.notna(y):
- coords2.append((float(x), float(y)))
- if head2 is not None and coords2:
- seepage_bc["specified_heads"].append({"head": float(head2), "coords": coords2})
-
- # Specified Head #3
- head3 = seep_df.iloc[2, 8] if seep_df.shape[1] > 8 and seep_df.shape[0] > 2 else None
- coords3 = []
- for i in range(4, 12):
- if i >= seep_df.shape[0]:
- break
- x = seep_df.iloc[i, 7] if seep_df.shape[1] > 7 else None
- y = seep_df.iloc[i, 8] if seep_df.shape[1] > 8 else None
- if pd.notna(x) and pd.notna(y):
- coords3.append((float(x), float(y)))
- if head3 is not None and coords3:
- seepage_bc["specified_heads"].append({"head": float(head3), "coords": coords3})
-
- # Exit Face
+
+ # Exit Face BC: starts at B5 (row 4, columns 1 and 2), continues down until empty x value
  exit_coords = []
- for i in range(15, 23): # rows 16-23 (0-indexed 15-22)
- if i >= seep_df.shape[0]:
+ exit_start_row = 4 # Excel row 5 (0-indexed row 4)
+ exit_x_col = 1 # Column B
+ exit_y_col = 2 # Column C
+
+ row = exit_start_row
+ while row < seep_df.shape[0]:
+ try:
+ x_val = seep_df.iloc[row, exit_x_col]
+ y_val = seep_df.iloc[row, exit_y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert (y can be empty but we'll still add the point)
+ if pd.notna(x_val) and pd.notna(y_val):
+ exit_coords.append((float(x_val), float(y_val)))
+ except:
  break
- x = seep_df.iloc[i, 1] if seep_df.shape[1] > 1 else None
- y = seep_df.iloc[i, 2] if seep_df.shape[1] > 2 else None
- if pd.notna(x) and pd.notna(y):
- exit_coords.append((float(x), float(y)))
+ row += 1
  seepage_bc["exit_face"] = exit_coords
+
+ # Specified Head BCs: start at columns E:F, then H:I, etc.
+ # Head value is in row 3 (index 2), XY values start at row 5 (index 4)
+ # Keep reading to the right until head value in row 3 is empty
+ head_row = 2 # Excel row 3 (0-indexed row 2)
+ data_start_row = 4 # Excel row 5 (0-indexed row 4)
+ col = 4 # Start with column E (index 4)
+
+ while col < seep_df.shape[1]:
+ x_col = col
+ y_col = col + 1
+ head_col = col + 1 # Head value is in the Y column (F, I, L, etc.)
+
+ # Check if head value in row 3 is empty - stop reading if empty
+ if seep_df.shape[0] <= head_row:
+ break
+ head_val = seep_df.iloc[head_row, head_col]
+ if pd.isna(head_val):
+ break # Stop reading when head value is empty
+
+ # Read XY coordinates starting from row 5, continue down until empty
+ coords = []
+ row = data_start_row
+ while row < seep_df.shape[0]:
+ try:
+ x_val = seep_df.iloc[row, x_col]
+ y_val = seep_df.iloc[row, y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ coords.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ if coords: # Only add if we have coordinates
+ seepage_bc["specified_heads"].append({"head": float(head_val), "coords": coords})
+
+ # Move to next specified head BC (skip 3 columns: E->H, H->K, etc.)
+ col += 3
+
+ # Read second set from "seep bc (2)" sheet
+ seepage_bc2 = {"specified_heads": [], "exit_face": []}
+ try:
+ seep_df2 = xls.parse('seep bc (2)', header=None)
+
+ # Exit Face BC: starts at B5 (row 4, columns 1 and 2), continues down until empty x value
+ exit_coords2 = []
+ row = exit_start_row
+ while row < seep_df2.shape[0]:
+ try:
+ x_val = seep_df2.iloc[row, exit_x_col]
+ y_val = seep_df2.iloc[row, exit_y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ exit_coords2.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+ seepage_bc2["exit_face"] = exit_coords2
+
+ # Specified Head BCs: same structure as first sheet
+ col = 4 # Start with column E (index 4)
+ while col < seep_df2.shape[1]:
+ x_col = col
+ y_col = col + 1
+ head_col = col + 1 # Head value is in the Y column
+
+ # Check if head value in row 3 is empty - stop reading if empty
+ if seep_df2.shape[0] <= head_row:
+ break
+ head_val = seep_df2.iloc[head_row, head_col]
+ if pd.isna(head_val):
+ break # Stop reading when head value is empty
+
+ # Read XY coordinates starting from row 5, continue down until empty
+ coords = []
+ row = data_start_row
+ while row < seep_df2.shape[0]:
+ try:
+ x_val = seep_df2.iloc[row, x_col]
+ y_val = seep_df2.iloc[row, y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ coords.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ if coords: # Only add if we have coordinates
+ seepage_bc2["specified_heads"].append({"head": float(head_val), "coords": coords})
+
+ # Move to next specified head BC (skip 3 columns: E->H, H->K, etc.)
+ col += 3
+ except (ValueError, KeyError):
+ # If "seep bc (2)" sheet doesn't exist, just leave seepage_bc2 as empty
+ pass
 
  # === VALIDATION ===
 
  circular = len(circles) > 0
- # Check if this is a seepage-only analysis (has seepage BCs but no slope stability surfaces)
+ # Check if this is a seep-only analysis (has seep BCs but no slope stability surfaces)
  has_seepage_bc = (len(seepage_bc.get("specified_heads", [])) > 0 or
  len(seepage_bc.get("exit_face", [])) > 0)
  is_seepage_only = has_seepage_bc and not circular and len(non_circ) == 0
 
- # Only require circular/non-circular data if this is NOT a seepage-only analysis
+ # Only require circular/non-circular data if this is NOT a seep-only analysis
  if not is_seepage_only and not circular and len(non_circ) == 0:
  raise ValueError("Input must include either circular or non-circular surface data.")
  if not profile_lines:
  raise ValueError("Profile lines sheet is empty or invalid.")
  if not materials:
  raise ValueError("Materials sheet is empty.")
- if len(materials) != len(profile_lines):
- raise ValueError("Each profile line must have a corresponding material. You have " + str(len(materials)) + " materials and " + str(len(profile_lines)) + " profile lines.")
 
 
  # Add everything to globals_data
@@ -600,8 +856,9 @@ def load_slope_data(filepath):
  globals_data["dloads2"] = dloads2
  globals_data["reinforce_lines"] = reinforce_lines
  globals_data["seepage_bc"] = seepage_bc
+ globals_data["seepage_bc2"] = seepage_bc2
 
- # Add seepage data if available
+ # Add seep data if available
  if has_seep_materials:
  globals_data["seep_mesh"] = seep_mesh
  globals_data["seep_u"] = seep_u
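
Finally, a small sketch of the seep-file naming convention that 0.1.13 derives from the workbook path, replacing the filenames previously read from cells L19:L21 of the 'mat' sheet; the example path is hypothetical.

    import os

    filepath = "examples/dam_slope.xlsx"        # input workbook (hypothetical)
    base, _ = os.path.splitext(filepath)

    mesh_filename = f"{base}_mesh.json"         # examples/dam_slope_mesh.json
    solution1_filename = f"{base}_seep.csv"     # examples/dam_slope_seep.csv
    solution2_filename = f"{base}_seep2.csv"    # examples/dam_slope_seep2.csv (optional)

    # In 0.1.13, missing required files produce a warning and the seep data is
    # skipped, instead of raising a CRITICAL ERROR as in 0.1.11.
    for name in (mesh_filename, solution1_filename):
        if not os.path.exists(name):
            print(f"WARNING: required seep file not found: {name}")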