xslope 0.1.12__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
xslope/fileio.py CHANGED
@@ -29,8 +29,8 @@ def build_ground_surface(profile_lines):
  which represents the true ground surface.
 
  Parameters:
- profile_lines (list of list of tuple): A list of profile lines, each represented
- as a list of (x, y) coordinate tuples.
+ profile_lines (list of dict): A list of profile lines, each represented
+ as a dict with 'coords' key containing a list of (x, y) coordinate tuples.
 
  Returns:
  shapely.geometry.LineString: A LineString of the top surface, or an empty LineString
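For orientation, the new input shape can be exercised directly. A minimal sketch, assuming the function is importable as xslope.fileio.build_ground_surface and using the dict keys shown in this diff ('coords', plus 'mat_id' further below); the coordinates are hypothetical:

    from shapely.geometry import LineString
    from xslope.fileio import build_ground_surface

    # Hypothetical two-layer geometry in the new dict-based profile-line format.
    profile_lines = [
        {"coords": [(0.0, 20.0), (30.0, 20.0), (60.0, 5.0)], "mat_id": 0},
        {"coords": [(0.0, 10.0), (60.0, 0.0)], "mat_id": 1},
    ]

    top = build_ground_surface(profile_lines)  # topmost surface as a shapely LineString
    assert isinstance(top, LineString)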
@@ -40,9 +40,12 @@ def build_ground_surface(profile_lines):
  if not profile_lines:
  return LineString([])
 
+ # Extract coordinate lists from profile line dicts
+ coord_lists = [line['coords'] for line in profile_lines]
+
  # Step 1: Gather all points from all profile lines
  all_points = []
- for line in profile_lines:
+ for line in coord_lists:
  all_points.extend(line)
 
  # Step 2: Group points by x-coordinate and find the highest y for each x
@@ -61,7 +64,7 @@ def build_ground_surface(profile_lines):
 
  # Check intersections with all profile lines
  is_topmost = True
- for profile_line in profile_lines:
+ for profile_line in coord_lists:
  line = LineString(profile_line)
  if line.length == 0:
  continue
@@ -125,10 +128,10 @@ def load_slope_data(filepath):
 
  try:
  template_version = main_df.iloc[4, 3] # Excel row 5, column D
- gamma_water = float(main_df.iloc[18, 3]) # Excel row 19, column D
- tcrack_depth = float(main_df.iloc[19, 3]) # Excel row 20, column D
- tcrack_water = float(main_df.iloc[20, 3]) # Excel row 21, column D
- k_seismic = float(main_df.iloc[21, 3]) # Excel row 22, column D
+ gamma_water = float(main_df.iloc[7, 3]) # Excel row 8, column D
+ tcrack_depth = float(main_df.iloc[8, 3]) # Excel row 9, column D
+ tcrack_water = float(main_df.iloc[9, 3]) # Excel row 10, column D
+ k_seismic = float(main_df.iloc[10, 3]) # Excel row 11, column D
  except Exception as e:
  raise ValueError(f"Error reading static global values from 'main' tab: {e}")
 
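The relocated reads above follow the mapping implied by the comments in this hunk: 0-indexed iloc row N corresponds to 1-indexed Excel row N+1, and column D is index 3. A small hypothetical helper (not part of the package) that makes the convention explicit:

    def main_cell(main_df, excel_row, excel_col):
        # Hypothetical helper: Excel row 8, column 'D' -> main_df.iloc[7, 3]
        col_index = ord(excel_col.upper()) - ord("A")
        return main_df.iloc[excel_row - 1, col_index]

    # e.g. gamma_water = float(main_cell(main_df, 8, "D"))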
@@ -139,38 +142,76 @@ def load_slope_data(filepath):
  max_depth = float(profile_df.iloc[1, 1]) # Excel B2 = row 1, column 1
 
  profile_lines = []
-
- profile_data_blocks = [
- {"header_row": 4, "data_start": 5, "data_end": 20},
- {"header_row": 22, "data_start": 23, "data_end": 38},
- {"header_row": 40, "data_start": 41, "data_end": 56},
- ]
- profile_block_width = 3
-
- for block in profile_data_blocks:
- for col in range(0, profile_df.shape[1], profile_block_width):
- x_col, y_col = col, col + 1
+
+ # New format: single data block, profile lines arranged horizontally
+ # First profile line: columns A:B, second: D:E, third: G:H, etc.
+ # Header row is row 4 (index 3), mat_id is in B5 (row 4, column 1)
+ # XY coordinates start in row 7 (index 6)
+ header_row = 3 # Excel row 4 (0-indexed)
+ mat_id_row = 4 # Excel row 5 (0-indexed)
+ coords_start_row = 7 # Excel row 8 (0-indexed)
+
+ col = 0 # Start with column A (index 0)
+ while col < profile_df.shape[1]:
+ x_col = col
+ y_col = col + 1
+
+ # Check if header row is empty (stop reading if empty)
+ try:
+ header_val = str(profile_df.iloc[header_row, x_col]).strip()
+ if not header_val or header_val.lower() == 'nan':
+ break # No more profile lines
+ except:
+ break # No more profile lines
+
+ # Read mat_id from B5 (row 4, column 1) for this profile line
+ # Convert from 1-based to 0-based for internal use
+ try:
+ mat_id_val = profile_df.iloc[mat_id_row, y_col]
+ if pd.isna(mat_id_val):
+ mat_id = None
+ else:
+ # Convert to integer and subtract 1 to make it 0-based
+ mat_id = int(float(mat_id_val)) - 1
+ if mat_id < 0:
+ mat_id = None # Invalid mat_id
+ except (ValueError, TypeError):
+ mat_id = None
+
+ # Read XY coordinates starting from row 7, stop at first empty row
+ coords = []
+ row = coords_start_row
+ while row < profile_df.shape[0]:
  try:
- x_header = str(profile_df.iloc[block["header_row"], x_col]).strip().lower()
- y_header = str(profile_df.iloc[block["header_row"], y_col]).strip().lower()
+ x_val = profile_df.iloc[row, x_col]
+ y_val = profile_df.iloc[row, y_col]
+
+ # Stop at first empty row (both x and y are empty)
+ if pd.isna(x_val) and pd.isna(y_val):
+ break
+
+ # If at least one coordinate is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ coords.append((float(x_val), float(y_val)))
  except:
- continue
- if x_header != 'x' or y_header != 'y':
- continue
- data = profile_df.iloc[block["data_start"]:block["data_end"], [x_col, y_col]]
- data = data.dropna(how='all')
- if data.empty:
- continue
- if data.iloc[0].isna().any():
- continue
- coords = data.dropna().apply(lambda r: (float(r.iloc[0]), float(r.iloc[1])), axis=1).tolist()
- if len(coords) == 1:
- raise ValueError("Each profile line must contain at least two points.")
- if coords:
- profile_lines.append(coords)
+ break
+ row += 1
+
+ # Validate that we have at least 2 points
+ if len(coords) == 1:
+ raise ValueError(f"Each profile line must contain at least two points. Profile line starting at column {chr(65 + col)} has only one point.")
+
+ if len(coords) >= 2:
+ # Store as dict with coords and mat_id
+ profile_lines.append({
+ 'coords': coords,
+ 'mat_id': mat_id
+ })
+
+ # Move to next profile line (skip 3 columns: A->D, D->G, etc.)
+ col += 3
 
  # === BUILD GROUND SURFACE FROM PROFILE LINES ===
-
  ground_surface = build_ground_surface(profile_lines)
 
  # === BUILD TENSILE CRACK LINE ===
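As a quick check of the horizontal layout described in the hunk above, the col += 3 stride visits the documented Excel column pairs, and workbook mat_id values are shifted from 1-based to 0-based. An illustrative sketch only:

    # Column stride of the new 'profile' layout: blocks at A:B, D:E, G:H, ...
    for block in range(3):
        col = 3 * block
        print(f"block {block}: x in column {chr(65 + col)}, y in column {chr(65 + col + 1)}")

    # mat_id cells are 1-based in the workbook and stored 0-based internally.
    assert int(float("2")) - 1 == 1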
@@ -180,43 +221,39 @@ def load_slope_data(filepath):
  tcrack_surface = LineString([(x, y - tcrack_depth) for (x, y) in ground_surface.coords])
 
  # === MATERIALS (Optimized Parsing) ===
- mat_df = xls.parse('mat', header=2) # header=2 because the header row is row 3 in Excel
+ mat_df = xls.parse('mat', header=7) # header=7 because the header row is row 8 in Excel (0-indexed row 7)
  materials = []
 
- required_materials = len(profile_lines)
-
  def _num(x):
  v = pd.to_numeric(x, errors="coerce")
  return float(v) if pd.notna(v) else 0.0
 
- # Read exactly one material row per profile line.
- # Materials are positional: Excel row 4 corresponds to profile line 1, row 5 to line 2, etc.
- for i in range(required_materials):
- # Excel row number: header is on row 3, first data row is row 4
- excel_row = i + 4
-
- if i >= len(mat_df):
- raise ValueError(
- "CRITICAL ERROR: Materials table ended early. "
- f"Expected {required_materials} materials for {required_materials} profile lines, "
- f"but ran out of rows at Excel row {excel_row}."
- )
-
+ # Read materials row by row until we encounter an empty material name (Column B)
+ # Data starts at Excel row 9 (0-indexed row 0 after header=7)
+ for i in range(len(mat_df)):
  row = mat_df.iloc[i]
- # For seepage workflows, 'g' (unit weight) and shear strength properties are not required.
+
+ # Check if material name (Column B) is empty - stop reading if empty
+ material_name = row.get('name', '')
+ if pd.isna(material_name) or str(material_name).strip() == '':
+ break # Stop reading when we encounter an empty material name
+
+ # For seep workflows, 'g' (unit weight) and shear strength properties are not required.
  # A material row is considered "missing" only if Excel columns C:X are empty.
  # (Excel A:B are number and name; C:X contain the actual property fields.)
  start_col = 2 # C
  end_col = min(mat_df.shape[1], 24) # X is column 24 (1-based) -> index 23, so slice end is 24
  c_to_x_empty = True if start_col >= end_col else row.iloc[start_col:end_col].isna().all()
  if c_to_x_empty:
+ # Excel row number: header is on row 8, first data row is row 9
+ excel_row = i + 9
  raise ValueError(
- "CRITICAL ERROR: Missing material row for a profile line. "
- f"Material {i+1} of {required_materials} is blank in columns C:X (Excel row {excel_row})."
+ "CRITICAL ERROR: Material row has empty property fields. "
+ f"Material '{material_name}' (Excel row {excel_row}) is blank in columns C:X."
  )
 
  materials.append({
- "name": row.get('name', ''),
+ "name": str(material_name).strip(),
  "gamma": _num(row.get("g", 0)),
  "option": str(row.get('option', '')).strip().lower(),
  "c": _num(row.get('c', 0)),
@@ -242,7 +279,7 @@ def load_slope_data(filepath):
  })
 
  # === SEEPAGE ANALYSIS FILES ===
- # Check if any materials use seepage analysis for pore pressure
+ # Check if any materials use seep analysis for pore pressure
  has_seep_materials = any(material["u"] == "seep" for material in materials)
 
  seep_mesh = None
@@ -250,111 +287,213 @@ def load_slope_data(filepath):
  seep_u2 = None
 
  if has_seep_materials:
- # Read seepage file names directly from Excel cells L22, L23, L24
  try:
- # Read the 'mat' sheet directly without header parsing
- mat_raw_df = xls.parse('mat', header=None)
-
- # L22 = row 21, column 11 (0-indexed)
- mesh_filename = str(mat_raw_df.iloc[21, 11]).strip() # L22
- solution1_filename = str(mat_raw_df.iloc[22, 11]).strip() # L23
- solution2_filename = str(mat_raw_df.iloc[23, 11]).strip() # L24
-
- # Validate required files
- if not mesh_filename or mesh_filename.lower() == 'nan':
- raise ValueError("CRITICAL ERROR: Mesh filename is required when using 'seep' pore pressure option but is blank in cell L22.")
- if not solution1_filename or solution1_filename.lower() == 'nan':
- raise ValueError("CRITICAL ERROR: Solution1 filename is required when using 'seep' pore pressure option but is blank in cell L23.")
-
- # Load mesh file
+ base, _ = os.path.splitext(filepath)
+ mesh_filename = f"{base}_mesh.json"
+ solution1_filename = f"{base}_seep.csv"
+ solution2_filename = f"{base}_seep2.csv"
+
+ missing_required = []
  if not os.path.exists(mesh_filename):
- raise ValueError(f"CRITICAL ERROR: Mesh file '{mesh_filename}' not found.")
- seep_mesh = import_mesh_from_json(mesh_filename)
-
- # Load solution1 file
+ missing_required.append(mesh_filename)
  if not os.path.exists(solution1_filename):
- raise ValueError(f"CRITICAL ERROR: Solution1 file '{solution1_filename}' not found.")
- solution1_df = pd.read_csv(solution1_filename)
- # Skip the last row which contains the total flowrate comment
- solution1_df = solution1_df.iloc[:-1]
- seep_u = solution1_df["u"].to_numpy()
-
- # Load solution2 file if provided
- if solution2_filename and solution2_filename.lower() != 'nan':
- if not os.path.exists(solution2_filename):
- raise ValueError(f"CRITICAL ERROR: Solution2 file '{solution2_filename}' not found.")
- solution2_df = pd.read_csv(solution2_filename)
- # Skip the last row which contains the total flowrate comment
- solution2_df = solution2_df.iloc[:-1]
- seep_u2 = solution2_df["u"].to_numpy()
-
- except Exception as e:
- if "CRITICAL ERROR" in str(e):
- raise e
+ missing_required.append(solution1_filename)
+
+ if missing_required:
+ missing_list = ", ".join(f"'{path}'" for path in missing_required)
+ print(
+ "WARNING: Seep pore pressure option selected but required seep files "
+ f"were not found: {missing_list}. Continuing without seep data."
+ )
  else:
- raise ValueError(f"Error reading seepage files: {e}")
+ seep_mesh = import_mesh_from_json(mesh_filename)
+ solution1_df = pd.read_csv(solution1_filename)
+ solution1_df = solution1_df.iloc[:-1]
+ seep_u = solution1_df["u"].to_numpy()
+
+ if os.path.exists(solution2_filename):
+ solution2_df = pd.read_csv(solution2_filename)
+ solution2_df = solution2_df.iloc[:-1]
+ seep_u2 = solution2_df["u"].to_numpy()
+
+ except Exception as e:
+ print(f"WARNING: Error reading seepage files: {e}. Continuing without seep data.")
 
  # === PIEZOMETRIC LINE ===
- piezo_df = xls.parse('piezo')
+ piezo_df = xls.parse('piezo', header=None)
  piezo_line = []
  piezo_line2 = []
 
- # Read all data once (rows 4-18)
- piezo_data = piezo_df.iloc[2:18].dropna(how='all')
+ # Read first piezometric line (columns A:B, starting at row 4, Excel row 4 = index 3)
+ # Keep reading until we encounter an empty row
+ start_row = 3 # Excel row 4 (0-indexed row 3)
+ x_col = 0 # Column A
+ y_col = 1 # Column B
 
- if len(piezo_data) >= 2:
- # Extract first table (A4:B18) - columns 0 and 1
+ row = start_row
+ while row < piezo_df.shape[0]:
  try:
- piezo_data1 = piezo_data.dropna(subset=[piezo_data.columns[0], piezo_data.columns[1]], how='all')
- if len(piezo_data1) < 2:
- raise ValueError("First piezometric line must contain at least two points.")
- piezo_line = piezo_data1.apply(lambda row: (float(row.iloc[0]), float(row.iloc[1])), axis=1).tolist()
- except Exception:
- raise ValueError("Invalid first piezometric line format.")
-
- # Extract second table (D4:E18) - columns 3 and 4
+ x_val = piezo_df.iloc[row, x_col]
+ y_val = piezo_df.iloc[row, y_col]
+
+ # Stop at first empty row (both x and y are empty)
+ if pd.isna(x_val) and pd.isna(y_val):
+ break
+
+ # If at least one coordinate is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ piezo_line.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ # Validate first piezometric line
+ if len(piezo_line) == 1:
+ raise ValueError("First piezometric line must contain at least two points.")
+
+ # Read second piezometric line (columns D:E, starting at row 4, Excel row 4 = index 3)
+ # Keep reading until we encounter an empty row
+ x_col2 = 3 # Column D
+ y_col2 = 4 # Column E
+
+ row = start_row
+ while row < piezo_df.shape[0]:
  try:
- piezo_data2 = piezo_data.dropna(subset=[piezo_data.columns[3], piezo_data.columns[4]], how='all')
- if len(piezo_data2) < 2:
- raise ValueError("Second piezometric line must contain at least two points.")
- piezo_line2 = piezo_data2.apply(lambda row: (float(row.iloc[3]), float(row.iloc[4])), axis=1).tolist()
- except Exception:
- # If second table reading fails, just leave piezo_line2 as empty list
- piezo_line2 = []
- elif len(piezo_data) == 1:
- raraise ValueError("Piezometric line must contain at least two points.")
+ x_val = piezo_df.iloc[row, x_col2]
+ y_val = piezo_df.iloc[row, y_col2]
+
+ # Stop at first empty row (both x and y are empty)
+ if pd.isna(x_val) and pd.isna(y_val):
+ break
+
+ # If at least one coordinate is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ piezo_line2.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ # Validate second piezometric line (only if it has data)
+ if len(piezo_line2) == 1:
+ raise ValueError("Second piezometric line must contain at least two points if provided.")
 
  # === DISTRIBUTED LOADS ===
+ # Read first set from "dloads" tab
  dload_df = xls.parse('dloads', header=None)
  dloads = []
+
+ # Start reading from column B (index 1), each distributed load uses 3 columns (X, Y, Normal)
+ # Keep reading to the right until we encounter an empty distributed load
+ start_row = 3 # Excel row 4 (0-indexed row 3)
+ col = 1 # Start with column B (index 1)
+
+ while col < dload_df.shape[1]:
+ x_col = col
+ y_col = col + 1
+ normal_col = col + 2
+
+ # Check if dataframe has enough rows before accessing start_row
+ if dload_df.shape[0] <= start_row:
+ break # Not enough rows, stop reading
+
+ # Check if this distributed load block is empty (check first row for X coordinate)
+ if pd.isna(dload_df.iloc[start_row, x_col]):
+ break # Stop reading when we encounter an empty distributed load
+
+ # Read points for this distributed load, keep reading down until empty row
+ block_points = []
+ row = start_row
+ while row < dload_df.shape[0]:
+ try:
+ x_val = dload_df.iloc[row, x_col]
+ y_val = dload_df.iloc[row, y_col]
+ normal_val = dload_df.iloc[row, normal_col]
+
+ # Stop at first empty row (all three values are empty)
+ if pd.isna(x_val) and pd.isna(y_val) and pd.isna(normal_val):
+ break
+
+ # If at least coordinates are present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ normal = float(normal_val) if pd.notna(normal_val) else 0.0
+ block_points.append({
+ "X": float(x_val),
+ "Y": float(y_val),
+ "Normal": normal
+ })
+ except:
+ break
+ row += 1
+
+ # Validate that we have at least 2 points
+ if len(block_points) == 1:
+ raise ValueError(f"Each distributed load must contain at least two points. Distributed load starting at column {chr(65 + col)} has only one point.")
+
+ if len(block_points) >= 2:
+ dloads.append(block_points)
+
+ # Move to next distributed load (skip 4 columns: 3 for the dload + 1 empty column)
+ col += 4
+
+ # Read second set from "dloads (2)" tab
  dloads2 = []
- dload_data_blocks = [
- {"start_row": 3, "end_row": 13},
- {"start_row": 16, "end_row": 26}
- ]
- dload_block_starts = [1, 5, 9, 13]
-
- for block_idx, block in enumerate(dload_data_blocks):
- for col in dload_block_starts:
- section = dload_df.iloc[block["start_row"]:block["end_row"], col:col + 3]
- section = section.dropna(how='all')
- section = section.dropna(subset=[col, col + 1], how='any')
- if len(section) >= 2:
+ try:
+ dload_df2 = xls.parse('dloads (2)', header=None)
+
+ # Start reading from column B (index 1), each distributed load uses 3 columns (X, Y, Normal)
+ # Keep reading to the right until we encounter an empty distributed load
+ col = 1 # Start with column B (index 1)
+
+ while col < dload_df2.shape[1]:
+ x_col = col
+ y_col = col + 1
+ normal_col = col + 2
+
+ # Check if dataframe has enough rows before accessing start_row
+ if dload_df2.shape[0] <= start_row:
+ break # Not enough rows, stop reading
+
+ # Check if this distributed load block is empty (check first row for X coordinate)
+ if pd.isna(dload_df2.iloc[start_row, x_col]):
+ break # Stop reading when we encounter an empty distributed load
+
+ # Read points for this distributed load, keep reading down until empty row
+ block_points = []
+ row = start_row
+ while row < dload_df2.shape[0]:
  try:
- block_points = section.apply(
- lambda row: {
- "X": float(row.iloc[0]),
- "Y": float(row.iloc[1]),
- "Normal": float(row.iloc[2])
- }, axis=1).tolist()
- if block_idx == 0:
- dloads.append(block_points)
- else:
- dloads2.append(block_points)
+ x_val = dload_df2.iloc[row, x_col]
+ y_val = dload_df2.iloc[row, y_col]
+ normal_val = dload_df2.iloc[row, normal_col]
+
+ # Stop at first empty row (all three values are empty)
+ if pd.isna(x_val) and pd.isna(y_val) and pd.isna(normal_val):
+ break
+
+ # If at least coordinates are present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ normal = float(normal_val) if pd.notna(normal_val) else 0.0
+ block_points.append({
+ "X": float(x_val),
+ "Y": float(y_val),
+ "Normal": normal
+ })
  except:
- raise ValueError("Invalid data format in distributed load block.")
- elif len(section) == 1:
- raise ValueError("Each distributed load block must contain at least two points.")
+ break
+ row += 1
+
+ # Validate that we have at least 2 points
+ if len(block_points) == 1:
+ raise ValueError(f"Each distributed load must contain at least two points. Distributed load starting at column {chr(65 + col)} has only one point.")
+
+ if len(block_points) >= 2:
+ dloads2.append(block_points)
+
+ # Move to next distributed load (skip 4 columns: 3 for the dload + 1 empty column)
+ col += 4
+ except (ValueError, KeyError):
+ # If "dloads (2)" tab doesn't exist, just leave dloads2 as empty list
+ pass
 
  # === CIRCLES ===
 
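The seep inputs are now located by convention from the workbook path rather than from cells in the 'mat' sheet (see the start of the hunk above). For a hypothetical workbook named slope_model.xlsx, the loader would look for:

    import os

    filepath = "slope_model.xlsx"      # hypothetical input workbook
    base, _ = os.path.splitext(filepath)
    print(f"{base}_mesh.json")         # slope_model_mesh.json (warned about and skipped if missing)
    print(f"{base}_seep.csv")          # slope_model_seep.csv  (warned about and skipped if missing)
    print(f"{base}_seep2.csv")         # slope_model_seep2.csv (optional second solution)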
@@ -404,11 +543,16 @@ def load_slope_data(filepath):
  reinforce_df = xls.parse('reinforce', header=1) # Header in row 2 (0-indexed row 1)
  reinforce_lines = []
 
- # Process rows 3-22 (Excel) which are 0-indexed rows 0-19 in pandas after header=1
- for i, row in reinforce_df.iloc[0:20].iterrows():
- # Check if the row has coordinate data (x1, y1, x2, y2)
- if pd.isna(row.iloc[1]) or pd.isna(row.iloc[2]) or pd.isna(row.iloc[3]) or pd.isna(row.iloc[4]):
- continue # Skip empty rows
+ # Process rows starting from row 3 (Excel) which is 0-indexed row 0 in pandas after header=1
+ # Keep reading until we encounter an empty value in column B
+ for i, row in reinforce_df.iterrows():
+ # Check if column B (x1 coordinate) is empty - stop reading if empty
+ if pd.isna(row.iloc[1]):
+ break # Stop reading when column B is empty
+
+ # Check if other required coordinates are present
+ if pd.isna(row.iloc[2]) or pd.isna(row.iloc[3]) or pd.isna(row.iloc[4]):
+ continue # Skip rows with incomplete coordinate data
 
  # If coordinates are present, check for required parameters (Tmax, Lp1, Lp2)
  if pd.isna(row.iloc[5]) or pd.isna(row.iloc[7]) or pd.isna(row.iloc[8]):
@@ -536,117 +680,160 @@ def load_slope_data(filepath):
 
 
  # === SEEPAGE ANALYSIS BOUNDARY CONDITIONS ===
+ # Read first set from "seep bc" sheet
  seep_df = xls.parse('seep bc', header=None)
  seepage_bc = {"specified_heads": [], "exit_face": []}
- seepage_bc2 = {"specified_heads": [], "exit_face": []}
-
- def _read_specified_head_block(
- df,
- head_row: int,
- head_col: int,
- x_col: int,
- y_col: int,
- data_start_row: int,
- data_end_row: int,
- ):
- """Read a specified-head block; returns (head_value, coords_list)."""
- head_val = (
- df.iloc[head_row, head_col]
- if df.shape[0] > head_row and df.shape[1] > head_col
- else None
- )
- coords = []
- for r in range(data_start_row, data_end_row):
- if r >= df.shape[0]:
- break
- x = df.iloc[r, x_col] if df.shape[1] > x_col else None
- y = df.iloc[r, y_col] if df.shape[1] > y_col else None
- if pd.notna(x) and pd.notna(y):
- coords.append((float(x), float(y)))
- return head_val, coords
-
- # Specified Head #1
- head1, coords1 = _read_specified_head_block(
- seep_df, head_row=2, head_col=2, x_col=1, y_col=2, data_start_row=4, data_end_row=12
- )
- if head1 is not None and coords1:
- seepage_bc["specified_heads"].append({"head": float(head1), "coords": coords1})
-
- # Specified Head #2
- head2, coords2 = _read_specified_head_block(
- seep_df, head_row=2, head_col=5, x_col=4, y_col=5, data_start_row=4, data_end_row=12
- )
- if head2 is not None and coords2:
- seepage_bc["specified_heads"].append({"head": float(head2), "coords": coords2})
-
- # Specified Head #3
- head3, coords3 = _read_specified_head_block(
- seep_df, head_row=2, head_col=8, x_col=7, y_col=8, data_start_row=4, data_end_row=12
- )
- if head3 is not None and coords3:
- seepage_bc["specified_heads"].append({"head": float(head3), "coords": coords3})
-
- # Exit Face
+
+ # Exit Face BC: starts at B5 (row 4, columns 1 and 2), continues down until empty x value
  exit_coords = []
- for i in range(15, 23): # rows 16-23 (0-indexed 15-22)
- if i >= seep_df.shape[0]:
+ exit_start_row = 4 # Excel row 5 (0-indexed row 4)
+ exit_x_col = 1 # Column B
+ exit_y_col = 2 # Column C
+
+ row = exit_start_row
+ while row < seep_df.shape[0]:
+ try:
+ x_val = seep_df.iloc[row, exit_x_col]
+ y_val = seep_df.iloc[row, exit_y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert (y can be empty but we'll still add the point)
+ if pd.notna(x_val) and pd.notna(y_val):
+ exit_coords.append((float(x_val), float(y_val)))
+ except:
  break
- x = seep_df.iloc[i, 1] if seep_df.shape[1] > 1 else None
- y = seep_df.iloc[i, 2] if seep_df.shape[1] > 2 else None
- if pd.notna(x) and pd.notna(y):
- exit_coords.append((float(x), float(y)))
+ row += 1
  seepage_bc["exit_face"] = exit_coords
-
- # --- RAPID DRAWDOWN BCs (second set) ---
- # User-added second set starts at:
- # - Specified Head #1: head in C26, coords in B28:C35
- head1b, coords1b = _read_specified_head_block(
- seep_df, head_row=25, head_col=2, x_col=1, y_col=2, data_start_row=27, data_end_row=35
- )
- if head1b is not None and coords1b:
- seepage_bc2["specified_heads"].append({"head": float(head1b), "coords": coords1b})
-
- # Mirror the same layout for the other two specified-head blocks (same columns as the first set)
- head2b, coords2b = _read_specified_head_block(
- seep_df, head_row=25, head_col=5, x_col=4, y_col=5, data_start_row=27, data_end_row=35
- )
- if head2b is not None and coords2b:
- seepage_bc2["specified_heads"].append({"head": float(head2b), "coords": coords2b})
-
- head3b, coords3b = _read_specified_head_block(
- seep_df, head_row=25, head_col=8, x_col=7, y_col=8, data_start_row=27, data_end_row=35
- )
- if head3b is not None and coords3b:
- seepage_bc2["specified_heads"].append({"head": float(head3b), "coords": coords3b})
-
- # Exit Face #2: positioned lower on the sheet (same columns as the first exit face block)
- exit_coords2 = []
- for i in range(38, 46): # rows 39-46 (0-indexed 38-45)
- if i >= seep_df.shape[0]:
+
+ # Specified Head BCs: start at columns E:F, then H:I, etc.
+ # Head value is in row 3 (index 2), XY values start at row 5 (index 4)
+ # Keep reading to the right until head value in row 3 is empty
+ head_row = 2 # Excel row 3 (0-indexed row 2)
+ data_start_row = 4 # Excel row 5 (0-indexed row 4)
+ col = 4 # Start with column E (index 4)
+
+ while col < seep_df.shape[1]:
+ x_col = col
+ y_col = col + 1
+ head_col = col + 1 # Head value is in the Y column (F, I, L, etc.)
+
+ # Check if head value in row 3 is empty - stop reading if empty
+ if seep_df.shape[0] <= head_row:
  break
- x = seep_df.iloc[i, 1] if seep_df.shape[1] > 1 else None
- y = seep_df.iloc[i, 2] if seep_df.shape[1] > 2 else None
- if pd.notna(x) and pd.notna(y):
- exit_coords2.append((float(x), float(y)))
- seepage_bc2["exit_face"] = exit_coords2
+ head_val = seep_df.iloc[head_row, head_col]
+ if pd.isna(head_val):
+ break # Stop reading when head value is empty
+
+ # Read XY coordinates starting from row 5, continue down until empty
+ coords = []
+ row = data_start_row
+ while row < seep_df.shape[0]:
+ try:
+ x_val = seep_df.iloc[row, x_col]
+ y_val = seep_df.iloc[row, y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ coords.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ if coords: # Only add if we have coordinates
+ seepage_bc["specified_heads"].append({"head": float(head_val), "coords": coords})
+
+ # Move to next specified head BC (skip 3 columns: E->H, H->K, etc.)
+ col += 3
+
+ # Read second set from "seep bc (2)" sheet
+ seepage_bc2 = {"specified_heads": [], "exit_face": []}
+ try:
+ seep_df2 = xls.parse('seep bc (2)', header=None)
+
+ # Exit Face BC: starts at B5 (row 4, columns 1 and 2), continues down until empty x value
+ exit_coords2 = []
+ row = exit_start_row
+ while row < seep_df2.shape[0]:
+ try:
+ x_val = seep_df2.iloc[row, exit_x_col]
+ y_val = seep_df2.iloc[row, exit_y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ exit_coords2.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+ seepage_bc2["exit_face"] = exit_coords2
+
+ # Specified Head BCs: same structure as first sheet
+ col = 4 # Start with column E (index 4)
+ while col < seep_df2.shape[1]:
+ x_col = col
+ y_col = col + 1
+ head_col = col + 1 # Head value is in the Y column
+
+ # Check if head value in row 3 is empty - stop reading if empty
+ if seep_df2.shape[0] <= head_row:
+ break
+ head_val = seep_df2.iloc[head_row, head_col]
+ if pd.isna(head_val):
+ break # Stop reading when head value is empty
+
+ # Read XY coordinates starting from row 5, continue down until empty
+ coords = []
+ row = data_start_row
+ while row < seep_df2.shape[0]:
+ try:
+ x_val = seep_df2.iloc[row, x_col]
+ y_val = seep_df2.iloc[row, y_col]
+
+ # Stop at first empty x value
+ if pd.isna(x_val):
+ break
+
+ # If x is present, try to convert
+ if pd.notna(x_val) and pd.notna(y_val):
+ coords.append((float(x_val), float(y_val)))
+ except:
+ break
+ row += 1
+
+ if coords: # Only add if we have coordinates
+ seepage_bc2["specified_heads"].append({"head": float(head_val), "coords": coords})
+
+ # Move to next specified head BC (skip 3 columns: E->H, H->K, etc.)
+ col += 3
+ except (ValueError, KeyError):
+ # If "seep bc (2)" sheet doesn't exist, just leave seepage_bc2 as empty
+ pass
 
  # === VALIDATION ===
 
  circular = len(circles) > 0
- # Check if this is a seepage-only analysis (has seepage BCs but no slope stability surfaces)
+ # Check if this is a seep-only analysis (has seep BCs but no slope stability surfaces)
  has_seepage_bc = (len(seepage_bc.get("specified_heads", [])) > 0 or
  len(seepage_bc.get("exit_face", [])) > 0)
  is_seepage_only = has_seepage_bc and not circular and len(non_circ) == 0
 
- # Only require circular/non-circular data if this is NOT a seepage-only analysis
+ # Only require circular/non-circular data if this is NOT a seep-only analysis
  if not is_seepage_only and not circular and len(non_circ) == 0:
  raise ValueError("Input must include either circular or non-circular surface data.")
  if not profile_lines:
  raise ValueError("Profile lines sheet is empty or invalid.")
  if not materials:
  raise ValueError("Materials sheet is empty.")
- if len(materials) != len(profile_lines):
- raise ValueError("Each profile line must have a corresponding material. You have " + str(len(materials)) + " materials and " + str(len(profile_lines)) + " profile lines.")
 
 
  # Add everything to globals_data
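Both 'seep bc' sheets are parsed into the same dict shape; a sketch with hypothetical head and coordinate values:

    # Structure produced by the boundary-condition parsing above (values are made up).
    seepage_bc = {
        "specified_heads": [
            {"head": 95.0, "coords": [(0.0, 80.0), (25.0, 80.0)]},
        ],
        "exit_face": [(60.0, 40.0), (80.0, 30.0)],
    }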
@@ -671,7 +858,7 @@ def load_slope_data(filepath):
  globals_data["seepage_bc"] = seepage_bc
  globals_data["seepage_bc2"] = seepage_bc2
 
- # Add seepage data if available
+ # Add seep data if available
  if has_seep_materials:
  globals_data["seep_mesh"] = seep_mesh
  globals_data["seep_u"] = seep_u