toughanimator 0.1.5__tar.gz → 0.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,10 @@
  Metadata-Version: 2.4
  Name: toughanimator
- Version: 0.1.5
+ Version: 0.1.6
  Summary: A tool for visualizing TOUGH simulation outputs.
  Home-page: https://github.com/scarletref/toughanimator
- Author: Your Name
- Author-email: your.email@example.com
+ Author: scarletref
+ Author-email: scarletreflection@gmail.com
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
@@ -2,12 +2,12 @@ from setuptools import setup, find_packages

  setup(
  name='toughanimator', # Package name on PyPI
- version='0.1.5',
+ version='0.1.6',
  description='A tool for visualizing TOUGH simulation outputs.',
  long_description=open('README.md').read(),
  long_description_content_type='text/markdown',
- author='Your Name',
- author_email='your.email@example.com',
+ author='scarletref',
+ author_email='scarletreflection@gmail.com',
  url='https://github.com/scarletref/toughanimator',
  packages=find_packages(),
  include_package_data=True,
@@ -11,8 +11,8 @@ logging.basicConfig(level=logging.DEBUG)
  # Directory containing all test cases
  parent_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
  TEST_CASES_DIRS = [
- #os.path.join(parent_path, "test_cases"),
- os.path.join(parent_path, "unresolved"),
+ os.path.join(parent_path, "test_cases"),
+ #os.path.join(parent_path, "unresolved"),
  ]

@@ -0,0 +1,25 @@
+ import os
+ import tough_classes as ta
+ import pandas as pd
+ import matplotlib.pyplot as plt
+
+ #dir_name = "unresolved"
+ dir_name = "test_cases"
+ case_name = "PetraSim_2D_Conceptual"
+ test_case_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), dir_name)
+
+ case_dir = os.path.join(test_case_dir, case_name)
+
+
+ #case_dir = r"D:\Projects\202504\polygonal\poly_test"
+ #case_dir = r"D:\Projects\202507\tough系列output\tough output format\TR_MINC_exe"
+ #case_dir = r"D:\Projects\202508\tough_cases\WW\7_TR_MINC_petrasim2025__5spot"
+ #case_dir = r"D:\Projects\202508\tough_cases\WW\6_TR_MINC_exe"
+ reader = ta.vis_reader(case_dir)
+ #reader.write_eleme_conne()
+ #reader.write_geometry()
+ #reader.write_incon()
+ #reader.write_result()
+ reader.write_all()
+
+
@@ -69,7 +69,7 @@ class VisVariable:
  }

  class VisSetting:
- def __init__(self, input_file_paths, out_file_paths, vis_dir, corners_file="unkown", out_format_type=OutType.Unknown, tough_version = ToughVersion.Unknown, vis_types=[VisType.ParaView, VisType.Tecplot], mesh_type=MeshType.RegularGrid, debug=False, eos="ECO2N", minc=False):
+ def __init__(self, input_file_paths, out_file_paths, vis_dir, corners_file="unkown", out_format_type=OutType.Unknown, tough_version = ToughVersion.Unknown, vis_types=[VisType.ParaView, VisType.Tecplot], mesh_type=MeshType.RegularGrid, debug=False, eos="ECO2N", minc=False, selected_variables_scalar = [], selected_variables_vector = [] ):
  self.mesh_type = mesh_type
  self.out_format_type = out_format_type
  self.vis_types = vis_types
@@ -85,6 +85,8 @@ class VisSetting:
  self.debug = debug
  self.eos = eos
  self.minc = minc
+ self.selected_variables_scalar = selected_variables_scalar
+ self.selected_variables_vector = selected_variables_vector


  def setBounds(self, x_bounds, y_bounds, z_bounds):
@@ -130,6 +132,8 @@ class vis_reader:
  debug = config['debug'] if 'debug' in config else False,
  eos = config['EOS'] if 'EOS' in config else "ECO2N",
  minc = config['MINC'] if 'MINC' in config else False,
+ selected_variables_scalar = config['selected_variables_scalar'] if 'selected_variables_scalar' in config else [],
+ selected_variables_vector = config['selected_variables_vector'] if 'selected_variables_vector' in config else []
  )

  # check if the project is using MINC
@@ -215,6 +219,7 @@ class vis_reader:
  self.current_out_file = output_file_path
  self.__check_TOUGH_version()
  print(f' Version: {self.setting.tough_version.name}')
+ print(f' EOS: {self.setting.eos}')
  if self.setting.tough_version == ToughVersion.TOUGH2:
  self.__read_TOUGH2_CSV_outfile()
  elif self.setting.tough_version == ToughVersion.TOUGH3:
@@ -357,6 +362,7 @@ class vis_reader:
  def __write_incon_buffer(self):
  self.incon_buffer = io.StringIO()
  has_incon = False
+ self.global_incon = False
  # write temp element txt

  for input_file_path in self.setting.input_file_paths:
@@ -368,7 +374,7 @@ class vis_reader:
  if line.startswith('INCON-'):
  reading_incon = True
  has_incon = True
- find_incon = True
+ #find_incon = True
  continue

  if reading_incon:
@@ -386,9 +392,33 @@ class vis_reader:
  if len(line.split()) == 2:
  self.incon_buffer.write(line)

- if has_incon == False:
- print(f'Can\'t find INCON block in input_file_paths.')
- #sys.exit(1)
+
+ if has_incon == False or self.incon_buffer.tell() == 0:
+ print(f'Can\'t find INCON block in input_file_paths (or length of INCON is zero).')
+
+ # find the fifth line of the "PARAM" block
+ reading_pram = False
+ for input_file_path in self.setting.input_file_paths:
+ line_counter = 0
+ with open(input_file_path) as f:
+ reading_pram = False
+ for line in f:
+ if line.startswith('PARAM-'):
+ reading_pram = True
+ continue
+ if reading_pram:
+ line_counter += 1
+ if self.__check_if_block_end(line, line_counter):
+ if line_counter <5:
+ print(f' Can\'t find Global INCON line in PARAM block of {input_file_path}. Please check the PARAM block.')
+ break
+
+ if line_counter == 5:
+ self.global_incon = True
+ print(f' Found Global INCON line in PARAM block of {input_file_path}')
+ self.incon_buffer.write(line)
+ break
+
  else:
  print(f' Found INCON block in {found_path}')

@@ -418,16 +448,21 @@ class vis_reader:
  print(f' It is empty in INCON block.')
  return

- for header in incon_names:
- array = vtkDoubleArray()
- array.SetName(header)
- self.incon_vtk.GetCellData().AddArray(array)
-
- for i in range(0, self.incon_vtk.GetNumberOfCells()):
+ else:
  for header in incon_names:
- index = self.sequence_dist[i]
- value = self.__parse_float(incon_df[header][index])
- self.incon_vtk.GetCellData().GetArray(header).InsertNextValue(value)
+ array = vtkDoubleArray()
+ array.SetName(header)
+ self.incon_vtk.GetCellData().AddArray(array)
+
+ for i in range(0, self.incon_vtk.GetNumberOfCells()):
+ for header in incon_names:
+ if self.global_incon:
+ # if global incon, use the first row
+ value = self.__parse_float(incon_df[header][0])
+ else:
+ index = self.sequence_dist[i]
+ value = self.__parse_float(incon_df[header][index])
+ self.incon_vtk.GetCellData().GetArray(header).InsertNextValue(value)

  extension = os.path.splitext(self.main_geometry)[1]
  self.incon_path = os.path.join(self.setting.vis_dir, f'incon{extension}')
@@ -459,6 +494,9 @@ class vis_reader:
  self.__write_scalar_result(time_step, df, csv_headers)
  elif value_type == ValueType.Vector:
  self.__write_vector_result(time_step, df, csv_headers)
+ else:
+ print('Error: Your value type is not supported')
+ sys.exit(1)

  buffer.flush()
  buffer.close()
@@ -470,14 +508,17 @@ class vis_reader:

  if line_number == 0:
  csv_headers = [x.strip() for x in values]
+ #replace all " with ''"
+ csv_headers = [x.replace('"', '') for x in csv_headers]
  if 'ELEM' in csv_headers and 'INDEX' in csv_headers:
  value_type = ValueType.Scalar
- start_index = 1 # remove the first item
+ #start_index = 1 # remove the first item

  elif 'ELEM1' in csv_headers and 'ELEM2' in csv_headers:
  value_type = ValueType.Vector
- start_index = 4
+ #start_index = 1

+ start_index = 1
  # remove the first "TIME" header (to reduce the number of columns)
  csv_headers = csv_headers[start_index:]

@@ -505,7 +546,6 @@ class vis_reader:
  process_chunk()


-
  def __read_TOUGH3_CSV_outfile(self):
  scalar_buffer = io.StringIO()
  current_time_step = None
@@ -517,6 +557,93 @@ class vis_reader:
  start_index = -1
  self.time_steps_list = []

+ with open(self.current_out_file) as f:
+ for line in f:
+ line_number = line_number + 1
+ values = line.strip().split(',')
+ if line_number == 0:
+ values = [x.replace('"', '') for x in values]
+ csv_headers = [x.strip() for x in values]
+
+ if 'ELEM' in csv_headers:
+ value_type = ValueType.Scalar
+ start_index = 5
+
+ elif 'ELEM1' in csv_headers and 'ELEM2' in csv_headers:
+ value_type = ValueType.Vector
+ start_index = 5
+
+ csv_headers = csv_headers[start_index:]
+ f.readline() # skip next line
+ print(f' Value type: {value_type.name}')
+ continue
+
+ # Find time item
+ if len(values) == 1:
+ time_string = values[0].replace('"', '').strip()
+ time_string = time_string.split()[-1]
+ time = self.__parse_float(time_string)
+
+ # if not the first time step
+ if value_type == ValueType.Scalar and reading_number == self.number_of_elements:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_scalar_result(
+ current_time_step, df, csv_headers)
+ scalar_buffer.flush()
+ scalar_buffer.close()
+ scalar_buffer = io.StringIO()
+ reading_number = 0
+
+ if value_type == ValueType.Vector and reading_number == self.number_of_connections:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_vector_result(
+ current_time_step, df, csv_headers)
+ scalar_buffer.flush()
+ scalar_buffer.close()
+ scalar_buffer = io.StringIO()
+ reading_number = 0
+
+ current_time_step = VisTimeStep(
+ time=float(time),
+ time_step=tim_step_counter,
+ iteration=1
+ )
+
+ # Initialize buffer
+ header_string = ','.join(csv_headers)
+ scalar_buffer.write(header_string + '\n')
+ self.time_steps_list.append(current_time_step)
+ tim_step_counter = tim_step_counter + 1
+
+ else:
+ scalar_buffer.write(','.join(values[start_index:]) + '\n')
+ reading_number = reading_number + 1
+
+ else:
+ # write the last time step
+ if value_type == ValueType.Scalar:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_scalar_result(current_time_step, df, csv_headers)
+ if value_type == ValueType.Vector:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_vector_result(current_time_step, df, csv_headers)
+ scalar_buffer.close()
+
+ def __old_read_TOUGH3_CSV_outfile(self):
+ scalar_buffer = io.StringIO()
+ current_time_step = None
+ tim_step_counter = 1
+ csv_headers = []
+ line_number = -1
+ reading_number = 0
+ value_type = ValueType.Unknown
+ start_index = -1
+ self.time_steps_list = []
+
  with open(self.current_out_file) as f:
  for line in f:
  line_number = line_number + 1
@@ -826,6 +953,8 @@ class vis_reader:
  dataframe = dataframe.drop(columns=['INDEX'])
  headers.remove('INDEX')
  if 'ELEM' in dataframe.columns:
+ # change the data type of ELEM to string
+ dataframe['ELEM'] = dataframe['ELEM'].astype(str)
  # remove leading spaces from ELEM column
  dataframe['ELEM'] = dataframe['ELEM'].str.lstrip()
  headers.remove('ELEM')
@@ -875,6 +1004,11 @@ class vis_reader:
  if VisType.Tecplot not in self.setting.vis_types:
  return

+
+ if self.setting.mesh_type == MeshType.PolygonalMesh:
+ print(f' Tecplot output for polygonal mesh is not supported yet.')
+ return
+
  # Start Tecplot generating
  tec_name = pathlib.Path(self.setting.input_file_paths[0]).stem
  self.tec_scalar_path = os.path.join(self.setting.vis_dir, f'{tec_name}_scalar.dat')
@@ -884,10 +1018,11 @@ class vis_reader:
  file = open(self.tec_scalar_path, "a")
  if firstFile:
  file.write('TITLE = TECPLOT PLOT \n')
- header_string = '"'+'", "'.join(headers) + '"'
- file.write(f'VARIABLES = "X", "Y", "Z", {header_string}\n')
+ selected_header_string = '"'+'", "'.join(self.setting.selected_variables_scalar) + '"'
+ #header_string = '"'+'", "'.join(headers) + '"'
+ file.write(f'VARIABLES = "X", "Y", "Z", {selected_header_string}\n')

- tecplot_cell_type = 'BRICK'
+ #tecplot_cell_type = 'BRICK'

  #time_statement = f'ZONE T ="{vis_time_step.time_step}, Time = {vis_time_step.time}", N = {vtu_cell_to_points.GetNumberOfPoints()}, E = {vtu_cell_to_points.GetNumberOfCells()}, F = FEPOINT, ET = {tecplot_cell_type}, SOLUTIONTIME = {vis_time_step.time}\n'

@@ -916,7 +1051,7 @@ class vis_reader:
  file.write(" \n")

  # Other data
- for header in headers:
+ for header in self.setting.selected_variables_scalar:
  array = vtr.GetCellData().GetArray(header)

  for e in range(0, vtr.GetNumberOfCells()):
@@ -943,8 +1078,9 @@ class vis_reader:
  return var_string

  # write the vector result for one timestep
- def __write_vector_result(self, vis_time_step, dataframe, headers):
+ def __write_vector_result(self, vis_time_step, dataframe, csv_headers):

+ headers = csv_headers.copy()
  index = self.time_steps_list.index(vis_time_step)
  extension = os.path.splitext(self.main_geometry)[1]
  vtr_path = os.path.join(self.setting.vis_dir, 'paraview', f'time_step_{vis_time_step.time_step}{extension}')
@@ -958,74 +1094,96 @@ class vis_reader:
  timesteps.SetNumberOfTuples(1)
  timesteps.SetNumberOfComponents(1)
  timesteps.SetTuple1(0, vis_time_step.time)
+ #timesteps.SetTuple2(1, 100000)
  vector_vtr.SetFieldData(vtkFieldData())
  vector_vtr.GetFieldData().AddArray(timesteps)

  else:
- vtr_reader = vtkXMLRectilinearGridReader()
- vtr_reader.SetFileName(vtr_path)
- vtr_reader.Update()
- vector_vtr = vtr_reader.GetOutput()
+ vector_vtr = self.__read_vtk_file(vtr_path)

  vtu_reader = vtkXMLUnstructuredGridReader()
  vtu_reader.SetFileName(self.elem_conne_path)
  vtu_reader.Update()
  conne_vtu = vtu_reader.GetOutput()

+ # make sure to drop TIME and INDEX columns if they exist
+ if 'INDEX' in dataframe.columns:
+ dataframe = dataframe.drop(columns=['INDEX'])
+ headers.remove('INDEX')
+ if 'ELEM1' in dataframe.columns:
+ # remove leading spaces from ELEM column
+ dataframe['ELEM1'] = dataframe['ELEM1'].astype(str)
+ dataframe['ELEM1'] = dataframe['ELEM1'].str.lstrip()
+ headers.remove('ELEM1')
+ if 'ELEM2' in dataframe.columns:
+ # remove leading spaces from ELEM column
+ dataframe['ELEM2'] = dataframe['ELEM2'].astype(str)
+ dataframe['ELEM2'] = dataframe['ELEM2'].str.lstrip()
+ headers.remove('ELEM2')

  variable_list = []
+
+ # find max number of cell connections of a element
+ num_of_components = 3
+ for elem_id in range(0, conne_vtu.GetNumberOfPoints()):
+ cellIDs = vtkIdList()
+ conne_vtu.GetPointCells(elem_id, cellIDs)
+ if cellIDs.GetNumberOfIds() > num_of_components:
+ num_of_components = cellIDs.GetNumberOfIds()
+
+
+ # create double array for each header
  for header in headers:
  #if not header == 'ELEM1' and not header == 'ELEM2' and not header == 'INDEX':
  array = vtkDoubleArray()
  array.SetName(header)
- array.SetNumberOfComponents(3)
+ array.SetNumberOfComponents(num_of_components)
  array.SetNumberOfTuples(vector_vtr.GetNumberOfCells())
- array.FillComponent(0, 0)
- array.FillComponent(1, 0)
- array.FillComponent(2, 0)
+ for i in range(0, num_of_components):
+ # set the default value to 0
+ array.FillComponent(i, 0)
  vector_vtr.GetCellData().AddArray(array)
+
  variable_list.append(VisVariable(header, ValueType.Vector, 3))

  if self.current_out_file not in self.variable_list:
  self.variable_list[self.current_out_file] = variable_list
-
+
+
+ # prepare cell data array for cells in conne_vtu
+ for header in headers:
+ array = vtkDoubleArray()
+ array.SetName(header)
+ conne_vtu.GetCellData().AddArray(array)

+ # add celldata to cells in elem_conn
+ for cell_id in range(0, conne_vtu.GetNumberOfCells()):
+ for header in headers:
+ value = dataframe.loc[cell_id, header]
+ conne_vtu.GetCellData().GetArray(header).InsertNextValue(value)
+ #self.__write_vtk_file(
+ #conne_vtu, self.elem_conne_path)
+
+ # create the vector data
  for elem_id in range(0, conne_vtu.GetNumberOfPoints()):
- point = conne_vtu.GetPoint(elem_id)
  cellIDs = vtkIdList()
  conne_vtu.GetPointCells(elem_id, cellIDs)
- next_x = -1
- next_y = -1
- next_z = -1
  for i in range(0, cellIDs.GetNumberOfIds()):
  cellID = cellIDs.GetId(i)
- cell = conne_vtu.GetCell(cellID)
- next_id = cell.GetPointId(1)
-
- if next_id - elem_id == 1:
- next_x = cellID

- elif next_id - elem_id == self.xyz_elem[0]:
- if self.xyz_elem[1] == 1:
- next_z = cellID
- next_y = -1
- break
+ for header in headers:
+ #value = dataframe.loc[next_id, header]
+ value = conne_vtu.GetCellData().GetArray(header).GetValue(cellID)
+ vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, i, value)

+ # Put cell-centered data into points
+ filter = vtkCellDataToPointData()
+ filter.SetInputData(vector_vtr)
+ filter.Update()
+ vtr_cell_to_points = filter.GetOutput()

- else:
- next_y = cellID
-
- elif next_id - elem_id == self.xyz_elem[0] * self.xyz_elem[1]:
- next_z = cellID
-
- for header in headers:
- #if not header == 'ELEM1' and not header == 'ELEM2' and not header == 'INDEX':
- x_value = 0 if next_x == -1 else self.__parse_float(dataframe[header][next_x])
- y_value = 0 if next_y == -1 else self.__parse_float(dataframe[header][next_y])
- z_value = 0 if next_z == -1 else self.__parse_float(dataframe[header][next_z])
- vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, 0, x_value)
- vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, 1, y_value)
- vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, 2, z_value)
+ for i in range(0, vtr_cell_to_points.GetPointData().GetNumberOfArrays()):
+ vector_vtr.GetPointData().AddArray(vtr_cell_to_points.GetPointData().GetArray(i))

  self.__write_vtk_file(
  vector_vtr, self.time_steps_list[index].vtu_file_name)
@@ -1343,8 +1501,8 @@ class vis_reader:
  else:
  is_parallel = self.__checkParallel(elem_conne_vtu)
  # check if polygonal mesh
-
  if os.path.exists(self.setting.corners_file):
+
  if is_parallel:
  self.setting.mesh_type = MeshType.StructuredGridFree
  else:
@@ -1824,6 +1982,7 @@ class vis_reader:
  polygon = vtkPolygon()
  center_point_inserted = False
  center_point_id = distinct_corners_points_locator.FindClosestPoint(point)
+
  for j in range(voronoi_cell.GetNumberOfPoints()):
  cell_point_id = voronoi_cell.GetPointId(j)
  cell_point = voronoi.GetPoint(cell_point_id)
@@ -2186,7 +2345,7 @@ class vis_reader:
  return True

  trimmed = line.lstrip()
- if trimmed[5] == "-" and trimmed[6] == "-":
+ if len(trimmed)>6 and trimmed[5] == "-" and trimmed[6] == "-":
  return True
  return False

@@ -1,10 +1,10 @@
  Metadata-Version: 2.4
  Name: toughanimator
- Version: 0.1.5
+ Version: 0.1.6
  Summary: A tool for visualizing TOUGH simulation outputs.
  Home-page: https://github.com/scarletref/toughanimator
- Author: Your Name
- Author-email: your.email@example.com
+ Author: scarletref
+ Author-email: scarletreflection@gmail.com
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
@@ -1,23 +0,0 @@
- import os
- import tough_classes as ta
- import pandas as pd
- import matplotlib.pyplot as plt
-
- dir_name = "unresolved" #"test_cases
- case_name = "3D five spot MINC"
- test_case_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), dir_name)
-
- #case_dir = os.path.join(test_case_dir, case_name)
- case_dir = r"D:\Projects\202507\intern\P5_eco2n_1D-radial"
-
- #case_dir = r"D:\Projects\202504\polygonal\poly_test"
- #case_dir = r"D:\Projects\202501\toughanimator\test_cases\P5_eco2n_1D-radial"
- reader = ta.vis_reader(case_dir)
- #reader.write_eleme_conne()
- #reader.write_geometry()
- #reader.write_incon()
- #reader.write_result()
- #reader.
- reader.write_all()
-
-