toughanimator 0.1.4__tar.gz → 0.1.6__tar.gz

This diff shows the changes between package versions as they were released to one of the supported public registries; it is provided for informational purposes only.
@@ -1,10 +1,10 @@
  Metadata-Version: 2.4
  Name: toughanimator
- Version: 0.1.4
+ Version: 0.1.6
  Summary: A tool for visualizing TOUGH simulation outputs.
  Home-page: https://github.com/scarletref/toughanimator
- Author: Your Name
- Author-email: your.email@example.com
+ Author: scarletref
+ Author-email: scarletreflection@gmail.com
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
@@ -2,12 +2,12 @@ from setuptools import setup, find_packages
 
  setup(
  name='toughanimator', # Package name on PyPI
- version='0.1.4',
+ version='0.1.6',
  description='A tool for visualizing TOUGH simulation outputs.',
  long_description=open('README.md').read(),
  long_description_content_type='text/markdown',
- author='Your Name',
- author_email='your.email@example.com',
+ author='scarletref',
+ author_email='scarletreflection@gmail.com',
  url='https://github.com/scarletref/toughanimator',
  packages=find_packages(),
  include_package_data=True,
@@ -11,8 +11,8 @@ logging.basicConfig(level=logging.DEBUG)
  # Directory containing all test cases
  parent_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
  TEST_CASES_DIRS = [
- #os.path.join(parent_path, "test_cases"),
- os.path.join(parent_path, "unresolved"),
+ os.path.join(parent_path, "test_cases"),
+ #os.path.join(parent_path, "unresolved"),
  ]
 
 
@@ -3,21 +3,23 @@ import tough_classes as ta
  import pandas as pd
  import matplotlib.pyplot as plt
 
- dir_name = "unresolved" #"test_cases
- case_name = "3D five spot MINC"
+ #dir_name = "unresolved"
+ dir_name = "test_cases"
+ case_name = "PetraSim_2D_Conceptual"
  test_case_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), dir_name)
 
  case_dir = os.path.join(test_case_dir, case_name)
- #case_dir = r"D:\Projects\202506\cake"
+
 
  #case_dir = r"D:\Projects\202504\polygonal\poly_test"
- #case_dir = r"D:\Projects\202501\toughanimator\test_cases\P5_eco2n_1D-radial"
+ #case_dir = r"D:\Projects\202507\tough系列output\tough output format\TR_MINC_exe"
+ #case_dir = r"D:\Projects\202508\tough_cases\WW\7_TR_MINC_petrasim2025__5spot"
+ #case_dir = r"D:\Projects\202508\tough_cases\WW\6_TR_MINC_exe"
  reader = ta.vis_reader(case_dir)
  #reader.write_eleme_conne()
  #reader.write_geometry()
  #reader.write_incon()
  #reader.write_result()
- #reader.
  reader.write_all()
 
 
@@ -69,7 +69,7 @@ class VisVariable:
  }
 
  class VisSetting:
- def __init__(self, input_file_paths, out_file_paths, vis_dir, corners_file="unkown", out_format_type=OutType.Unknown, tough_version = ToughVersion.Unknown, vis_types=[VisType.ParaView, VisType.Tecplot], mesh_type=MeshType.RegularGrid, debug=False, eos="ECO2N", minc=False):
+ def __init__(self, input_file_paths, out_file_paths, vis_dir, corners_file="unkown", out_format_type=OutType.Unknown, tough_version = ToughVersion.Unknown, vis_types=[VisType.ParaView, VisType.Tecplot], mesh_type=MeshType.RegularGrid, debug=False, eos="ECO2N", minc=False, selected_variables_scalar = [], selected_variables_vector = [] ):
  self.mesh_type = mesh_type
  self.out_format_type = out_format_type
  self.vis_types = vis_types
@@ -85,6 +85,8 @@ class VisSetting:
  self.debug = debug
  self.eos = eos
  self.minc = minc
+ self.selected_variables_scalar = selected_variables_scalar
+ self.selected_variables_vector = selected_variables_vector
 
 
  def setBounds(self, x_bounds, y_bounds, z_bounds):
@@ -130,6 +132,8 @@ class vis_reader:
  debug = config['debug'] if 'debug' in config else False,
  eos = config['EOS'] if 'EOS' in config else "ECO2N",
  minc = config['MINC'] if 'MINC' in config else False,
+ selected_variables_scalar = config['selected_variables_scalar'] if 'selected_variables_scalar' in config else [],
+ selected_variables_vector = config['selected_variables_vector'] if 'selected_variables_vector' in config else []
  )
 
  # check if the project is using MINC
@@ -215,6 +219,7 @@ class vis_reader:
  self.current_out_file = output_file_path
  self.__check_TOUGH_version()
  print(f' Version: {self.setting.tough_version.name}')
+ print(f' EOS: {self.setting.eos}')
  if self.setting.tough_version == ToughVersion.TOUGH2:
  self.__read_TOUGH2_CSV_outfile()
  elif self.setting.tough_version == ToughVersion.TOUGH3:
@@ -357,6 +362,7 @@ class vis_reader:
  def __write_incon_buffer(self):
  self.incon_buffer = io.StringIO()
  has_incon = False
+ self.global_incon = False
  # write temp element txt
 
  for input_file_path in self.setting.input_file_paths:
@@ -368,7 +374,7 @@ class vis_reader:
  if line.startswith('INCON-'):
  reading_incon = True
  has_incon = True
- find_incon = True
+ #find_incon = True
  continue
 
  if reading_incon:
@@ -386,9 +392,33 @@ class vis_reader:
  if len(line.split()) == 2:
  self.incon_buffer.write(line)
 
- if has_incon == False:
- print(f'Can\'t find INCON block in input_file_paths.')
- #sys.exit(1)
+
+ if has_incon == False or self.incon_buffer.tell() == 0:
+ print(f'Can\'t find INCON block in input_file_paths (or length of INCON is zero).')
+
+ # find the fifth line of the "PARAM" block
+ reading_pram = False
+ for input_file_path in self.setting.input_file_paths:
+ line_counter = 0
+ with open(input_file_path) as f:
+ reading_pram = False
+ for line in f:
+ if line.startswith('PARAM-'):
+ reading_pram = True
+ continue
+ if reading_pram:
+ line_counter += 1
+ if self.__check_if_block_end(line, line_counter):
+ if line_counter <5:
+ print(f' Can\'t find Global INCON line in PARAM block of {input_file_path}. Please check the PARAM block.')
+ break
+
+ if line_counter == 5:
+ self.global_incon = True
+ print(f' Found Global INCON line in PARAM block of {input_file_path}')
+ self.incon_buffer.write(line)
+ break
+
  else:
  print(f' Found INCON block in {found_path}')
 
@@ -418,16 +448,21 @@ class vis_reader:
  print(f' It is empty in INCON block.')
  return
 
- for header in incon_names:
- array = vtkDoubleArray()
- array.SetName(header)
- self.incon_vtk.GetCellData().AddArray(array)
-
- for i in range(0, self.incon_vtk.GetNumberOfCells()):
+ else:
  for header in incon_names:
- index = self.sequence_dist[i]
- value = self.__parse_float(incon_df[header][index])
- self.incon_vtk.GetCellData().GetArray(header).InsertNextValue(value)
+ array = vtkDoubleArray()
+ array.SetName(header)
+ self.incon_vtk.GetCellData().AddArray(array)
+
+ for i in range(0, self.incon_vtk.GetNumberOfCells()):
+ for header in incon_names:
+ if self.global_incon:
+ # if global incon, use the first row
+ value = self.__parse_float(incon_df[header][0])
+ else:
+ index = self.sequence_dist[i]
+ value = self.__parse_float(incon_df[header][index])
+ self.incon_vtk.GetCellData().GetArray(header).InsertNextValue(value)
 
  extension = os.path.splitext(self.main_geometry)[1]
  self.incon_path = os.path.join(self.setting.vis_dir, f'incon{extension}')
@@ -459,7 +494,11 @@ class vis_reader:
  self.__write_scalar_result(time_step, df, csv_headers)
  elif value_type == ValueType.Vector:
  self.__write_vector_result(time_step, df, csv_headers)
+ else:
+ print('Error: Your value type is not supported')
+ sys.exit(1)
 
+ buffer.flush()
  buffer.close()
 
  with open(self.current_out_file) as f:
@@ -469,14 +508,17 @@ class vis_reader:
 
  if line_number == 0:
  csv_headers = [x.strip() for x in values]
+ #replace all " with ''"
+ csv_headers = [x.replace('"', '') for x in csv_headers]
  if 'ELEM' in csv_headers and 'INDEX' in csv_headers:
  value_type = ValueType.Scalar
- start_index = 1 # remove the first item
+ #start_index = 1 # remove the first item
 
  elif 'ELEM1' in csv_headers and 'ELEM2' in csv_headers:
  value_type = ValueType.Vector
- start_index = 4
+ #start_index = 1
 
+ start_index = 1
  # remove the first "TIME" header (to reduce the number of columns)
  csv_headers = csv_headers[start_index:]
 
@@ -504,7 +546,6 @@ class vis_reader:
  process_chunk()
 
 
-
  def __read_TOUGH3_CSV_outfile(self):
  scalar_buffer = io.StringIO()
  current_time_step = None
@@ -516,6 +557,93 @@ class vis_reader:
  start_index = -1
  self.time_steps_list = []
 
+ with open(self.current_out_file) as f:
+ for line in f:
+ line_number = line_number + 1
+ values = line.strip().split(',')
+ if line_number == 0:
+ values = [x.replace('"', '') for x in values]
+ csv_headers = [x.strip() for x in values]
+
+ if 'ELEM' in csv_headers:
+ value_type = ValueType.Scalar
+ start_index = 5
+
+ elif 'ELEM1' in csv_headers and 'ELEM2' in csv_headers:
+ value_type = ValueType.Vector
+ start_index = 5
+
+ csv_headers = csv_headers[start_index:]
+ f.readline() # skip next line
+ print(f' Value type: {value_type.name}')
+ continue
+
+ # Find time item
+ if len(values) == 1:
+ time_string = values[0].replace('"', '').strip()
+ time_string = time_string.split()[-1]
+ time = self.__parse_float(time_string)
+
+ # if not the first time step
+ if value_type == ValueType.Scalar and reading_number == self.number_of_elements:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_scalar_result(
+ current_time_step, df, csv_headers)
+ scalar_buffer.flush()
+ scalar_buffer.close()
+ scalar_buffer = io.StringIO()
+ reading_number = 0
+
+ if value_type == ValueType.Vector and reading_number == self.number_of_connections:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_vector_result(
+ current_time_step, df, csv_headers)
+ scalar_buffer.flush()
+ scalar_buffer.close()
+ scalar_buffer = io.StringIO()
+ reading_number = 0
+
+ current_time_step = VisTimeStep(
+ time=float(time),
+ time_step=tim_step_counter,
+ iteration=1
+ )
+
+ # Initialize buffer
+ header_string = ','.join(csv_headers)
+ scalar_buffer.write(header_string + '\n')
+ self.time_steps_list.append(current_time_step)
+ tim_step_counter = tim_step_counter + 1
+
+ else:
+ scalar_buffer.write(','.join(values[start_index:]) + '\n')
+ reading_number = reading_number + 1
+
+ else:
+ # write the last time step
+ if value_type == ValueType.Scalar:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_scalar_result(current_time_step, df, csv_headers)
+ if value_type == ValueType.Vector:
+ scalar_buffer.seek(0)
+ df = pd.read_csv(scalar_buffer, sep=',', header=0)
+ self.__write_vector_result(current_time_step, df, csv_headers)
+ scalar_buffer.close()
+
+ def __old_read_TOUGH3_CSV_outfile(self):
+ scalar_buffer = io.StringIO()
+ current_time_step = None
+ tim_step_counter = 1
+ csv_headers = []
+ line_number = -1
+ reading_number = 0
+ value_type = ValueType.Unknown
+ start_index = -1
+ self.time_steps_list = []
+
  with open(self.current_out_file) as f:
  for line in f:
  line_number = line_number + 1
@@ -676,7 +804,7 @@ class vis_reader:
  if vtr.GetCellData().GetArray("PRES") is not None:
  p_name = "PRES"
 
- if vtr.GetCellData().GetArray(p_name) is not None:
+ if vtr.GetCellData().GetArray(p_name) is not None and self.incon_vtk.GetCellData().GetArray('Pressure') is not None:
  delPArray = vtkDoubleArray()
  delPArray.SetName(f'del_{p_name}')
  for i in range(0, vtr.GetNumberOfCells()):
@@ -786,13 +914,13 @@ class vis_reader:
  vtr.GetPointData().AddArray(vtr_cell_to_points.GetPointData().GetArray(variabl_name))
 
  if len(post_variable_list) > 0:
- self.variable_list["psot"] = post_variable_list
+ self.variable_list["post"] = post_variable_list
  self.__write_vtk_file(vtr, vtr_path)
 
 
- def __write_scalar_result(self, vis_time_step, dataframe, headers):
-
+ def __write_scalar_result(self, vis_time_step, dataframe, csv_headers):
 
+ headers = csv_headers.copy()
  index = self.time_steps_list.index(vis_time_step)
  #vtr_path = os.path.join(self.setting.vis_dir, 'paraview', f'time_step_{vis_time_step.time_step}.vtr')
 
@@ -816,7 +944,6 @@ class vis_reader:
  else:
  scalar_vtr = self.__read_vtk_file(vtr_path)
 
-
  vtr = scalar_vtr
 
  variable_list = []
@@ -826,6 +953,8 @@ class vis_reader:
  dataframe = dataframe.drop(columns=['INDEX'])
  headers.remove('INDEX')
  if 'ELEM' in dataframe.columns:
+ # change the data type of ELEM to string
+ dataframe['ELEM'] = dataframe['ELEM'].astype(str)
  # remove leading spaces from ELEM column
  dataframe['ELEM'] = dataframe['ELEM'].str.lstrip()
  headers.remove('ELEM')
@@ -846,6 +975,9 @@ class vis_reader:
  index = self.sequence_dist[i]
  if 'ELEM' in dataframe.columns:
  index = dataframe['ELEM'].tolist().index(elemID)
+ #elem_string = dataframe['ELEM'].iloc[index]
+ #target_row = dataframe.iloc[index]
+ #print(f' Processing ELEM {elem_string} at index {index}')
  for header in headers:
  value = float(self.__parse_float(dataframe[header].iloc[index]))
  vtr.GetCellData().GetArray(header).InsertNextValue(value)
@@ -872,6 +1004,11 @@ class vis_reader:
  if VisType.Tecplot not in self.setting.vis_types:
  return
 
+
+ if self.setting.mesh_type == MeshType.PolygonalMesh:
+ print(f' Tecplot output for polygonal mesh is not supported yet.')
+ return
+
  # Start Tecplot generating
  tec_name = pathlib.Path(self.setting.input_file_paths[0]).stem
  self.tec_scalar_path = os.path.join(self.setting.vis_dir, f'{tec_name}_scalar.dat')
@@ -881,10 +1018,11 @@ class vis_reader:
  file = open(self.tec_scalar_path, "a")
  if firstFile:
  file.write('TITLE = TECPLOT PLOT \n')
- header_string = '"'+'", "'.join(headers) + '"'
- file.write(f'VARIABLES = "X", "Y", "Z", {header_string}\n')
+ selected_header_string = '"'+'", "'.join(self.setting.selected_variables_scalar) + '"'
+ #header_string = '"'+'", "'.join(headers) + '"'
+ file.write(f'VARIABLES = "X", "Y", "Z", {selected_header_string}\n')
 
- tecplot_cell_type = 'BRICK'
+ #tecplot_cell_type = 'BRICK'
 
  #time_statement = f'ZONE T ="{vis_time_step.time_step}, Time = {vis_time_step.time}", N = {vtu_cell_to_points.GetNumberOfPoints()}, E = {vtu_cell_to_points.GetNumberOfCells()}, F = FEPOINT, ET = {tecplot_cell_type}, SOLUTIONTIME = {vis_time_step.time}\n'
 
@@ -913,7 +1051,7 @@ class vis_reader:
  file.write(" \n")
 
  # Other data
- for header in headers:
+ for header in self.setting.selected_variables_scalar:
  array = vtr.GetCellData().GetArray(header)
 
  for e in range(0, vtr.GetNumberOfCells()):
@@ -940,8 +1078,9 @@ class vis_reader:
  return var_string
 
  # write the vector result for one timestep
- def __write_vector_result(self, vis_time_step, dataframe, headers):
+ def __write_vector_result(self, vis_time_step, dataframe, csv_headers):
 
+ headers = csv_headers.copy()
  index = self.time_steps_list.index(vis_time_step)
  extension = os.path.splitext(self.main_geometry)[1]
  vtr_path = os.path.join(self.setting.vis_dir, 'paraview', f'time_step_{vis_time_step.time_step}{extension}')
@@ -955,74 +1094,96 @@ class vis_reader:
  timesteps.SetNumberOfTuples(1)
  timesteps.SetNumberOfComponents(1)
  timesteps.SetTuple1(0, vis_time_step.time)
+ #timesteps.SetTuple2(1, 100000)
  vector_vtr.SetFieldData(vtkFieldData())
  vector_vtr.GetFieldData().AddArray(timesteps)
 
  else:
- vtr_reader = vtkXMLRectilinearGridReader()
- vtr_reader.SetFileName(vtr_path)
- vtr_reader.Update()
- vector_vtr = vtr_reader.GetOutput()
+ vector_vtr = self.__read_vtk_file(vtr_path)
 
  vtu_reader = vtkXMLUnstructuredGridReader()
  vtu_reader.SetFileName(self.elem_conne_path)
  vtu_reader.Update()
  conne_vtu = vtu_reader.GetOutput()
 
+ # make sure to drop TIME and INDEX columns if they exist
+ if 'INDEX' in dataframe.columns:
+ dataframe = dataframe.drop(columns=['INDEX'])
+ headers.remove('INDEX')
+ if 'ELEM1' in dataframe.columns:
+ # remove leading spaces from ELEM column
+ dataframe['ELEM1'] = dataframe['ELEM1'].astype(str)
+ dataframe['ELEM1'] = dataframe['ELEM1'].str.lstrip()
+ headers.remove('ELEM1')
+ if 'ELEM2' in dataframe.columns:
+ # remove leading spaces from ELEM column
+ dataframe['ELEM2'] = dataframe['ELEM2'].astype(str)
+ dataframe['ELEM2'] = dataframe['ELEM2'].str.lstrip()
+ headers.remove('ELEM2')
 
  variable_list = []
+
+ # find max number of cell connections of a element
+ num_of_components = 3
+ for elem_id in range(0, conne_vtu.GetNumberOfPoints()):
+ cellIDs = vtkIdList()
+ conne_vtu.GetPointCells(elem_id, cellIDs)
+ if cellIDs.GetNumberOfIds() > num_of_components:
+ num_of_components = cellIDs.GetNumberOfIds()
+
+
+ # create double array for each header
  for header in headers:
  #if not header == 'ELEM1' and not header == 'ELEM2' and not header == 'INDEX':
  array = vtkDoubleArray()
  array.SetName(header)
- array.SetNumberOfComponents(3)
+ array.SetNumberOfComponents(num_of_components)
  array.SetNumberOfTuples(vector_vtr.GetNumberOfCells())
- array.FillComponent(0, 0)
- array.FillComponent(1, 0)
- array.FillComponent(2, 0)
+ for i in range(0, num_of_components):
+ # set the default value to 0
+ array.FillComponent(i, 0)
  vector_vtr.GetCellData().AddArray(array)
+
  variable_list.append(VisVariable(header, ValueType.Vector, 3))
 
  if self.current_out_file not in self.variable_list:
  self.variable_list[self.current_out_file] = variable_list
-
+
 
+ # prepare cell data array for cells in conne_vtu
+ for header in headers:
+ array = vtkDoubleArray()
+ array.SetName(header)
+ conne_vtu.GetCellData().AddArray(array)
+
+ # add celldata to cells in elem_conn
+ for cell_id in range(0, conne_vtu.GetNumberOfCells()):
+ for header in headers:
+ value = dataframe.loc[cell_id, header]
+ conne_vtu.GetCellData().GetArray(header).InsertNextValue(value)
+ #self.__write_vtk_file(
+ #conne_vtu, self.elem_conne_path)
+
+ # create the vector data
  for elem_id in range(0, conne_vtu.GetNumberOfPoints()):
- point = conne_vtu.GetPoint(elem_id)
  cellIDs = vtkIdList()
  conne_vtu.GetPointCells(elem_id, cellIDs)
- next_x = -1
- next_y = -1
- next_z = -1
  for i in range(0, cellIDs.GetNumberOfIds()):
  cellID = cellIDs.GetId(i)
- cell = conne_vtu.GetCell(cellID)
- next_id = cell.GetPointId(1)
-
- if next_id - elem_id == 1:
- next_x = cellID
 
- elif next_id - elem_id == self.xyz_elem[0]:
- if self.xyz_elem[1] == 1:
- next_z = cellID
- next_y = -1
- break
+ for header in headers:
+ #value = dataframe.loc[next_id, header]
+ value = conne_vtu.GetCellData().GetArray(header).GetValue(cellID)
+ vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, i, value)
 
+ # Put cell-centered data into points
+ filter = vtkCellDataToPointData()
+ filter.SetInputData(vector_vtr)
+ filter.Update()
+ vtr_cell_to_points = filter.GetOutput()
 
- else:
- next_y = cellID
-
- elif next_id - elem_id == self.xyz_elem[0] * self.xyz_elem[1]:
- next_z = cellID
-
- for header in headers:
- #if not header == 'ELEM1' and not header == 'ELEM2' and not header == 'INDEX':
- x_value = 0 if next_x == -1 else self.__parse_float(dataframe[header][next_x])
- y_value = 0 if next_y == -1 else self.__parse_float(dataframe[header][next_y])
- z_value = 0 if next_z == -1 else self.__parse_float(dataframe[header][next_z])
- vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, 0, x_value)
- vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, 1, y_value)
- vector_vtr.GetCellData().GetArray(header).SetComponent(elem_id, 2, z_value)
+ for i in range(0, vtr_cell_to_points.GetPointData().GetNumberOfArrays()):
+ vector_vtr.GetPointData().AddArray(vtr_cell_to_points.GetPointData().GetArray(i))
 
  self.__write_vtk_file(
  vector_vtr, self.time_steps_list[index].vtu_file_name)
@@ -1340,8 +1501,8 @@ class vis_reader:
  else:
  is_parallel = self.__checkParallel(elem_conne_vtu)
  # check if polygonal mesh
-
  if os.path.exists(self.setting.corners_file):
+
  if is_parallel:
  self.setting.mesh_type = MeshType.StructuredGridFree
  else:
@@ -1821,6 +1982,7 @@ class vis_reader:
  polygon = vtkPolygon()
  center_point_inserted = False
  center_point_id = distinct_corners_points_locator.FindClosestPoint(point)
+
  for j in range(voronoi_cell.GetNumberOfPoints()):
  cell_point_id = voronoi_cell.GetPointId(j)
  cell_point = voronoi.GetPoint(cell_point_id)
@@ -2183,7 +2345,7 @@ class vis_reader:
  return True
 
  trimmed = line.lstrip()
- if trimmed[5] == "-" and trimmed[6] == "-":
+ if len(trimmed)>6 and trimmed[5] == "-" and trimmed[6] == "-":
  return True
  return False
 
@@ -1,10 +1,10 @@
  Metadata-Version: 2.4
  Name: toughanimator
- Version: 0.1.4
+ Version: 0.1.6
  Summary: A tool for visualizing TOUGH simulation outputs.
  Home-page: https://github.com/scarletref/toughanimator
- Author: Your Name
- Author-email: your.email@example.com
+ Author: scarletref
+ Author-email: scarletreflection@gmail.com
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
File without changes
File without changes