labfreed 0.0.8__py2.py3-none-any.whl → 0.0.10__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
labfreed/validation.py CHANGED
@@ -1,71 +1,149 @@
  from pydantic import BaseModel, Field, PrivateAttr
  from typing import List, Set, Tuple

+ from rich import print
+ from rich.text import Text
+

  domain_name_pattern = r"(?!-)([A-Za-z0-9-]{1,63}(?<!-)\.)+[A-Za-z]{2,63}"
  hsegment_pattern = r"[A-Za-z0-9_\-\.~!$&'()+,:;=@]|%[0-9A-Fa-f]{2}"


- class ValidationWarning(BaseModel):
+ class ValidationMessage(BaseModel):
      source:str
      type: str
      problem_msg:str
      recommendation_msg: str = ""
      highlight:str = "" #this can be used to highlight problematic parts
      highlight_sub:list[str] = Field(default_factory=list())
+
+     @property
+     def emphazised_highlight(self):
+         fmt = lambda s: f'[emph]{s}[/emph]'
+
+         if not self.highlight_sub:
+             return fmt(self.highlight)
+
+         result = []
+         for c in self.highlight:
+             if c in self.highlight_sub:
+                 result.append(fmt(c))
+             else:
+                 result.append(c)
+
+         return ''.join(result)
+
+
+ class LabFREEDValidationError(ValueError):
+     def __init__(self, message=None, validation_msgs=None):
+         super().__init__(message)
+         self._validation_msgs = validation_msgs
+
+     @property
+     def validation_msgs(self):
+         return self._validation_msgs




- class BaseModelWithWarnings(BaseModel):
+ class BaseModelWithValidationMessages(BaseModel):
      """ Extension of Pydantic BaseModel, so that validator can issue warnings.
      The purpose of that is to allow only minimal validation but on top check for stricter recommendations"""
-     _warnings: list[ValidationWarning] = PrivateAttr(default_factory=list)
+     _validation_messages: list[ValidationMessage] = PrivateAttr(default_factory=list)

-     def add_warning(self, *, msg: str, type:str, recommendation:str="", source:str="", highlight_pattern="", highlight_sub=None):
+     def add_validation_message(self, *, msg: str, type:str, recommendation:str="", source:str="", highlight_pattern="", highlight_sub=None):
          if not highlight_sub:
              highlight_sub = []
-         w = ValidationWarning(problem_msg=msg, recommendation_msg=recommendation, source=source, type=type, highlight=highlight_pattern, highlight_sub=highlight_sub)
-         if not w in self._warnings:
-             self._warnings.append(w)
+         w = ValidationMessage(problem_msg=msg, recommendation_msg=recommendation, source=source, type=type, highlight=highlight_pattern, highlight_sub=highlight_sub)

-     def get_warnings(self) -> list[ValidationWarning]:
-         return self._warnings
+         if not w in self._validation_messages:
+             self._validation_messages.append(w)

-     def clear_warnings(self):
-         self._warnings.clear()
+     def get_validation_messages(self) -> list[ValidationMessage]:
+         return self._validation_messages

+     def get_errors(self) -> list[ValidationMessage]:
+         return filter_errors(self._validation_messages)

- # Function to extract warnings from a model and its nested models
- def extract_warnings(model: BaseModelWithWarnings, parent_name: str = "", visited: Set[int] = None) -> List[ValidationWarning]:
-     """
-     Recursively extract warnings from a Pydantic model and its nested fields.
+     def get_warnings(self) -> list[ValidationMessage]:
+         return filter_warnings(self._validation_messages)

-     :param model: The Pydantic model instance to inspect.
-     :param parent_name: The name of the parent model to track the path.
-     :return: List of tuples containing (model name, warning message).
-     """
-     if visited is None:
-         visited = set()
-
-     model_id = id(model)
-     if model_id in visited:
-         return []
-     visited.add(model_id)
-
-     warnings_list = [(parent_name or model.__class__.__name__, model_id, warning) for warning in model.get_warnings()]
+     def is_valid(self) -> bool:
+         return len(filter_errors(self.get_nested_validation_messages())) == 0
+
+     # Function to extract warnings from a model and its nested models
+     def get_nested_validation_messages(self, parent_name: str = "", visited: Set[int] = None) -> List[ValidationMessage]:
+         """
+         Recursively extract warnings from a Pydantic model and its nested fields.
+
+         :param model: The Pydantic model instance to inspect.
+         :param parent_name: The name of the parent model to track the path.
+         :return: List of tuples containing (model name, warning message).
+         """
+         if visited is None:
+             visited = set()
+
+         model_id = id(self)
+         if model_id in visited:
+             return []
+         visited.add(model_id)
+
+         warnings_list = [warning for warning in self.get_validation_messages()]
+         # warnings_list = [(parent_name or self.__class__.__name__, model_id, warning) for warning in self.get_validation_messages()]
+

-     for field_name, field in model.__fields__.items():
-         full_path = f"{parent_name}.{field_name}" if parent_name else field_name
-         value = getattr(model, field_name)
+         for field_name, field in self.__fields__.items():
+             full_path = f"{parent_name}.{field_name}" if parent_name else field_name
+             value = getattr(self, field_name)

-         if isinstance(value, BaseModelWithWarnings):
-             warnings_list.extend(extract_warnings(value, full_path, visited))
-         elif isinstance(value, list):
-             for index, item in enumerate(value):
-                 if isinstance(item, BaseModelWithWarnings):
-                     list_path = f"{full_path}[{index}]"
-                     warnings_list.extend(extract_warnings(item, list_path, visited))
+             if isinstance(value, BaseModelWithValidationMessages):
+                 warnings_list.extend(value.get_nested_validation_messages(full_path, visited))
+             elif isinstance(value, list):
+                 for index, item in enumerate(value):
+                     if isinstance(item, BaseModelWithValidationMessages):
+                         list_path = f"{full_path}[{index}]"
+                         warnings_list.extend(item.get_nested_validation_messages(list_path, visited))
+         return warnings_list
+
+
+     def get_nested_errors(self) -> list[ValidationMessage]:
+         return filter_errors(self.get_nested_validation_messages())
+
+     def get_nested_warnings(self) -> list[ValidationMessage]:
+         return filter_warnings(self.get_nested_validation_messages())
+
+
+     def print_validation_messages(self, str_to_highlight_in=None):
+         if not str_to_highlight_in:
+             str_to_highlight_in = str(self)
+         msgs = self.get_nested_validation_messages()
+         print('\n'.join(['\n',
+                          '=======================================',
+                          'Validation Results',
+                          '---------------------------------------'
+                          ]
+                         )
+               )
+
+         for m in msgs:
+             if m.type.casefold() == "error":
+                 color = 'red'
+             else:
+                 color = 'yellow'
+
+             text = Text.from_markup(f'\n [bold {color}]{m.type} [/bold {color}] in \t {m.source}' )
+             print(text)
+             formatted_highlight = m.emphazised_highlight.replace('emph', f'bold {color}')
+             fmtd = str_to_highlight_in.replace(m.highlight, formatted_highlight)
+             fmtd = Text.from_markup(fmtd)
+             print(fmtd)
+             print(Text.from_markup(f'{m.problem_msg}'))
+
+
+
+ def filter_errors(val_msg:list[ValidationMessage]) -> list[ValidationMessage]:
+     return [ m for m in val_msg if m.type.casefold() == "error" ]

-     return warnings_list
+ def filter_warnings(val_msg:list[ValidationMessage]) -> list[ValidationMessage]:
+     return [ m for m in val_msg if m.type.casefold() != "error" ]
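
The renamed API above is easier to follow with a short usage sketch. The model below (Issuer and its validator) is hypothetical and not part of the package; it assumes the pydantic v2 API that the package already uses elsewhere, and shows how a subclass of BaseModelWithValidationMessages collects messages during validation and how a caller inspects them afterwards:

    # Hypothetical model, for illustration only
    from pydantic import model_validator
    from labfreed.validation import BaseModelWithValidationMessages, LabFREEDValidationError

    class Issuer(BaseModelWithValidationMessages):
        name: str

        @model_validator(mode='after')
        def check_uppercase(self):
            # record a non-fatal message instead of raising immediately
            if not self.name.isupper():
                self.add_validation_message(
                    msg='issuer should be upper case',
                    type='Warning',
                    source='Issuer',
                    highlight_pattern=self.name,
                )
            return self

    m = Issuer(name='mettorius.com')
    print(m.is_valid())                  # True: a Warning is not an error
    for w in m.get_nested_warnings():    # also walks nested models and lists
        print(w.problem_msg)
    if not m.is_valid():
        raise LabFREEDValidationError('PAC-ID is invalid', validation_msgs=m.get_nested_errors())

Only messages whose type compares equal to "error" (case-insensitively) make is_valid() return False; every other type is reported as a warning.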
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: labfreed
- Version: 0.0.8
+ Version: 0.0.10
  Summary: Python implementation of LabFREED building blocks
  Author-email: Reto Thürer <thuerer.r@buchi.com>
  License-Expression: MIT
@@ -0,0 +1,22 @@
+ labfreed/__init__.py,sha256=OWHCq64GWIUC0FwQzQWmRekkMBw8RG2IMyQYi-AKqaY,88
+ labfreed/parse_pac.py,sha256=HA3-jAnw2crsXMW_D7Tw-z99qnUWL5MBQVXEzdYP2m4,6287
+ labfreed/validation.py,sha256=QwkZWJhAjWbPUZtJJwjVYsw9TxeFhdbZaKjrPPIpuAA,5937
+ labfreed/DisplayNameExtension/DisplayNameExtension.py,sha256=l9JZY2eRS0V-H5h3-WXIHiiBJuljns-_e_t9Bp84_CU,1155
+ labfreed/PAC_CAT/__init__.py,sha256=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN_XKdLCPjaYaY,2
+ labfreed/PAC_CAT/data_model copy.py,sha256=JWMVkwkX9vWZayOLOzdTHk3VZVYBuyupumNqL-cWCxU,9611
+ labfreed/PAC_CAT/data_model.py,sha256=pcib1lEQuqejWP7dfmPUtLakz-y-zeDb9CIe94Jmz0A,13677
+ labfreed/PAC_ID/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ labfreed/PAC_ID/data_model.py,sha256=g09qgC-TV6fjJw9VyDF6mTJ6co2i2RKZc0Z-BmiiUIQ,7483
+ labfreed/PAC_ID/extensions.py,sha256=bvuZnlNKUdwsDLrPm8fyifqPn_PR4wCVkkScFnvRiuM,1158
+ labfreed/TREX/UneceUnits.json,sha256=kwfQSp_nTuWbADfBBgqTWrvPl6XtM5SedEVLbMJrM7M,898953
+ labfreed/TREX/data_model.py,sha256=_xhnYGMcMPa0uf_020epq88zqHT1wdsUPC2ELJcSRWE,29684
+ labfreed/TREX/parse.py,sha256=86962VEJpkrTcT436iFIB5dNed5WHABzpjxRjkA3PXo,2043
+ labfreed/TREX/unece_units.py,sha256=scPKdsPzY1neAdFOhA08_tRZaR-yplM8mBhIzzDqZBk,3006
+ labfreed/utilities/base36.py,sha256=_yX8aQ1OwrK5tnJU1NUEzQSFGr9xAVnNvPObpNzCPYs,2895
+ labfreed/utilities/extension_intertpreters.py,sha256=B3IFJLfVMJQuPfBBtX6ywlDUZEi7_x6tY4g8V7SpWSs,124
+ labfreed/utilities/utility_types.py,sha256=Zhk8Mu4hHjkn1gs8oh7vOxxaT7L7wLMVG40ZOWCKGK4,2865
+ labfreed/utilities/well_known_keys.py,sha256=nqk66kHdSwJTJfMKlP-xQbBglS8F_NoWsGkfOVITFN0,331
+ labfreed-0.0.10.dist-info/licenses/LICENSE,sha256=gHFOv9FRKHxO8cInP3YXyPoJnuNeqrvcHjaE_wPSsQ8,1100
+ labfreed-0.0.10.dist-info/WHEEL,sha256=BXjIu84EnBiZ4HkNUBN93Hamt5EPQMQ6VkF7-VZ_Pu0,100
+ labfreed-0.0.10.dist-info/METADATA,sha256=ukautezUMEqYcU-eqihpy3uyTiBIRqg8PCVc_NSAJ1c,207
+ labfreed-0.0.10.dist-info/RECORD,,
labfreed/PAC_ID/parse.py DELETED
@@ -1,142 +0,0 @@
-
-
- import re
- from types import MappingProxyType
- from .data_model import *
-
- from ..validation import extract_warnings, ValidationWarning
-
-
- category_conventions = MappingProxyType(
-     {
-         '-MD': ['240', '21'],
-         '-MS': ['240', '10', '20', '21', '250'],
-         '-MC': ['240', '10', '20', '21', '250'],
-         '-MM': ['240', '10', '20', '21', '250']
-     }
- )
-
-
- extension_convention = MappingProxyType(
-     {
-         0: { 'name': 'N', 'type': 'N'},
-         1: { 'name': 'SUM', 'type': 'TREX'}
-     }
- )
-
-
-
- class PAC_Parser():
-
-     def __init__(self, extension_interpreters:dict[str, Extension]=None):
-         self.extension_interpreters = extension_interpreters or {}
-
-     def parse_pac_url(self, pac_url:str) -> tuple[PACID_With_Extensions, list[ValidationWarning] ]:
-         if '*' in pac_url:
-             id_str, ext_str = pac_url.split('*', 1)
-         else:
-             id_str = pac_url
-             ext_str = ""
-
-         pac_id = self.parse_pac_id(id_str)
-         extensions = self.parse_extensions(ext_str)
-
-         pac_with_extension = PACID_With_Extensions(pac_id=pac_id, extensions=extensions)
-         warnings = extract_warnings(pac_with_extension)
-
-         return pac_with_extension, warnings
-
-
-     def parse_id_segments(self, identifier:str):
-         if not identifier:
-             return []
-
-         id_segments = list()
-         if len(identifier) > 0 and identifier[0] == '/':
-             identifier = identifier[1:]
-         for s in identifier.split('/'):
-             tmp = s.split(':')
-
-             if len(tmp) == 1:
-                 segment = IDSegment(value=tmp[0])
-             elif len(tmp) == 2:
-                 segment = IDSegment(key=tmp[0], value=tmp[1])
-             else:
-                 raise ValueError(f'invalid segment: {s}')
-
-             id_segments.append(segment)
-         return id_segments
-
-
-     def _apply_category_defaults(self, segments_in: list[IDSegment]):
-
-         segments = segments_in.copy()
-         default_keys = None
-         for s in segments:
-             if not s.key and default_keys:
-                 s.key = default_keys.pop(0)
-             else:
-                 default_keys = None
-
-             # category starts: start with new defaults.
-             if s.value in category_conventions.keys():
-                 default_keys = category_conventions.get(s.value).copy() #copy, so the entries can be popped when used
-         return segments
-
-
-
-     def parse_pac_id(self,id_str:str) -> PACID:
-         m = re.match(f'(HTTPS://)?(PAC.)?(?P<issuer>.+?\..+?)/(?P<identifier>.*)', id_str)
-         d = m.groupdict()
-
-         id_segments = list()
-         default_keys = None
-         id_segments = self.parse_id_segments(d.get('identifier'))
-         id_segments = self._apply_category_defaults(id_segments)
-
-         pac = PACID(issuer= d.get('issuer'),
-                     identifier=Identifier(segments=id_segments)
-                     )
-         return pac
-
-
-     def parse_extensions(self, extensions_str:str|None) -> list[Extension]:
-         extensions = list()
-
-         if not extensions_str:
-             return extensions
-
-         defaults = extension_convention
-         for i, e in enumerate(extensions_str.split('*')):
-             if e == '': #this will happen if first extension starts with *
-                 continue
-             d = re.match('((?P<name>.+)\$(?P<type>.+)/)?(?P<data>.+)', e).groupdict()
-
-             name = d.get('name')
-             type = d.get('type')
-             data = d.get('data')
-
-             if name:
-                 defaults = None # once a name was specified no longer assign defaults
-             else:
-                 if defaults:
-                     name = defaults.get(i).get('name')
-                     type = defaults.get(i).get('type')
-                 else:
-                     raise ValueError('extension number {i}, must have name and type')
-
-             #convert to subtype if they were given
-             subtype = self.extension_interpreters.get(type) or UnknownExtension
-             e = subtype.from_spec_fields(name=name, type=type, data=data)
-             extensions.append(e)
-
-         return extensions
-
-
-
-
-
- if __name__ == "__main__":
-     pacid_str = 'HTTPS://PAC.METTORIUS.COM/-DR/AB378/-MD/B-500/1235/-MS/AB/X:88/WWW/-MS/240:11/BB*ABCFD*A$HUR:25+B$CEL:99*BLUBB$TREX/A$HUR:25+B$CEL:99'
-
-     pac = PAC_Parser().parse_pac(pacid_str)
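
For reference, the core of the removed parser was splitting the identifier part of a PAC-ID into '/'-separated segments with optional 'key:value' pairs, before filling in default keys from category_conventions. A standalone sketch of that segment-splitting step, using plain (key, value) tuples instead of the deleted IDSegment model; the example identifier is adapted from the example URL in the deleted module:

    # Illustrative re-implementation of the deleted parse_id_segments();
    # plain tuples stand in for the removed IDSegment model.
    def parse_id_segments(identifier: str) -> list[tuple[str | None, str]]:
        if not identifier:
            return []
        segments = []
        for s in identifier.lstrip('/').split('/'):
            parts = s.split(':')
            if len(parts) == 1:        # bare value; key may later come from category defaults
                segments.append((None, parts[0]))
            elif len(parts) == 2:      # explicit key:value segment
                segments.append((parts[0], parts[1]))
            else:
                raise ValueError(f'invalid segment: {s}')
        return segments

    print(parse_id_segments('-MD/B-500/1235/-MS/240:11/BB'))
    # [(None, '-MD'), (None, 'B-500'), (None, '1235'), (None, '-MS'), ('240', '11'), (None, 'BB')]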
@@ -1,60 +0,0 @@
-
- from .data_model import *
-
-
-
- class PAC_Serializer():
-     def to_url(self, pac:PACID|PACID_With_Extensions, extensions:list[Extension]=None, use_short_notation_for_extensions=False, uppercase_only=False) -> str:
-         if isinstance(pac, PACID_With_Extensions):
-             if extensions:
-                 raise ValueError('Extensions were given twice, as part of PACID_With_Extension and as method parameter.')
-             extensions = pac.extensions
-             pac = pac.pac_id
-         issuer = pac.issuer
-         extensions_str = self._serialize_extensions(extensions, use_short_notation_for_extensions)
-         id_segments = self._serialize_id_segments(pac.identifier.segments)
-         out = f"HTTPS://PAC.{issuer}{id_segments}{extensions_str}"
-         if uppercase_only:
-             out = out.upper()
-         return out
-
-
-     def _serialize_id_segments(self, segments):
-         out = ''
-         for s in segments:
-             if s.key:
-                 out += f'/{s.key}:{s.value}'
-             else:
-                 out += f'/{s.value}'
-         return out
-
-
-     def _serialize_extensions(self, extensions:list[Extension], use_short_notation_for_extensions):
-         out = ''
-         short_notation = use_short_notation_for_extensions
-         for i, e in enumerate(extensions):
-
-             if short_notation and i==0:
-                 if e.name=='N':
-                     out += f'*{e.data}'
-                     continue
-                 else:
-                     short_notation = False
-             if short_notation and i==1:
-                 if e.name=='SUM':
-                     out += f'*{e.data}'
-                     continue
-                 else:
-                     short_notation = False
-
-             out += f'*{e.name}${e.type}/{e.data}'
-         return out
-
-
-
- def main():
-     pass
-
-
- if __name__ == "__main__":
-     main()
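
The removed serializer performed the inverse mapping back to a URL. Its only non-obvious rule is the short notation for the first two extensions: a leading 'N' (name) extension and a following 'SUM' (summary T-REX) extension may drop their 'name$type/' prefix. A standalone sketch of that rule, with extensions reduced to (name, type, data) tuples purely for illustration:

    # Sketch of the removed short-notation rule for extensions.
    def serialize_extensions(extensions: list[tuple[str, str, str]], short: bool = False) -> str:
        out = ''
        for i, (name, type_, data) in enumerate(extensions):
            if short and i == 0 and name == 'N':
                out += f'*{data}'          # first extension: name/type implied
                continue
            if short and i == 1 and name == 'SUM':
                out += f'*{data}'          # second extension: name/type implied
                continue
            short = False                  # any deviation ends the short notation
            out += f'*{name}${type_}/{data}'
        return out

    exts = [('N', 'N', 'ABCFD'), ('SUM', 'TREX', 'A$HUR:25+B$CEL:99')]
    print(serialize_extensions(exts, short=True))   # *ABCFD*A$HUR:25+B$CEL:99
    print(serialize_extensions(exts, short=False))  # *N$N/ABCFD*SUM$TREX/A$HUR:25+B$CEL:99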
@@ -1,239 +0,0 @@
- from datetime import datetime
- from enum import Enum
- import logging
- import re
-
- from pydantic import BaseModel, ValidationError, field_validator
- from abc import ABC
-
- from .unit_utilities import *
- from ..PAC_ID.data_model import Extension
-
- re_table_pattern = re.compile(f"(?P<tablename>[\w\.-]*?)\$\$(?P<header>[\w\.,\$:]*?)::(?P<body>.*)")
- re_col_head_pattern = re.compile(f"(?P<name>[\w\.-]*?)\$(?P<unit>[\w\.]*)")
- re_scalar_pattern = re.compile(f"(?P<name>[\w\.-]*?)\$(?P<unit>[\w\.]*?):(?P<value>.*)")
-
- TREX_DATEFORMAT = '%Y%m%dT%H%M%S'
- TREX_TIMEFORMAT = '%Y%m%d'
-
- class TREX_types(Enum):
-     BOOL = 'T.B'
-     DATE = 'T.D'
-     TEXT = 'T.A'
-     ERROR = 'E'
-
-
-
- class T_REX_Segment_ParseError(BaseException):
-     pass
-
-
- class TREX_Segment(BaseModel, ABC):
-     segment_name: str = None
-
-     def as_trex_segment_str(self, segment_name):
-         pass
-
-
- class TREX_SimpleSegment(TREX_Segment):
-     type: str
-     value: str
-
-     @field_validator('type', mode='before')
-     def validate_type(t):
-         if isinstance(t, TREX_types):
-             t = t.value
-         return t
-
-     @staticmethod
-     def from_trex_segmentstring(segment_str):
-
-         matches = re_scalar_pattern.match(segment_str)
-         if not matches:
-             raise T_REX_Segment_ParseError("Segment is not a valid TREX Scalar")
-
-         name, type_, value = matches.groups()
-
-         out = TREX_SimpleSegment(type=type_, value=value, segment_name=name)
-         return out
-
-     @property
-     def value_as_builtin_or_quantity_type(self) -> datetime|bool|str|PydanticUncertainQuantity:
-         return _value_as_builtin_or_quantity(self.value, self.type)
-
-     def as_trex_segment_str(self, segment_name) -> str:
-         return f'{segment_name}${self.type}:{self.value}'
-
-
- class TREX_Table(TREX_Segment):
-     col_names: list[str]
-     col_types: list[str]
-     data: list[list[str]]
-
-     @staticmethod
-     def from_trex_segmentstring( segment_str:str):
-         matches = re_table_pattern.match(segment_str)
-         if not matches:
-             raise T_REX_Segment_ParseError(f"Segment is not a valid TREX table: {segment_str}")
-         name, header, body = matches.groups()
-
-         column_heads = [re_col_head_pattern.match(colhead).groups() for colhead in header.split(':')]
-         col_names = [ch[0] for ch in column_heads]
-         col_types = [ch[1] for ch in column_heads]
-
-         data = [row.split(':') for row in body.split('::') ]
-
-         out = TREX_Table(col_names=col_names, col_types=col_types, data=data, segment_name=name)
-         return out
-
-     def n_rows(self) -> int:
-         return len(self.data)
-
-     def n_cols(self) -> int:
-         return len(self.col_names)
-
-     def row_data(self, row:int) -> list:
-         out = [_value_as_builtin_or_quantity(element, self.col_types[i]) for i, element in enumerate(self.data)]
-         return out
-
-     def col_data(self, col:str|int) -> list:
-         col_index = self._get_col_index(col)
-         type = self.col_types[col_index]
-         out = [_value_as_builtin_or_quantity(row[col_index],type) for row in self.data]
-         return out
-
-     def cell_data(self, row:int, col:str|int):
-         try:
-             col_index = self._get_col_index(col)
-             value = self.data[row][col_index]
-             type = self.col_types[col_index]
-         except ValueError:
-             logging.warning(f"row {row}, column {col} not found")
-             return None
-
-         return _value_as_builtin_or_quantity(value, type)
-
-     def _get_col_index(self, col:str|int):
-         if isinstance(col, str):
-             col_index = self.col_names.index(col)
-         elif isinstance(col, int):
-             col_index = col
-         else:
-             raise TypeError(f"Column must be specified as string or int: {col.__name__}")
-         return col_index
-
-     def as_trex_segment_str(self, name):
-         header = ':'.join([f'{el[0]}${el[1]}' for el in zip(self.col_names, self.col_types)])
-         date_rows = list()
-         for r in self.data:
-             row = ':'.join([str(cell) for cell in r])
-             date_rows.append(row)
-         data = '::'.join(date_rows)
-         s = f'{name}$${header}::{data}'
-         return s
-
-
-
- class TREX(Extension, BaseModel):
-     name_:str
-     segments: dict[str,TREX_Segment]
-
-     @property
-     def name(self)->str:
-         return self.name_
-
-     @property
-     def type(self)->str:
-         return 'TREX'
-
-     @property
-     def data(self)->str:
-         seg_strings = list()
-         for s_name, s in self.segments.items():
-             seg_strings.append(s.as_trex_segment_str(s_name))
-         s_out = '+'.join(seg_strings)
-         return s_out
-
-     @staticmethod
-     def from_spec_fields(name, type, data):
-         if type != 'TREX':
-             logging.warning(f'Type {name} was given, but this extension should only be used with type "TREX". Will try to parse data as TREX')
-
-         if not data:
-             raise ValueError(f'T-REX must be a string of non zero length')
-
-         trex_str = data
-
-         # remove extension indicator. Precaution in case it is not done yet
-         if trex_str[0]=="*":
-             trex_str=trex_str[1:-1]
-         # remove line breaks. for editing T-REXes it's more convenient to have them in, so one never knows
-         trex_str = re.sub(r"\s+", "", trex_str)
-
-         segment_strings = trex_str.split('+')
-         out_segments = dict()
-         for s in segment_strings:
-             # there are only two valid options. The segment is a scalar or a table.
-             # Constructors do the parsing anyways and raise exceptions if invalid data
-             # try both options and then let it fail
-             try:
-                 segment = TREX_SimpleSegment.from_trex_segmentstring(s)
-             except T_REX_Segment_ParseError:
-                 segment = TREX_Table.from_trex_segmentstring(s)
-             out_segments[segment.segment_name] = segment
-
-         return TREX(name_=name, segments=out_segments)
-
-     def get_segment(self, segment_id:str) -> TREX_Segment:
-         return self.segments.get(segment_id)
-
-
-
-
- class TREX_Struct(TREX_Segment):
-     """Struct is a special interpretation of a T-REX Table with one row"""
-     wrapped_table:TREX_Table
-
-     @property
-     def segment_name_(self):
-         return self.wrapped_table.segment_name
-
-     @field_validator('wrapped_table')
-     def validate_table(table):
-         if len(table.data) != 1:
-             raise ValidationError("Too many input rows. Struct can only have one row")
-         return table
-
-     def get(self, key):
-         return self.wrapped_table.cell_data(0, key)
-
-     def keys(self):
-         return self.wrapped_table.col_names
-
-
-
-
- def _to_datetime(trex_datetime):
-     try:
-         # return datetime.fromisoformat(trex_datetime) # should work with python 3.11
-         return datetime.strptime(trex_datetime, TREX_DATEFORMAT)
-     except (ValueError , TypeError) as e:
-         try:
-             return datetime.strptime(trex_datetime, TREX_TIMEFORMAT)
-         except (ValueError, TypeError):
-             return None
-
- def _value_as_builtin_or_quantity(v:str|list[str], type:str) -> datetime|bool|str|PydanticUncertainQuantity:
-     match type:
-         case 'T.D':
-             return _to_datetime(v)
-         case 'T.B':
-             return v == 'T' or bool(v)
-         case 'T.A':
-             return v
-         case 'T.X':
-             raise NotImplementedError("Base36 encoded T-REX segment not implemented")
-         case 'E':
-             return v
-         case _:
-             return quantity_from_UN_CEFACT(v, type)
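
The deleted module told segment kinds apart with the regular expressions at its top: a scalar segment looks like 'name$UNIT:value', a table segment like 'name$$col1$UNIT:col2$UNIT::row::row'. A small standalone demonstration using those same patterns (written as raw strings here); the scalar 'A$HUR:25' comes from the package's own example URL, while the table string is made up for illustration:

    import re

    # Patterns as defined in the deleted TREX data model module
    re_table_pattern  = re.compile(r"(?P<tablename>[\w\.-]*?)\$\$(?P<header>[\w\.,\$:]*?)::(?P<body>.*)")
    re_scalar_pattern = re.compile(r"(?P<name>[\w\.-]*?)\$(?P<unit>[\w\.]*?):(?P<value>.*)")

    def classify_segment(segment: str) -> dict:
        """Try the scalar pattern first, then the table pattern, as the deleted code did."""
        if m := re_scalar_pattern.match(segment):
            return {'kind': 'scalar', **m.groupdict()}
        if m := re_table_pattern.match(segment):
            return {'kind': 'table', **m.groupdict()}
        raise ValueError(f'not a valid T-REX segment: {segment}')

    print(classify_segment('A$HUR:25'))
    # {'kind': 'scalar', 'name': 'A', 'unit': 'HUR', 'value': '25'}
    print(classify_segment('RES$$T$CEL:P$BAR::20.5:1.0::21.0:1.1'))
    # {'kind': 'table', 'tablename': 'RES', 'header': 'T$CEL:P$BAR', 'body': '20.5:1.0::21.0:1.1'}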