labfreed 0.2.8__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of labfreed might be problematic.

Files changed (44)
  1. labfreed/__init__.py +11 -11
  2. labfreed/labfreed_infrastructure.py +258 -258
  3. labfreed/pac_cat/__init__.py +19 -19
  4. labfreed/pac_cat/category_base.py +51 -51
  5. labfreed/pac_cat/pac_cat.py +150 -150
  6. labfreed/pac_cat/predefined_categories.py +200 -200
  7. labfreed/pac_id/__init__.py +19 -19
  8. labfreed/pac_id/extension.py +48 -48
  9. labfreed/pac_id/id_segment.py +89 -89
  10. labfreed/pac_id/pac_id.py +140 -140
  11. labfreed/pac_id/url_parser.py +155 -155
  12. labfreed/pac_id/url_serializer.py +85 -84
  13. labfreed/pac_id_resolver/__init__.py +2 -2
  14. labfreed/pac_id_resolver/cit_common.py +81 -81
  15. labfreed/pac_id_resolver/cit_v1.py +244 -244
  16. labfreed/pac_id_resolver/cit_v2.py +313 -313
  17. labfreed/pac_id_resolver/resolver.py +97 -97
  18. labfreed/pac_id_resolver/services.py +82 -82
  19. labfreed/qr/__init__.py +1 -1
  20. labfreed/qr/generate_qr.py +422 -422
  21. labfreed/trex/__init__.py +16 -16
  22. labfreed/trex/python_convenience/__init__.py +3 -3
  23. labfreed/trex/python_convenience/data_table.py +87 -87
  24. labfreed/trex/python_convenience/pyTREX.py +248 -248
  25. labfreed/trex/python_convenience/quantity.py +66 -66
  26. labfreed/trex/table_segment.py +245 -245
  27. labfreed/trex/trex.py +69 -69
  28. labfreed/trex/trex_base_models.py +209 -209
  29. labfreed/trex/value_segments.py +99 -99
  30. labfreed/utilities/base36.py +82 -82
  31. labfreed/well_known_extensions/__init__.py +4 -4
  32. labfreed/well_known_extensions/default_extension_interpreters.py +6 -6
  33. labfreed/well_known_extensions/display_name_extension.py +40 -40
  34. labfreed/well_known_extensions/trex_extension.py +30 -30
  35. labfreed/well_known_keys/gs1/__init__.py +5 -5
  36. labfreed/well_known_keys/gs1/gs1.py +3 -3
  37. labfreed/well_known_keys/labfreed/well_known_keys.py +15 -15
  38. labfreed/well_known_keys/unece/__init__.py +3 -3
  39. labfreed/well_known_keys/unece/unece_units.py +67 -67
  40. {labfreed-0.2.8.dist-info → labfreed-0.2.9.dist-info}/METADATA +11 -8
  41. labfreed-0.2.9.dist-info/RECORD +45 -0
  42. {labfreed-0.2.8.dist-info → labfreed-0.2.9.dist-info}/licenses/LICENSE +21 -21
  43. labfreed-0.2.8.dist-info/RECORD +0 -45
  44. {labfreed-0.2.8.dist-info → labfreed-0.2.9.dist-info}/WHEEL +0 -0
labfreed/trex/table_segment.py CHANGED
@@ -1,245 +1,245 @@
from collections import Counter
import logging
import re

from pydantic import RootModel, model_validator
from labfreed.trex.trex_base_models import Value
from labfreed.well_known_keys.unece.unece_units import unece_unit_codes
from labfreed.labfreed_infrastructure import LabFREED_BaseModel, ValidationMsgLevel, _quote_texts
from labfreed.trex.trex_base_models import AlphanumericValue, BinaryValue, BoolValue, DateValue, ErrorValue, NumericValue, TREX_Segment, TextValue


class ColumnHeader(LabFREED_BaseModel):
    '''Header of a table Column'''
    key:str
    type:str

    @model_validator(mode='after')
    def _validate_key(self):
        if not_allowed_chars := set(re.sub(r'[A-Z0-9\.-]', '', self.key)):
            self._add_validation_message(
                source=f"TREX table column {self.key}",
                level= ValidationMsgLevel.ERROR,
                msg=f"Column header key contains invalid characters: {_quote_texts(not_allowed_chars)}",
                highlight_pattern = f'{self.key}$',
                highlight_sub=not_allowed_chars
            )
        return self

    @model_validator(mode='after')
    def _validate_type(self):
        valid_types = unece_unit_codes() + ['T.D', 'T.B', 'T.A', 'T.T', 'T.X', 'E']
        if self.type not in valid_types:
            self._add_validation_message(
                source=f"TREX table column {self.key}",
                level= ValidationMsgLevel.ERROR,
                msg=f"Type '{self.type}' is invalid. Must be 'T.D', 'T.B', 'T.A', 'T.T', 'T.X', 'E' or a UNECE unit",
                highlight_pattern = self.type
            )
        return self

class TableRow(RootModel[list[Value]]):
    """
    Represents a row in a table.

    This class is a Pydantic RootModel that wraps a `list[ValueMixin]`.
    Each element in the list corresponds to a cell in the row.

    All common list operations (indexing, iteration, append, pop, etc.) are supported.
    Internally, it wraps a list in the `.root` attribute.
    """
    def serialize(self):
        return ':'.join([e.serialize() for e in self.root])

    def __len__(self):
        return len(self.root)

    def __iter__(self):
        return iter(self.root)

    def __repr__(self):
        return f"TableRow({self.root!r}) # wraps list[{Value.__name__}]"


class TableSegment(TREX_Segment):
    '''TREX Segment which represents tabular data'''
    key:str
    column_headers: list[ColumnHeader]
    data: list[TableRow]

    @property
    def column_names(self):
        return [h.key for h in self.column_headers]

    @property
    def column_types(self):
        return [h.type for h in self.column_headers]

    @model_validator(mode='after')
    def _validate_sizes(self):
        sizes = [len(self.column_headers)]
        sizes.extend( [ len(row) for row in self.data ] )
        most_common_len, count = Counter(sizes).most_common(1)[0]

        if len(self.column_headers) != most_common_len:
            self._add_validation_message(
                source=f"Table {self.key}",
                level= ValidationMsgLevel.ERROR,
                msg=f"Size mismatch: Table header contains {self.column_names} keys, while most rows have {most_common_len}",
                highlight_pattern = self.key
            )
            expected_row_len = most_common_len
        else:
            expected_row_len = len(self.column_headers)


        for i, row in enumerate(self.data):
            if len(row) != expected_row_len:
                self._add_validation_message(
                    source=f"Table {self.key}",
                    level= ValidationMsgLevel.ERROR,
                    msg=f"Size mismatch: Table row {i} contains {len(row)} elements. Expected size is {expected_row_len}",
                    highlight_pattern = row.serialize()
                )
        return self

    @model_validator(mode='after')
    def _validate_data_types(self):
        expected_types = self.column_types
        i = 0
        for row in self.data:
            for e, t_expected, nm in zip(row, expected_types, self.column_names):
                try:
                    match t_expected:
                        case 'T.D':
                            assert isinstance(e, DateValue)
                        case 'T.B':
                            assert isinstance(e, BoolValue)
                        case 'T.A':
                            assert isinstance(e, AlphanumericValue)
                        case 'T.T':
                            assert isinstance(e, TextValue)
                        case 'T.X':
                            assert isinstance(e, BinaryValue)
                        case 'E':
                            assert isinstance(e, ErrorValue)
                        case _:
                            assert isinstance(e, NumericValue)
                except AssertionError:
                    self._add_validation_message(
                        source=f"Table {self.key}",
                        level= ValidationMsgLevel.ERROR,
                        msg=f"Type mismatch: Table row {i}, column {nm} is of wrong type. According to the header it should be {t_expected}",
                        highlight_pattern = row.serialize(),
                        highlight_sub=[c for c in e.value]
                    )

                if msg := e.errors():
                    for m in msg:
                        self._add_validation_message(
                            source=f"Table {self.key}",
                            level= ValidationMsgLevel.ERROR,
                            msg=m.msg,
                            highlight_pattern = row.serialize(),
                            highlight_sub=[c for c in e.value]
                        )
            i += 1
        return self


    def _get_col_index(self, col:str|int):
        if isinstance(col, str):
            col_index = self.column_names.index(col)
        elif isinstance(col, int):
            col_index = col
        else:
            raise TypeError(f"Column must be specified as string or int: {col.__name__}")
        return col_index


    def serialize(self):
        header = ':'.join([f'{h.key}${h.type}' for h in self.column_headers])
        data = '::'.join([r.serialize() for r in self.data])
        s = f'{self.key}$${header}::{data}'
        return s


    def n_rows(self) -> int:
        return len(self.data)

    def n_cols(self) -> int:
        return len(self.column_headers)

    def row_data(self, row:int) -> list:
        out = self.data[row]
        return out


    def column_data(self, col:str|int) -> list:
        col_index = self._get_col_index(col)
        out = [row[col_index] for row in self.data]
        return out


    def cell_data(self, row:int, col:str|int):
        try:
            col_index = self._get_col_index(col)
            value = self.data[row][col_index]
        except ValueError:
            logging.warning(f"row {row}, column {col} not found")
            return None
        return value


def _deserialize_table_segment_from_trex_segment_str(trex_segment_str) -> TableSegment:
    # re_table_pattern = re.compile(f"(?P<tablename>[\w\.-]*?)\$\$(?P<header>[\w\.,\$:]*?)::(?P<body>.*)")
    # re_col_head_pattern = re.compile(f"(?P<name>[\w\.-]*?)\$(?P<unit>[\w\.]*)")
    re_table_pattern = re.compile(r"(?P<tablename>.+?)\$\$(?P<header>.+?)::(?P<body>.+)")

    matches = re_table_pattern.match(trex_segment_str)
    if not matches:
        return None
    name, header, body = matches.groups()

    column_headers_str = header.split(':')

    headers = []
    for colum_header in column_headers_str:
        ch = colum_header.split('$')
        col_key = ch[0]
        col_type = ch[1] if len(ch) > 1 else ''
        headers.append(ColumnHeader(key=col_key, type=col_type))

    data = [row.split(':') for row in body.split('::') ]
    # convert to correct value types
    data_with_types = [[_str_to_value_type(h.type, cv) for cv, h in zip(r, headers)] for r in data]
    data = [ TableRow(r) for r in data_with_types]

    out = TableSegment(column_headers=headers, data=data, key=name)
    return out

def _str_to_value_type(type_, s):
    match type_:
        case 'T.D':
            out = DateValue(value=s)
        case 'T.B':
            out = BoolValue(value=s)
        case 'T.A':
            out = AlphanumericValue(value=s)
        case 'T.T':
            out = TextValue(value=s)
        case 'T.X':
            out = BinaryValue(value=s)
        case 'E':
            out = ErrorValue(value=s)
        case _:
            out = NumericValue(value=s)
    return out
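For orientation, here is a hypothetical round-trip sketch of how this module is used; it is not part of the package. The column keys ('SAMPLE', 'VOL'), the cell values, the 'MLT' unit code, and the expected output string are invented for illustration and assume the value classes accept the string inputs shown, as they do in _str_to_value_type above.

# Hypothetical usage sketch: build a small table, serialize it,
# then parse the string back with the module's helper.
from labfreed.trex.table_segment import (
    ColumnHeader, TableRow, TableSegment,
    _deserialize_table_segment_from_trex_segment_str,
)
from labfreed.trex.trex_base_models import AlphanumericValue, NumericValue

headers = [
    ColumnHeader(key='SAMPLE', type='T.A'),  # alphanumeric column
    ColumnHeader(key='VOL', type='MLT'),     # UNECE code for millilitre (assumed to be in unece_unit_codes())
]
rows = [
    TableRow([AlphanumericValue(value='A1'), NumericValue(value='10.5')]),
    TableRow([AlphanumericValue(value='A2'), NumericValue(value='11.0')]),
]
table = TableSegment(key='RESULTS', column_headers=headers, data=rows)

s = table.serialize()
# Expected shape (roughly): 'RESULTS$$SAMPLE$T.A:VOL$MLT::A1:10.5::A2:11.0'
print(s)

parsed = _deserialize_table_segment_from_trex_segment_str(s)
print(parsed.n_rows(), parsed.n_cols())  # 2 2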
labfreed/trex/trex.py CHANGED
@@ -1,69 +1,69 @@
from collections import Counter
from typing import Self
from pydantic import Field, field_validator

from labfreed.labfreed_infrastructure import LabFREED_BaseModel
from labfreed.trex.table_segment import _deserialize_table_segment_from_trex_segment_str
from labfreed.trex.trex_base_models import TREX_Segment
from labfreed.trex.value_segments import _deserialize_value_segment_from_trex_segment_str


class TREX(LabFREED_BaseModel):
    '''Represents a T-REX extension'''
    segments: list[TREX_Segment] = Field(default_factory=list)

    @classmethod
    def deserialize(cls, data) -> Self:
        segment_strings = data.split('+')
        segments = list()
        for s in segment_strings:
            # there are only two valid options. The segment is a scalar or a table.
            # Constructors do the parsing anyways and raise exceptions if invalid data
            # try both options and then let it fail
            segment = _deserialize_table_segment_from_trex_segment_str(s)
            if not segment:
                segment = _deserialize_value_segment_from_trex_segment_str(s)
            if not segment:
                raise ValueError('TREX contains neither valid value segment nor table')

            segments.append(segment)
        trex = TREX(segments=segments)
        return trex


    def serialize(self):
        seg_strings = list()
        for s in self.segments:
            seg_strings.append(s.serialize())
        s_out = '+'.join(seg_strings)
        return s_out


    def get_segment(self, segment_key:str) -> TREX_Segment:
        '''Get a segment by key'''
        s = [s for s in self.segments if s.key == segment_key]
        if s:
            return s[0]
        else:
            return None


    def __str__(self):
        s = self.serialize().replace('+', '\n+').replace('::', '::\n ')
        return s


    @field_validator('segments')
    @classmethod
    def _validate_segments(cls, segments):
        segment_keys = [s.key for s in segments]
        duplicates = [item for item, count in Counter(segment_keys).items() if count > 1]
        if duplicates:
            raise ValueError(f"Duplicate segment keys: {','.join(duplicates)}")
        return segments
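As a closing illustration, here is a hypothetical sketch (not from the package) of the TREX round trip: a T-REX string is a '+'-joined list of segments, each segment is parsed first as a table and then as a value segment, and duplicate segment keys are rejected by the field validator. The example string reuses the invented table data from the earlier sketch.

# Hypothetical usage sketch for TREX; the segment string below is invented example data.
from labfreed.trex.trex import TREX

trex_str = 'RESULTS$$SAMPLE$T.A:VOL$MLT::A1:10.5::A2:11.0'
trex = TREX.deserialize(trex_str)

print(trex.serialize())             # should reproduce the '+'-joined form
print(trex.get_segment('RESULTS'))  # lookup by segment key; returns None if absent

# Constructing a TREX with two segments that share a key would raise a ValueError
# from _validate_segments, e.g. "Duplicate segment keys: RESULTS".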