labfreed 0.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- labfreed-0.0.3/.vscode/launch.json +18 -0
- labfreed-0.0.3/.vscode/settings.json +7 -0
- labfreed-0.0.3/LICENSE +21 -0
- labfreed-0.0.3/PKG-INFO +15 -0
- labfreed-0.0.3/README.md +3 -0
- labfreed-0.0.3/labfreed/DisplayNameExtension/DisplayNameExtension.py +34 -0
- labfreed-0.0.3/labfreed/DisplayNameExtension/base36.py +66 -0
- labfreed-0.0.3/labfreed/PAC_CAT/__init__.py +1 -0
- labfreed-0.0.3/labfreed/PAC_CAT/data_model.py +109 -0
- labfreed-0.0.3/labfreed/PAC_ID/__init__.py +0 -0
- labfreed-0.0.3/labfreed/PAC_ID/data_model.py +114 -0
- labfreed-0.0.3/labfreed/PAC_ID/parse.py +133 -0
- labfreed-0.0.3/labfreed/PAC_ID/serialize.py +57 -0
- labfreed-0.0.3/labfreed/TREXExtension/data_model.py +239 -0
- labfreed-0.0.3/labfreed/TREXExtension/parse.py +46 -0
- labfreed-0.0.3/labfreed/TREXExtension/uncertainty.py +32 -0
- labfreed-0.0.3/labfreed/TREXExtension/unit_utilities.py +134 -0
- labfreed-0.0.3/labfreed/__init__.py +5 -0
- labfreed-0.0.3/main.py +22 -0
- labfreed-0.0.3/pyproject.toml +17 -0
- labfreed-0.0.3/pytest.ini +2 -0
- labfreed-0.0.3/tests/test_PAC_CAT/test_PAC_CAT.py +117 -0
- labfreed-0.0.3/tests/test_PAC_ID/test_pac_id_parse.py +265 -0
- labfreed-0.0.3/tests/test_PAC_ID/test_pac_id_serialization.py +33 -0
- labfreed-0.0.3/tests/test_TREXExtension/test_TREX.py +31 -0
labfreed-0.0.3/.vscode/launch.json
ADDED
@@ -0,0 +1,18 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python Debugger: Current File",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal",
+            "env": {
+                "PYTHONPATH": "$(pwd)"
+            }
+        }
+    ]
+}
labfreed-0.0.3/LICENSE
ADDED
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2025 Reto Thürer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
labfreed-0.0.3/PKG-INFO
ADDED
@@ -0,0 +1,15 @@
+Metadata-Version: 2.4
+Name: labfreed
+Version: 0.0.3
+Summary: Python implementation of LabFREED building blocks
+Author-email: Reto Thürer <reto.thuerer@gmail.com>
+Requires-Python: >=3.12
+Description-Content-Type: text/markdown
+License-Expression: MIT
+Classifier: Programming Language :: Python :: 3
+Classifier: Operating System :: OS Independent
+License-File: LICENSE
+
+# LabFREED for Python
+
+A python implementation of [LabFREED](www.labfreed.com).
labfreed-0.0.3/README.md
ADDED
labfreed-0.0.3/labfreed/DisplayNameExtension/DisplayNameExtension.py
ADDED
@@ -0,0 +1,34 @@
+import logging
+from pydantic import BaseModel
+from ..PAC_ID.data_model import Extension
+from .base36 import from_base36, to_base36
+
+
+class DisplayNames(Extension, BaseModel):
+    display_names: list[str]
+
+    @property
+    def name(self)->str:
+        return 'N'
+
+    @property
+    def type(self)->str:
+        return 'N'
+
+    @property
+    def data(self)->str:
+        return '/'.join([to_base36(dn) for dn in self.display_names])
+
+    @staticmethod
+    def from_spec_fields(name, type, data):
+        if name != 'N':
+            logging.warning(f'Name {name} was given, but this extension should only be used with name "N". Will ignore input')
+
+        if type != 'N':
+            logging.warning(f'Type {name} was given, but this extension should only be used with type "N". Will try to parse data as display names')
+
+        display_names = [from_base36(b36) for b36 in data.split('/')]
+
+        return DisplayNames(display_names=display_names)
+
+
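For orientation, a minimal round-trip sketch of the DisplayNames extension added above. This is not part of the diff; the import path is an assumption based on the package layout in the file list.

# Hypothetical usage sketch (not part of the package); import path assumed from the file list above.
from labfreed.DisplayNameExtension.DisplayNameExtension import DisplayNames

dn = DisplayNames(display_names=['Balance B-500', 'Lab 2'])
encoded = dn.data                          # display names encoded as base36, joined with '/'
decoded = DisplayNames.from_spec_fields(name='N', type='N', data=encoded)
assert decoded.display_names == dn.display_names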
labfreed-0.0.3/labfreed/DisplayNameExtension/base36.py
ADDED
@@ -0,0 +1,66 @@
+import string
+
+def alphabet(base):
+    """ returns an alphabet, which corresponds to what pythons int(s:str, base:int=10) function used.
+    """
+    if base < 2 or base > 36:
+        ValueError('base can only be between 2 and 36')
+    alphabet = (string.digits + string.ascii_uppercase)[0:base]
+    return alphabet
+
+def to_base36(s:str):
+    """Takes a string, encodes it in UTF-8 and then as base36 string."""
+    utf8_encoded = s.encode('utf-8')
+    num = int.from_bytes(utf8_encoded, byteorder='big', signed=False)
+
+    # note: this cannot be arbitrarily chosen. The choice here corresponds to what pythons int(s:str, base:int=10) function used.
+    base36_chars = alphabet(base=36)
+    if num == 0:
+        return base36_chars[0]
+    base36 = []
+    _num = num
+    while _num:
+        _num, i = divmod(_num, 36)
+        base36.append(base36_chars[i])
+    return ''.join(reversed(base36))
+
+
+def from_base36(s36:str):
+    """inverse of to_base36"""
+    # this built in function interprets each character as number in a base represented by the standartd alphabet [0-9 (A-Z|a-z)][0:base] it is case INsensitive.
+    num = int(s36, 36)
+    num_bytes = (num.bit_length() + 7) // 8
+    _bytes = num.to_bytes(num_bytes, byteorder='big')
+    s = _bytes.decode('utf-8')
+    return s
+
+if __name__ == "__main__":
+    ss = ["A",
+          "B-500 B",
+          "B-500 Ba",
+          "B-500 Bal",
+          "B-500 Bala",
+          "B-500 Balanc",
+          "B-500 Balance",
+          "B-500 D",
+          "Mini Spray Dryer S-300",
+          "w3ApashAt!!£NAGDSAF*ç%&/()",
+          "HELLOWORLD",
+          "Helloworld",
+          "$£äö!'?^{]<@#¦&¬|¢)&§°😀你好🌍🏯😇🎵🔥你👻🐉😀你好🌍🏯😇🎵🔥你👻🐉😀你好🌍🏯😇🎵🔥你👻🐉😀你好🌍🏯😇🎵🔥你👻🐉😀你好🌍🏯😇🎵🔥你👻🐉😀你好🌍🏯😇🎵🔥你👻🐉😀你好🌍🏯😇🎵🔥你👻🐉",
+          "往跟住!師立甲錯什正再圓身升因月室",
+          "Balance BAL500 @☣️Lab",
+          "BAL500 @☣️Lab",
+          "BAL-CLEAN",
+          "Smørrebrød µ-Nutrients",
+          "Demo Result from R-300",
+          "Rotavapor R-300",
+          "Rotavapor R-250",
+          "Rotavapor R-220",
+          "SyncorePlus"
+          ]
+    for s in ss:
+        s36 = to_base36(s)
+        s_back = from_base36(s36)
+        identical = (s == s_back)
+        print(f'{s} >> {s36} >> {s_back}: match:{identical}')
labfreed-0.0.3/labfreed/PAC_CAT/__init__.py
ADDED
@@ -0,0 +1 @@
+
labfreed-0.0.3/labfreed/PAC_CAT/data_model.py
ADDED
@@ -0,0 +1,109 @@
+from abc import ABC
+from pydantic import Field
+from pydantic import BaseModel
+
+from ..PAC_ID.data_model import IDSegment, Category
+
+class CATBase(BaseModel, ABC):
+    category_key:str
+    additional_segments: list[IDSegment] = Field(default_factory=list)
+
+    class Config:
+        populate_by_name = True # this will allow field names, as well as aliases in validation
+
+    def to_identifier_category(self, use_short_notation=False):
+        '''Creates a Category with the correct segments.
+        Segments are in order of the Pydantic model fields.
+        Segment keys are omitted as long as the recommendation is followed.
+        Additional segments are added at the end'''
+        segments = []
+        can_omit_keys = use_short_notation # keeps track of whether keys can still be omitted. That is the case when the segment recommendation is followed
+        for field_name, field_info in self.model_fields.items():
+            if field_name in ['category_key', 'additional_segments']:
+                continue
+            if value := getattr(self, field_name):
+                if can_omit_keys:
+                    key = None
+                else:
+                    key = field_info.alias
+                segments.append(IDSegment(key= key, value= value) )
+            else:
+                can_omit_keys = False
+        if self.additional_segments:
+            segments.extend(self.additional_segments)
+        return Category(key=self.category_key,
+                        segments=segments)
+
+
+
+
+class Material_Device(CATBase):
+    category_key: str = Field(default='-MD', frozen=True)
+    model_number: str = Field( alias='240', min_length=1)
+    serial_number: str = Field( alias='21', min_length=1)
+
+class Material_Substance(CATBase):
+    category_key: str = Field(default='-MS', frozen=True)
+    product_number:str = Field( alias='240', min_length=1)
+    batch_number:str|None = Field(default=None, alias='10')
+    container_size:str|None = Field(default=None, alias='20')
+    container_number:str|None = Field(default=None, alias='21')
+    aliquot:str|None = Field(default=None, alias='250')
+
+class Material_Consumable(CATBase):
+    category_key: str = Field(default='-MC', frozen=True)
+    product_number:str = Field( alias='240', min_length=1)
+    batch_number:str|None = Field(default=None, alias='10')
+    packing_size:str|None = Field(default=None, alias='20')
+    serial_number:str|None = Field(default=None, alias='21')
+    aliquot:str|None = Field(default=None, alias='250')
+
+class Material_Misc(Material_Consumable):
+    category_key: str = Field(default='-MM', frozen=True)
+
+
+
+class Data_Result(CATBase):
+    category_key: str = Field(default='-DR', frozen=True)
+    id:str = Field( alias='240', min_length=1)
+
+class Data_Method(CATBase):
+    category_key: str = Field(default='-DM', frozen=True)
+    id:str = Field( alias='240', min_length=1)
+
+class Data_Calibration(CATBase):
+    category_key: str = Field(default='-DC', frozen=True)
+    id:str = Field( alias='240', min_length=1)
+
+class Data_Progress(CATBase):
+    category_key: str = Field(default='-DP', frozen=True)
+    id:str = Field( alias='240', min_length=1)
+
+class Data_Static(CATBase):
+    category_key: str = Field(default='-DS', frozen=True)
+    id:str = Field( alias='240', min_length=1)
+
+
+
+
+mapping = {
+    '-MD': Material_Device,
+    '-MS': Material_Substance,
+    '-MC': Material_Consumable,
+    '-MM': Material_Misc,
+    '-DM': Data_Method,
+    '-DR': Data_Result,
+    '-DC': Data_Calibration,
+    '-DP': Data_Progress,
+    '-DS': Data_Static
+}
+
+def CAT_from_category(category:Category) -> CATBase|None:
+    raise NotImplementedError()
+
+def CAT_from_category_key(category_key) -> CATBase|None:
+    return mapping.get(category_key)
+
+
+if __name__ == "__main__":
+    pass
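A small illustration of how the category models above can be turned into PAC-ID segments. This is not part of the diff; the import paths are assumptions based on the file list.

# Hypothetical usage sketch (not part of the package); import paths assumed from the file list above.
from labfreed.PAC_CAT.data_model import Material_Device
from labfreed.PAC_ID.data_model import Identifier

device = Material_Device(model_number='B-500', serial_number='12345')
category = device.to_identifier_category()   # Category with key '-MD' and segments 240:B-500 and 21:12345
identifier = Identifier.from_categories([category])
print([(s.key, s.value) for s in identifier.segments])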
labfreed-0.0.3/labfreed/PAC_ID/__init__.py
File without changes
labfreed-0.0.3/labfreed/PAC_ID/data_model.py
ADDED
@@ -0,0 +1,114 @@
+from typing import Optional
+from typing_extensions import Self
+from pydantic import BaseModel, Field, computed_field, conlist, model_validator
+from abc import ABC, abstractproperty, abstractstaticmethod
+
+class IDSegment(BaseModel):
+    key:Optional[str] = Field(None, pattern=r'^[A-Z0-9-+]+$', min_length=1)
+    value:str = Field(..., pattern=r'^[A-Z0-9-+]+$', min_length=1)
+
+
+class Category(BaseModel):
+    key:str|None = None
+    segments: list[IDSegment]
+
+
+class Identifier(BaseModel):
+    segments: conlist(IDSegment, min_length=1) = Field(..., exclude=True) # exclude=True prevents this from being serialized by Pydantic
+
+    @computed_field
+    @property
+    def categories(self) -> list[Category]:
+        categories = list()
+        c = Category(segments=[])
+        categories.append(c)
+        for s in self.segments:
+            # new category starts with "-"
+            if s.value[0] == '-':
+                cat_key = s.value
+                c = Category(key=cat_key, segments=[])
+                categories.append(c)
+            else:
+                c.segments.append(s)
+
+        # the first category might have no segments. remove categories without segments
+        if not categories[0].segments:
+            categories = categories[1:]
+
+        return categories
+
+    @model_validator(mode='after')
+    def check_keys_are_unique_in_each_category(self) -> Self:
+        for c in self.categories:
+            keys = [s.key for s in c.segments if s.key]
+            duplicate_keys = [k for k in set(keys) if keys.count(k) > 1]
+            if duplicate_keys:
+                raise ValueError(f'Duplicate keys {",".join(duplicate_keys)} in category {c.key}')
+        return self
+
+    @staticmethod
+    def from_categories(categories:list[Category]) :
+        segments = list()
+        for c in categories:
+            if c.key:
+                segments.append(IDSegment(value=c.key))
+            segments.extend(c.segments)
+        return Identifier(segments=segments)
+
+
+
+class Extension(ABC, BaseModel):
+
+    @abstractproperty
+    def name(self)->str:
+        pass
+
+    @abstractproperty
+    def type(self)->str:
+        pass
+
+    @abstractproperty
+    def data(self)->str:
+        pass
+
+    @abstractstaticmethod
+    def from_spec_fields(name, type, data):
+        pass
+
+
+class UnknownExtension(Extension):
+    name_:str
+    type_:str
+    data_:str
+
+    @property
+    def name(self)->str:
+        return self.name_
+
+    @property
+    def type(self)->str:
+        return self.type_
+
+    @property
+    def data(self)->str:
+        return self.data_
+
+    @staticmethod
+    def from_spec_fields(name, type, data):
+        return UnknownExtension(name_=name, type_=type, data_=data)
+
+
+
+class PACID(BaseModel):
+    issuer:str
+    identifier: Identifier
+
+
+class PACID_With_Extensions(BaseModel):
+    pac_id: PACID
+    extensions: list[Extension] = Field(default_factory=list)
+
+
+
+
+
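As a quick illustration of the data model above: the computed categories property groups plain segments under the preceding "-XX" segment. This is not part of the diff; the import path is assumed from the file list.

# Hypothetical usage sketch (not part of the package); import path assumed from the file list above.
from labfreed.PAC_ID.data_model import IDSegment, Identifier

identifier = Identifier(segments=[
    IDSegment(value='-MD'),
    IDSegment(key='240', value='B-500'),
    IDSegment(key='21', value='12345'),
])
for category in identifier.categories:
    print(category.key, [(s.key, s.value) for s in category.segments])
# -MD [('240', 'B-500'), ('21', '12345')]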
labfreed-0.0.3/labfreed/PAC_ID/parse.py
ADDED
@@ -0,0 +1,133 @@
+
+
+import re
+from types import MappingProxyType
+from .data_model import *
+
+
+category_conventions = MappingProxyType(
+    {
+        '-MD': ['240', '21'],
+        '-MS': ['240', '10', '20', '21', '250'],
+        '-MC': ['240', '10', '20', '21', '250'],
+        '-MM': ['240', '10', '20', '21', '250']
+    }
+)
+
+
+extension_convention = MappingProxyType(
+    {
+        0: { 'name': 'N', 'type': 'N'},
+        1: { 'name': 'SUM', 'type': 'TREX'}
+    }
+)
+
+
+
+class PAC_Parser():
+
+    def __init__(self, extension_interpreters:dict[str, Extension]=None):
+        self.extension_interpreters = extension_interpreters or {}
+
+    def parse_pac_url(self, pac_url:str) -> PACID_With_Extensions:
+        if '*' in pac_url:
+            id_str, ext_str = pac_url.split('*', 1)
+        else:
+            id_str = pac_url
+            ext_str = ""
+
+        pac_id = self.parse_pac_id(id_str)
+        extensions = self.parse_extensions(ext_str)
+        return PACID_With_Extensions(pac_id=pac_id, extensions=extensions)
+
+
+    def parse_id_segments(self, identifier:str):
+        if not identifier:
+            return []
+
+        id_segments = list()
+        for s in identifier.split('/'):
+            tmp = s.split(':')
+
+            if len(tmp) == 1:
+                segment = IDSegment(value=tmp[0])
+            elif len(tmp) == 2:
+                segment = IDSegment(key=tmp[0], value=tmp[1])
+            else:
+                raise ValueError(f'invalid segment: {s}')
+
+            id_segments.append(segment)
+        return id_segments
+
+
+    def _apply_category_defaults(self, segments_in: list[IDSegment]):
+
+        segments = segments_in.copy()
+        default_keys = None
+        for s in segments:
+            if not s.key and default_keys:
+                s.key = default_keys.pop(0)
+            else:
+                default_keys = None
+
+            # category starts: start with new defaults.
+            if s.value in category_conventions.keys():
+                default_keys = category_conventions.get(s.value).copy() #copy, so the entries can be popped when used
+        return segments
+
+
+
+    def parse_pac_id(self,id_str:str) -> PACID:
+        m = re.match(f'(HTTPS://)?(PAC.)?(?P<issuer>.+?\..+?)/(?P<identifier>.*)', id_str)
+        d = m.groupdict()
+
+        id_segments = list()
+        default_keys = None
+        id_segments = self.parse_id_segments(d.get('identifier'))
+        id_segments = self._apply_category_defaults(id_segments)
+
+        return PACID(issuer= d.get('issuer'),
+                     identifier=Identifier(segments=id_segments)
+                     )
+
+
+    def parse_extensions(self, extensions_str:str|None) -> list[Extension]:
+        extensions = list()
+
+        if not extensions_str:
+            return extensions
+
+        defaults = extension_convention
+        for i, e in enumerate(extensions_str.split('*')):
+            if e == '': #this will happen if first extension starts with *
+                continue
+            d = re.match('((?P<name>.+)\$(?P<type>.+)/)?(?P<data>.+)', e).groupdict()
+
+            name = d.get('name')
+            type = d.get('type')
+            data = d.get('data')
+
+            if name:
+                defaults = None # once a name was specified no longer assign defaults
+            else:
+                if defaults:
+                    name = defaults.get(i).get('name')
+                    type = defaults.get(i).get('type')
+                else:
+                    raise ValueError('extension number {i}, must have name and type')
+
+            #convert to subtype if they were given
+            subtype = self.extension_interpreters.get(type) or UnknownExtension
+            e = subtype.from_spec_fields(name=name, type=type, data=data)
+            extensions.append(e)
+
+        return extensions
+
+
+
+
+
+if __name__ == "__main__":
+    pacid_str = 'HTTPS://PAC.METTORIUS.COM/-DR/AB378/-MD/B-500/1235/-MS/AB/X:88/WWW/-MS/240:11/BB*ABCFD*A$HUR:25+B$CEL:99*BLUBB$TREX/A$HUR:25+B$CEL:99'
+
+    pac = PAC_Parser().parse_pac(pacid_str)
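A minimal parsing sketch based on the parser above. This is not part of the diff; the example URL is made up and the import path is assumed from the file list.

# Hypothetical usage sketch (not part of the package); import path assumed from the file list above.
from labfreed.PAC_ID.parse import PAC_Parser

pac = PAC_Parser().parse_pac_url('HTTPS://PAC.METTORIUS.COM/-MD/240:B-500/21:12345')
print(pac.pac_id.issuer)                                        # METTORIUS.COM
print([(s.key, s.value) for s in pac.pac_id.identifier.segments])
print(pac.extensions)                                           # [] - no '*...' extension part in this URL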
labfreed-0.0.3/labfreed/PAC_ID/serialize.py
ADDED
@@ -0,0 +1,57 @@
+
+from .data_model import *
+
+
+
+class PAC_Serializer():
+    def to_url(self, pac:PACID|PACID_With_Extensions, extensions:list[Extension]=None, use_short_notation_for_extensions=False) -> str:
+        if isinstance(pac, PACID_With_Extensions):
+            if extensions:
+                raise ValueError('Extensions were given twice, as part of PACID_With_Extension and as method parameter.')
+            extensions = pac.extensions
+            pac = pac.pac_id
+        issuer = pac.issuer
+        extensions_str = self._serialize_extensions(extensions, use_short_notation_for_extensions)
+        id_segments = self._serialize_id_segments(pac.identifier.segments)
+        return f"HTTPS://PAC.{issuer}{id_segments}{extensions_str}".upper()
+
+
+    def _serialize_id_segments(self, segments):
+        out = ''
+        for s in segments:
+            if s.key:
+                out += f'/{s.key}:{s.value}'
+            else:
+                out += f'/{s.value}'
+        return out
+
+
+    def _serialize_extensions(self, extensions:list[Extension], use_short_notation_for_extensions):
+        out = ''
+        short_notation = use_short_notation_for_extensions
+        for i, e in enumerate(extensions):
+
+            if short_notation and i==0:
+                if e.name=='N':
+                    out += f'*{e.data}'
+                    continue
+                else:
+                    short_notation = False
+            if short_notation and i==1:
+                if e.name=='SUM':
+                    out += f'*{e.data}'
+                    continue
+                else:
+                    short_notation = False
+
+            out += f'*{e.name}${e.type}/{e.data}'
+        return out
+
+
+
+def main():
+    pass
+
+
+if __name__ == "__main__":
+    main()
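Finally, a round-trip sketch combining the parser and serializer shown in this diff. Again not part of the package itself; import paths are assumed from the file list.

# Hypothetical usage sketch (not part of the package); import paths assumed from the file list above.
from labfreed.PAC_ID.parse import PAC_Parser
from labfreed.PAC_ID.serialize import PAC_Serializer

url = 'HTTPS://PAC.METTORIUS.COM/-MD/240:B-500/21:12345'
pac = PAC_Parser().parse_pac_url(url)            # PACID_With_Extensions
round_tripped = PAC_Serializer().to_url(pac)     # extensions are taken from the parsed object
print(round_tripped == url)                      # True for this example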