labfreed 0.0.4__py3-none-any.whl → 0.2.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. labfreed/PAC_CAT/__init__.py +16 -0
  2. labfreed/PAC_CAT/category_base.py +51 -0
  3. labfreed/PAC_CAT/pac_cat.py +159 -0
  4. labfreed/PAC_CAT/predefined_categories.py +190 -0
  5. labfreed/PAC_ID/__init__.py +19 -0
  6. labfreed/PAC_ID/extension.py +48 -0
  7. labfreed/PAC_ID/id_segment.py +90 -0
  8. labfreed/PAC_ID/pac_id.py +140 -0
  9. labfreed/PAC_ID/url_parser.py +154 -0
  10. labfreed/PAC_ID/url_serializer.py +80 -0
  11. labfreed/PAC_ID_Resolver/__init__.py +2 -0
  12. labfreed/PAC_ID_Resolver/cit_v1.py +149 -0
  13. labfreed/PAC_ID_Resolver/cit_v2.py +303 -0
  14. labfreed/PAC_ID_Resolver/resolver.py +81 -0
  15. labfreed/PAC_ID_Resolver/services.py +80 -0
  16. labfreed/__init__.py +4 -1
  17. labfreed/labfreed_infrastructure.py +276 -0
  18. labfreed/qr/__init__.py +1 -0
  19. labfreed/qr/generate_qr.py +422 -0
  20. labfreed/trex/__init__.py +16 -0
  21. labfreed/trex/python_convenience/__init__.py +3 -0
  22. labfreed/trex/python_convenience/data_table.py +45 -0
  23. labfreed/trex/python_convenience/pyTREX.py +242 -0
  24. labfreed/trex/python_convenience/quantity.py +46 -0
  25. labfreed/trex/table_segment.py +227 -0
  26. labfreed/trex/trex.py +69 -0
  27. labfreed/trex/trex_base_models.py +336 -0
  28. labfreed/trex/value_segments.py +111 -0
  29. labfreed/{DisplayNameExtension → utilities}/base36.py +29 -13
  30. labfreed/well_known_extensions/__init__.py +5 -0
  31. labfreed/well_known_extensions/default_extension_interpreters.py +7 -0
  32. labfreed/well_known_extensions/display_name_extension.py +40 -0
  33. labfreed/well_known_extensions/trex_extension.py +31 -0
  34. labfreed/well_known_keys/gs1/__init__.py +6 -0
  35. labfreed/well_known_keys/gs1/gs1.py +4 -0
  36. labfreed/well_known_keys/gs1/gs1_ai_enum_sorted.py +57 -0
  37. labfreed/well_known_keys/labfreed/well_known_keys.py +16 -0
  38. labfreed/well_known_keys/unece/UneceUnits.json +33730 -0
  39. labfreed/well_known_keys/unece/__init__.py +4 -0
  40. labfreed/well_known_keys/unece/unece_units.py +68 -0
  41. labfreed-0.2.0b0.dist-info/METADATA +329 -0
  42. labfreed-0.2.0b0.dist-info/RECORD +44 -0
  43. {labfreed-0.0.4.dist-info → labfreed-0.2.0b0.dist-info}/WHEEL +1 -1
  44. labfreed/DisplayNameExtension/DisplayNameExtension.py +0 -34
  45. labfreed/PAC_CAT/data_model.py +0 -109
  46. labfreed/PAC_ID/data_model.py +0 -114
  47. labfreed/PAC_ID/parse.py +0 -133
  48. labfreed/PAC_ID/serialize.py +0 -57
  49. labfreed/TREXExtension/data_model.py +0 -239
  50. labfreed/TREXExtension/parse.py +0 -46
  51. labfreed/TREXExtension/uncertainty.py +0 -32
  52. labfreed/TREXExtension/unit_utilities.py +0 -134
  53. labfreed-0.0.4.dist-info/METADATA +0 -15
  54. labfreed-0.0.4.dist-info/RECORD +0 -17
  55. {labfreed-0.0.4.dist-info → labfreed-0.2.0b0.dist-info}/licenses/LICENSE +0 -0
@@ -1,114 +0,0 @@
1
from abc import ABC, abstractmethod, abstractproperty, abstractstaticmethod
from typing import Optional

from pydantic import BaseModel, Field, computed_field, conlist, model_validator
from typing_extensions import Self
5
-
6
class IDSegment(BaseModel):
    """One segment of a PAC-ID identifier: an optional key plus a value.

    Both fields are restricted to uppercase A-Z, digits, '-' and '+'.
    """
    key:Optional[str] = Field(None, pattern=r'^[A-Z0-9-+]+$', min_length=1)  # optional segment key (e.g. '240')
    value:str = Field(..., pattern=r'^[A-Z0-9-+]+$', min_length=1)  # mandatory segment value
9
-
10
-
11
class Category(BaseModel):
    """A group of IDSegments under an optional category key (e.g. '-MD')."""
    key:str|None = None  # category marker value; None for segments before the first marker
    segments: list[IDSegment]
14
-
15
-
16
class Identifier(BaseModel):
    """An ordered list of IDSegments, with a derived view grouped into categories."""
    segments: conlist(IDSegment, min_length=1) = Field(..., exclude=True) # exclude=True prevents this from being serialized by Pydantic

    @computed_field
    @property
    def categories(self) -> list[Category]:
        """Group segments into categories.

        A segment whose value starts with '-' opens a new category and becomes
        its key; all following segments belong to that category.
        """
        categories = list()
        c = Category(segments=[])
        categories.append(c)
        for s in self.segments:
            # new category starts with "-"
            if s.value[0] == '-':
                cat_key = s.value
                c = Category(key=cat_key, segments=[])
                categories.append(c)
            else:
                c.segments.append(s)

        # the first category might have no segments. remove categories without segments
        if not categories[0].segments:
            categories = categories[1:]

        return categories

    @model_validator(mode='after')
    def check_keys_are_unique_in_each_category(self) -> Self:
        """Reject identifiers in which a key occurs more than once within one category."""
        for c in self.categories:
            keys = [s.key for s in c.segments if s.key]
            duplicate_keys = [k for k in set(keys) if keys.count(k) > 1]
            if duplicate_keys:
                raise ValueError(f'Duplicate keys {",".join(duplicate_keys)} in category {c.key}')
        return self

    @staticmethod
    def from_categories(categories:list[Category]) :
        """Build an Identifier by flattening categories back into a flat segment list.

        Each category key is re-inserted as a plain value-only segment.
        """
        segments = list()
        for c in categories:
            if c.key:
                segments.append(IDSegment(value=c.key))
            segments.extend(c.segments)
        return Identifier(segments=segments)
57
-
58
-
59
-
60
class Extension(ABC, BaseModel):
    """Abstract base for PAC url extensions: a name, a type code and raw data.

    BUGFIX/modernization: ``abstractproperty`` and ``abstractstaticmethod``
    are deprecated since Python 3.3; the supported form is stacking
    ``@abstractmethod`` under ``@property`` / ``@staticmethod``.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Extension name as it appears in the serialized url."""

    @property
    @abstractmethod
    def type(self) -> str:
        """Extension type code (e.g. 'TREX')."""

    @property
    @abstractmethod
    def data(self) -> str:
        """Raw data payload of the extension."""

    @staticmethod
    @abstractmethod
    def from_spec_fields(name, type, data):
        """Create a concrete extension from parsed name/type/data fields."""
77
-
78
-
79
class UnknownExtension(Extension):
    """Fallback extension used when no interpreter is registered for a type.

    Stores the raw name/type/data fields verbatim and exposes them through
    the Extension property interface.
    """
    name_:str  # raw extension name
    type_:str  # raw extension type code
    data_:str  # raw extension payload

    @property
    def name(self)->str:
        return self.name_

    @property
    def type(self)->str:
        return self.type_

    @property
    def data(self)->str:
        return self.data_

    @staticmethod
    def from_spec_fields(name, type, data):
        """Wrap the parsed fields without any interpretation."""
        return UnknownExtension(name_=name, type_=type, data_=data)
99
-
100
-
101
-
102
class PACID(BaseModel):
    """A PAC-ID: the issuing domain plus the identifier segments."""
    issuer:str  # issuer domain, e.g. 'METTORIUS.COM'
    identifier: Identifier
105
-
106
-
107
class PACID_With_Extensions(BaseModel):
    """A PAC-ID together with the extensions that followed it in the url."""
    pac_id: PACID
    extensions: list[Extension] = Field(default_factory=list)
110
-
111
-
112
-
113
-
114
-
labfreed/PAC_ID/parse.py DELETED
@@ -1,133 +0,0 @@
1
-
2
-
3
- import re
4
- from types import MappingProxyType
5
- from .data_model import *
6
-
7
-
8
# Default keys per category marker: after a marker (e.g. '-MD'), key-less
# segments are assigned these keys in order (see PAC_Parser._apply_category_defaults).
category_conventions = MappingProxyType(
    {
        '-MD': ['240', '21'],
        '-MS': ['240', '10', '20', '21', '250'],
        '-MC': ['240', '10', '20', '21', '250'],
        '-MM': ['240', '10', '20', '21', '250']
    }
)


# Default name/type assigned to leading extensions given in short notation
# (without an explicit 'name$type/' prefix): position 0 -> 'N', position 1 -> 'SUM'/TREX.
extension_convention = MappingProxyType(
    {
        0: { 'name': 'N', 'type': 'N'},
        1: { 'name': 'SUM', 'type': 'TREX'}
    }
)
24
-
25
-
26
-
27
class PAC_Parser():
    """Parses PAC urls into PACID / PACID_With_Extensions data models."""

    def __init__(self, extension_interpreters:dict[str, Extension]=None):
        # maps an extension type code (e.g. 'TREX') to the Extension subclass that interprets it
        self.extension_interpreters = extension_interpreters or {}

    def parse_pac_url(self, pac_url:str) -> PACID_With_Extensions:
        """Split a PAC url at the first '*' into id and extensions and parse both."""
        if '*' in pac_url:
            id_str, ext_str = pac_url.split('*', 1)
        else:
            id_str = pac_url
            ext_str = ""

        pac_id = self.parse_pac_id(id_str)
        extensions = self.parse_extensions(ext_str)
        return PACID_With_Extensions(pac_id=pac_id, extensions=extensions)

    def parse_id_segments(self, identifier:str):
        """Parse '/'-separated segments; each is 'value' or 'key:value'.

        Raises:
            ValueError: if a segment contains more than one ':'.
        """
        if not identifier:
            return []

        id_segments = list()
        for s in identifier.split('/'):
            tmp = s.split(':')
            if len(tmp) == 1:
                segment = IDSegment(value=tmp[0])
            elif len(tmp) == 2:
                segment = IDSegment(key=tmp[0], value=tmp[1])
            else:
                raise ValueError(f'invalid segment: {s}')
            id_segments.append(segment)
        return id_segments

    def _apply_category_defaults(self, segments_in: list[IDSegment]):
        """Assign conventional default keys to key-less segments following a known category marker."""
        segments = segments_in.copy()
        default_keys = None
        for s in segments:
            if not s.key and default_keys:
                s.key = default_keys.pop(0)
            else:
                default_keys = None

            # category starts: start with new defaults.
            if s.value in category_conventions.keys():
                default_keys = category_conventions.get(s.value).copy()  # copy, so the entries can be popped when used
        return segments

    def parse_pac_id(self, id_str:str) -> PACID:
        """Parse the issuer and identifier part of a PAC url.

        Raises:
            ValueError: if the string does not look like '<issuer>/<identifier>'.
        """
        # BUGFIX: the dot in 'PAC.' must be escaped (it previously matched any
        # character); raw string avoids invalid-escape warnings for '\.'.
        m = re.match(r'(HTTPS://)?(PAC\.)?(?P<issuer>.+?\..+?)/(?P<identifier>.*)', id_str)
        if not m:
            # BUGFIX: re.match returns None on no match; previously this crashed
            # with AttributeError on .groupdict()
            raise ValueError(f'invalid PAC-ID: {id_str}')
        d = m.groupdict()

        id_segments = self.parse_id_segments(d.get('identifier'))
        id_segments = self._apply_category_defaults(id_segments)

        return PACID(issuer=d.get('issuer'),
                     identifier=Identifier(segments=id_segments)
                     )

    def parse_extensions(self, extensions_str:str|None) -> list[Extension]:
        """Parse '*'-separated extensions.

        Leading extensions without an explicit 'name$type/' prefix get their
        name/type from extension_convention, until the first named extension.

        Raises:
            ValueError: if an extension has no name/type and no default applies.
        """
        extensions = list()

        if not extensions_str:
            return extensions

        defaults = extension_convention
        for i, e in enumerate(extensions_str.split('*')):
            if e == '':  # this will happen if first extension starts with *
                continue
            m = re.match(r'((?P<name>.+)\$(?P<type>.+)/)?(?P<data>.+)', e)
            if not m:
                raise ValueError(f'invalid extension: {e}')
            d = m.groupdict()

            name = d.get('name')
            type = d.get('type')
            data = d.get('data')

            if name:
                defaults = None  # once a name was specified no longer assign defaults
            elif defaults and defaults.get(i):
                name = defaults.get(i).get('name')
                type = defaults.get(i).get('type')
            else:
                # BUGFIX: was a plain string, so '{i}' was never interpolated
                raise ValueError(f'extension number {i}, must have name and type')

            # convert to subtype if they were given
            subtype = self.extension_interpreters.get(type) or UnknownExtension
            e = subtype.from_spec_fields(name=name, type=type, data=data)
            extensions.append(e)

        return extensions
125
-
126
-
127
-
128
-
129
-
130
- if __name__ == "__main__":
131
- pacid_str = 'HTTPS://PAC.METTORIUS.COM/-DR/AB378/-MD/B-500/1235/-MS/AB/X:88/WWW/-MS/240:11/BB*ABCFD*A$HUR:25+B$CEL:99*BLUBB$TREX/A$HUR:25+B$CEL:99'
132
-
133
- pac = PAC_Parser().parse_pac(pacid_str)
@@ -1,57 +0,0 @@
1
-
2
- from .data_model import *
3
-
4
-
5
-
6
class PAC_Serializer():
    """Serializes a PACID (plus optional extensions) into its canonical url form."""

    def to_url(self, pac:PACID|PACID_With_Extensions, extensions:list[Extension]=None, use_short_notation_for_extensions=False) -> str:
        """Serialize to 'HTTPS://PAC.<issuer><segments><extensions>', uppercased.

        Raises:
            ValueError: if extensions are given both inside pac and as parameter.
        """
        if isinstance(pac, PACID_With_Extensions):
            if extensions:
                raise ValueError('Extensions were given twice, as part of PACID_With_Extension and as method parameter.')
            extensions = pac.extensions
            pac = pac.pac_id
        # BUGFIX: extensions may still be None when a bare PACID is passed without
        # extensions; default to an empty list so _serialize_extensions can iterate.
        extensions = extensions or []
        issuer = pac.issuer
        extensions_str = self._serialize_extensions(extensions, use_short_notation_for_extensions)
        id_segments = self._serialize_id_segments(pac.identifier.segments)
        return f"HTTPS://PAC.{issuer}{id_segments}{extensions_str}".upper()

    def _serialize_id_segments(self, segments):
        """Render each segment as '/key:value', or '/value' when it has no key."""
        out = ''
        for s in segments:
            if s.key:
                out += f'/{s.key}:{s.value}'
            else:
                out += f'/{s.value}'
        return out

    def _serialize_extensions(self, extensions:list[Extension], use_short_notation_for_extensions):
        """Render extensions as '*name$type/data'.

        With short notation, the leading 'N' and 'SUM' extensions are emitted
        as bare '*data'; the first extension breaking the convention disables
        short notation for the rest.
        """
        out = ''
        short_notation = use_short_notation_for_extensions
        for i, e in enumerate(extensions):

            if short_notation and i==0:
                if e.name=='N':
                    out += f'*{e.data}'
                    continue
                else:
                    short_notation = False
            if short_notation and i==1:
                if e.name=='SUM':
                    out += f'*{e.data}'
                    continue
                else:
                    short_notation = False

            out += f'*{e.name}${e.type}/{e.data}'
        return out
49
-
50
-
51
-
52
def main():
    # placeholder entry point; serialization is exercised via PAC_Serializer directly
    pass


if __name__ == "__main__":
    main()
@@ -1,239 +0,0 @@
1
- from datetime import datetime
2
- from enum import Enum
3
- import logging
4
- import re
5
-
6
- from pydantic import BaseModel, ValidationError, field_validator
7
- from abc import ABC
8
-
9
- from .unit_utilities import *
10
- from ..PAC_ID.data_model import Extension
11
-
12
# Compiled patterns for the T-REX segment shapes (table, column header, scalar).
# BUGFIX: these were f-strings with no placeholders, whose '\w' / '\$' sequences
# trigger invalid-escape-sequence warnings; raw strings are the correct form.
re_table_pattern = re.compile(r"(?P<tablename>[\w\.-]*?)\$\$(?P<header>[\w\.,\$:]*?)::(?P<body>.*)")
re_col_head_pattern = re.compile(r"(?P<name>[\w\.-]*?)\$(?P<unit>[\w\.]*)")
re_scalar_pattern = re.compile(r"(?P<name>[\w\.-]*?)\$(?P<unit>[\w\.]*?):(?P<value>.*)")

# strptime formats for T-REX date values: full timestamp, and date-only fallback
TREX_DATEFORMAT = '%Y%m%dT%H%M%S'
TREX_TIMEFORMAT = '%Y%m%d'
18
-
19
class TREX_types(Enum):
    """Non-quantity T-REX value type codes (other codes are treated as unit codes)."""
    BOOL = 'T.B'   # boolean ('T' means true)
    DATE = 'T.D'   # date or datetime
    TEXT = 'T.A'   # plain text
    ERROR = 'E'    # error marker, value passed through as-is
24
-
25
-
26
-
27
class T_REX_Segment_ParseError(Exception):
    """Raised when a string is not a valid T-REX scalar or table segment.

    BUGFIX: derives from Exception instead of BaseException, so generic
    'except Exception' handlers and tooling treat it as an ordinary error.
    """
    pass
29
-
30
-
31
class TREX_Segment(BaseModel, ABC):
    """Abstract base for T-REX segments; concrete segments are scalars or tables."""
    # name of the segment within the T-REX string (set when parsing)
    segment_name: str = None

    def as_trex_segment_str(self, segment_name):
        """Serialize this segment back to its T-REX string form; overridden by subclasses."""
        pass
36
-
37
-
38
class TREX_SimpleSegment(TREX_Segment):
    """A scalar T-REX segment of the form 'name$type:value'."""
    type: str   # type code: a TREX_types value or a unit code
    value: str  # raw (string) value

    @field_validator('type', mode='before')
    def validate_type(t):
        # accept TREX_types enum members and store their string code
        if isinstance(t, TREX_types):
            t = t.value
        return t

    @staticmethod
    def from_trex_segmentstring(segment_str):
        """Parse a 'name$type:value' string.

        Raises:
            T_REX_Segment_ParseError: if the string is not a valid scalar segment.
        """
        matches = re_scalar_pattern.match(segment_str)
        if not matches:
            raise T_REX_Segment_ParseError("Segment is not a valid TREX Scalar")

        name, type_, value = matches.groups()

        out = TREX_SimpleSegment(type=type_, value=value, segment_name=name)
        return out

    @property
    def value_as_builtin_or_quantity_type(self) -> datetime|bool|str|PydanticUncertainQuantity:
        # convert the raw string according to the segment's type code
        return _value_as_builtin_or_quantity(self.value, self.type)

    def as_trex_segment_str(self, segment_name) -> str:
        """Serialize back to 'name$type:value'."""
        return f'{segment_name}${self.type}:{self.value}'
66
-
67
-
68
class TREX_Table(TREX_Segment):
    """A tabular T-REX segment: 'name$$col$type:col$type::row::row...'."""
    col_names: list[str]   # one entry per column
    col_types: list[str]   # type codes, parallel to col_names
    data: list[list[str]]  # raw cell strings, row-major

    @staticmethod
    def from_trex_segmentstring( segment_str:str):
        """Parse a table segment string.

        Raises:
            T_REX_Segment_ParseError: if the string is not a valid table segment.
        """
        matches = re_table_pattern.match(segment_str)
        if not matches:
            raise T_REX_Segment_ParseError(f"Segment is not a valid TREX table: {segment_str}")
        name, header, body = matches.groups()

        column_heads = [re_col_head_pattern.match(colhead).groups() for colhead in header.split(':')]
        col_names = [ch[0] for ch in column_heads]
        col_types = [ch[1] for ch in column_heads]

        data = [row.split(':') for row in body.split('::') ]

        out = TREX_Table(col_names=col_names, col_types=col_types, data=data, segment_name=name)
        return out

    def n_rows(self) -> int:
        """Number of data rows."""
        return len(self.data)

    def n_cols(self) -> int:
        """Number of columns."""
        return len(self.col_names)

    def row_data(self, row:int) -> list:
        """Return one row with each cell converted according to its column type."""
        # BUGFIX: iterate the requested row's cells; the original iterated over
        # self.data (the list of rows) and ignored the 'row' argument entirely.
        out = [_value_as_builtin_or_quantity(element, self.col_types[i]) for i, element in enumerate(self.data[row])]
        return out

    def col_data(self, col:str|int) -> list:
        """Return one column (by name or index) with converted values."""
        col_index = self._get_col_index(col)
        type = self.col_types[col_index]
        out = [_value_as_builtin_or_quantity(row[col_index],type) for row in self.data]
        return out

    def cell_data(self, row:int, col:str|int):
        """Return a single converted cell, or None (with a warning) when not found."""
        try:
            col_index = self._get_col_index(col)
            value = self.data[row][col_index]
            type = self.col_types[col_index]
        # BUGFIX: out-of-range row/col indices raise IndexError, which the
        # original ValueError-only handler let escape
        except (ValueError, IndexError):
            logging.warning(f"row {row}, column {col} not found")
            return None

        return _value_as_builtin_or_quantity(value, type)

    def _get_col_index(self, col:str|int):
        """Map a column name to its index; pass an int through unchanged.

        Raises:
            ValueError: if a column name is unknown.
            TypeError: if col is neither str nor int.
        """
        if isinstance(col, str):
            col_index = self.col_names.index(col)
        elif isinstance(col, int):
            col_index = col
        else:
            raise TypeError(f"Column must be specified as string or int: {col.__name__}")
        return col_index

    def as_trex_segment_str(self, name):
        """Serialize back to 'name$$header::row::row...' form."""
        header = ':'.join([f'{el[0]}${el[1]}' for el in zip(self.col_names, self.col_types)])
        date_rows = list()
        for r in self.data:
            row = ':'.join([str(cell) for cell in r])
            date_rows.append(row)
        data = '::'.join(date_rows)
        s = f'{name}$${header}::{data}'
        return s
134
-
135
-
136
-
137
class TREX(Extension, BaseModel):
    """The T-REX extension: a '+'-separated collection of named segments."""
    name_:str
    segments: dict[str,TREX_Segment]

    @property
    def name(self)->str:
        """Extension name."""
        return self.name_

    @property
    def type(self)->str:
        """Always 'TREX'."""
        return 'TREX'

    @property
    def data(self)->str:
        """Serialize all segments, joined by '+'."""
        seg_strings = list()
        for s_name, s in self.segments.items():
            seg_strings.append(s.as_trex_segment_str(s_name))
        s_out = '+'.join(seg_strings)
        return s_out

    @staticmethod
    def from_spec_fields(name, type, data):
        """Parse the extension data string into a TREX instance.

        Raises:
            ValueError: if data is empty.
        """
        if type != 'TREX':
            # BUGFIX: the message interpolated 'name' where the offending type was meant
            logging.warning(f'Type {type} was given, but this extension should only be used with type "TREX". Will try to parse data as TREX')

        if not data:
            raise ValueError('T-REX must be a string of non zero length')

        trex_str = data

        # remove extension indicator. Precaution in case it is not done yet
        # BUGFIX: [1:-1] also chopped off the last character; only the leading '*' must go
        if trex_str[0]=="*":
            trex_str=trex_str[1:]
        # remove line breaks. for editing T-REXes it's more convenient to have them in, so one never knows
        trex_str = re.sub(r"\s+", "", trex_str)

        segment_strings = trex_str.split('+')
        out_segments = dict()
        for s in segment_strings:
            # there are only two valid options. The segment is a scalar or a table.
            # Constructors do the parsing anyways and raise exceptions if invalid data
            # try both options and then let it fail
            try:
                segment = TREX_SimpleSegment.from_trex_segmentstring(s)
            except T_REX_Segment_ParseError:
                segment = TREX_Table.from_trex_segmentstring(s)
            out_segments[segment.segment_name] = segment

        return TREX(name_=name, segments=out_segments)

    def get_segment(self, segment_id:str) -> TREX_Segment:
        """Return the segment with the given name, or None when absent."""
        return self.segments.get(segment_id)
189
-
190
-
191
-
192
-
193
class TREX_Struct(TREX_Segment):
    """Struct is a special interpretation of a T-REX Table with one row"""
    wrapped_table:TREX_Table  # the single-row table being wrapped

    @property
    def segment_name_(self):
        # delegate the name to the wrapped table
        return self.wrapped_table.segment_name

    @field_validator('wrapped_table')
    def validate_table(table):
        # a struct maps keys to single values, hence exactly one data row
        if len(table.data) != 1:
            raise ValidationError("Too many input rows. Struct can only have one row")
        return table

    def get(self, key):
        """Return the converted value stored under the given column key."""
        return self.wrapped_table.cell_data(0, key)

    def keys(self):
        """Return the available keys (the wrapped table's column names)."""
        return self.wrapped_table.col_names
212
-
213
-
214
-
215
-
216
def _to_datetime(trex_datetime):
    """Parse a T-REX date string.

    Tries the full timestamp format first, then the date-only fallback;
    returns None when the input matches neither (or is not a string).
    """
    for fmt in (TREX_DATEFORMAT, TREX_TIMEFORMAT):
        try:
            return datetime.strptime(trex_datetime, fmt)
        except (ValueError, TypeError):
            continue
    return None
225
-
226
- def _value_as_builtin_or_quantity(v:str|list[str], type:str) -> datetime|bool|str|PydanticUncertainQuantity:
227
- match type:
228
- case 'T.D':
229
- return _to_datetime(v)
230
- case 'T.B':
231
- return v == 'T' or bool(v)
232
- case 'T.A':
233
- return v
234
- case 'T.X':
235
- raise NotImplementedError("Base36 encoded T-REX segment not implemented")
236
- case 'E':
237
- return v
238
- case _:
239
- return quantity_from_UN_CEFACT(v, type)
@@ -1,46 +0,0 @@
1
- import logging
2
- import re
3
-
4
- from .data_model import TREX, T_REX_Segment_ParseError, TREX_SimpleSegment, TREX_Table
5
-
6
-
7
def from_trex_string(trex_str, enforce_type=True) -> TREX:
    """Parse a full T-REX extension string ('name$type/data') into a TREX.

    Args:
        trex_str: the raw extension string, optionally still prefixed with '*'.
        enforce_type: when True, abort if the declared type is not 'TREX'.

    Raises:
        ValueError: on empty/invalid input, or on a non-TREX type with enforce_type.
    """
    if not trex_str:
        raise ValueError('T-REX must be a string of non zero length')

    # remove extension indicator. Precaution in case it is not done yet
    # BUGFIX: [1:-1] also dropped the last character; only the leading '*' must go
    if trex_str[0]=="*":
        trex_str=trex_str[1:]
    # remove line breaks. for editing T-REXes it's more convenient to have them in, so one never knows
    trex_str = trex_str.replace('\n','')

    # BUGFIX: re.match returns None on no match; .groupdict() on None raised
    # AttributeError before the validity check could run
    m = re.match(r'((?P<name>.+)\$(?P<type>.+)/)?(?P<data>.+)', trex_str)
    if not m:
        raise ValueError('TREX is invalid.')
    d = m.groupdict()
    type = d.get('type')
    name = d.get('name')
    data = d.get('data')

    if not type:
        logging.warning('No type given. Assume its trex')
    elif type != 'TREX' and enforce_type:
        logging.error(f'Extension type {type} is not TREX. Aborting')
        raise ValueError(f'Extension type {type} is not TREX.')
    elif type != 'TREX':
        # BUGFIX: this branch previously also fired for a correct 'TREX' type and
        # logged an unformatted '{type}' placeholder (missing f-prefix)
        logging.warning(f'Extension type {type} is not TREX. Try anyways')

    segment_strings = data.split('+')
    out_segments = dict()
    for s in segment_strings:
        # there are only two valid options. The segment is a scalar or a table.
        # Constructors do the parsing anyways and raise exceptions if invalid data
        # try both options and then let it fail
        try:
            segment = TREX_SimpleSegment.from_trex_segmentstring(s)
        except T_REX_Segment_ParseError:
            segment = TREX_Table.from_trex_segmentstring(s)
        out_segments[segment.segment_name] = segment
    trex = TREX(name_= name, segments=out_segments)
    trex._trex_str = trex_str
    return trex
@@ -1,32 +0,0 @@
1
- from math import floor, log10, pow
2
-
3
- def to_significant_digits_str(x:int|float, uncertainty:float|int) -> str:
4
- if uncertainty == None:
5
- if isinstance(x, float):
6
- Warning(f'Uncertainty was given as none. Returning unrounded number')
7
- return str(x)
8
- else:
9
- uncertainty = 1
10
-
11
-
12
- log_least_significant_digit = floor(log10(uncertainty))
13
- digits = -log_least_significant_digit
14
-
15
- x_significant = round(x, digits)
16
-
17
- if digits <= 0:
18
- return str(int(x_significant))
19
- else:
20
- return str(x_significant)
21
-
22
-
23
-
24
- if __name__ == "__main__":
25
- print(to_significant_digits_str(111111.1111111, 1000))
26
- print(to_significant_digits_str(111111.1111111, 100))
27
- print(to_significant_digits_str(111111.1111111, 10))
28
- print(to_significant_digits_str(111111.1111111, 1))
29
- print(to_significant_digits_str(111111.1111111, 0.1))
30
- print(to_significant_digits_str(111111.1111111, 0.01))
31
- print(to_significant_digits_str(111111.1111111, 0.001))
32
- print(to_significant_digits_str(111111.1111111, 0.0001))