kodexa 7.0.10350737552__py3-none-any.whl → 7.0.10402571165__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,339 @@
+ import logging
+ import os
+ import uuid
+ from typing import Optional, List
+
+ import jinja2
+ from pydantic import BaseModel
+
+ from kodexa import ContentNode
+ from kodexa.model.model import Tag
+ from kodexa.model.objects import ContentException, Taxon, Taxonomy, Assistant
+ from kodexa.utils import taxon_to_property_name, taxon_to_class_name, taxon_to_group_path, snake_to_camel, \
+     to_snake
+
+ logger = logging.getLogger()
+
+
+ class LLMDataAttribute(BaseModel):
+     """
+     This is the data structure that is used to take the results from the LLM
+     so we can use them. We use this as a base class for building classes that
+     align with a taxonomy.
+     """
+
+     value: Optional[str] = None
+     line_ids: Optional[list[str]] = None
+     taxon_path: Optional[str] = None
+     data_type: Optional[str] = None
+     value_path: Optional[str] = None
+     normalized_text: Optional[str] = None
+     node_uuid_list: Optional[List[int]] = None
+     tag_uuid: Optional[str] = None
+     exceptions: Optional[list[ContentException]] = None
+
+     def create_exception(
+         self,
+         exception_type_id: str,
+         exception_type: str,
+         normalized_text: str,
+         message: str,
+         exception_detail: str,
+     ):
+         content_exception = ContentException(
+             exception_type=exception_type,
+             exception_detail=exception_detail,
+             message=message,
+             tag_uuid=self.tag_uuid,
+         )
+         if self.exceptions is None:
+             self.exceptions = []
+
+         self.exceptions.append(content_exception)
+
+
+ class LLMDataObject(BaseModel):
+     """
+     A class to represent a LLM (Large Language Model) data object.
+
+     ...
+
+     Attributes
+     ----------
+     group_uuid : str, optional
+         A unique identifier for the group, by default None
+     cell_index : int, optional
+         The index of the cell (effectively the row), by default 0
+
+     Methods
+     -------
+     __init__(self, group_uuid: str = None, cell_index: int = 0):
+         Initializes the LLMDataObject with a group UUID and cell index.
+     apply_labels(self, document: "KodexaDocumentLLMWrapper", parent_group_uuid: str = None, assistant: Optional["Assistant"] = None):
+         Applies labels to the document if it exists.
+     """
+
+     group_uuid: Optional[str] = None
+     cell_index: int = 0
+     exceptions: Optional[list[ContentException]] = None
+
+     class Config:
+         arbitrary_types_allowed = True
+
+     def __init__(self, group_uuid: str = None, cell_index: int = 0):
+         """
+         Initializes the LLMDataObject
+         """
+         super().__init__()
+         self.cell_index = cell_index
+         if group_uuid is None:
+             self.group_uuid = str(uuid.uuid4())
+         else:
+             self.group_uuid = group_uuid
+
+     def create_exception(
+         self,
+         exception_type_id: str,
+         exception_type: str,
+         message: str,
+         exception_detail: str,
+         severity: str = "ERROR",
+     ):
+         content_exception = ContentException(
+             exception_type=exception_type,
+             exception_details=exception_detail,
+             message=message,
+             group_uuid=self.group_uuid,
+             severity=severity,
+         )
+         if self.exceptions is None:
+             self.exceptions = []
+
+         self.exceptions.append(content_exception)
+
+     def apply_labels(
+         self, document: "KodexaDocumentLLMWrapper", parent_group_uuid: str = None,
+         assistant: Optional["Assistant"] = None
+     ):
+         """
+         Applies labels to the document if it exists.
+
+         Adds any content exceptions recorded on this LLMDataObject to the
+         document, then walks the object's fields and applies the corresponding
+         labels to the document's content nodes.
+
+         Parameters
+         ----------
+         document : KodexaDocumentLLMWrapper
+             The Kodexa document LLM wrapper
+         parent_group_uuid : str, optional
+             A unique identifier for the parent group, by default None
+         assistant : Assistant, optional
+             The assistant that will own the applied tags, by default None
+         """
+
+         # Let's make sure we add all the content exceptions
+         if self.exceptions is not None:
+             for exception in self.exceptions:
+                 # We have two types of exception, one in the API and one in the
+                 # document
+                 from kodexa.model import ContentException as KodexaContentException
+                 internal_exception = KodexaContentException(
+                     exception_type=exception.exception_type,
+                     message=exception.message,
+                     exception_details=exception.exception_details,
+                     severity=exception.severity,
+                     group_uuid=exception.group_uuid,
+                     tag_uuid=exception.tag_uuid,
+                 )
+                 document.doc.add_exception(internal_exception)
+
+         # Let's go through this data object and find all the attributes that have a value
+         # then we will apply the labels to the document
+         for field in self.__fields__:
+             logger.info(f"Processing field {field}")
+             value = getattr(self, field)
+
+             if isinstance(value, list):
+                 logger.info(f"Processing as a list {value}")
+                 for item in value:
+                     self.process_child(item, document, parent_group_uuid, assistant)
+             else:
+                 logger.info(f"Processing as a single value {value}")
+                 self.process_child(value, document, parent_group_uuid, assistant)
+
+     def process_child(self, value, document, parent_group_uuid, assistant):
+
+         logger.info(f"Processing child {value}")
+         if isinstance(value, LLMDataAttribute):
+             # We need to add the label to the document for this attribute
+
+             tag = value.taxon_path
+
+             # TODO: work out why the node UUIDs are sometimes missing
+             logger.info(f"Value: {value.normalized_text}, node_uuid_list: {value.node_uuid_list}")
+             if value.node_uuid_list is None:
+                 value.node_uuid_list = value.line_ids
+             logger.info(f"Applying label {tag} to node UUIDs {value.node_uuid_list}")
+
+             if isinstance(value.node_uuid_list, int):
+                 value.node_uuid_list = [value.node_uuid_list]
+
+             nodes_to_label: list[ContentNode] = (
+                 [
+                     document.doc.get_persistence().get_node(node_uuid)
+                     for node_uuid in value.node_uuid_list if (node_uuid != '0' and node_uuid != 0)
+                 ]
+                 if value.node_uuid_list
+                 else []
+             )
+
+             tag_uuid = str(uuid.uuid4())
+             for node in nodes_to_label:
+                 if node:
+                     if not node.has_tag(tag):
+                         try:
+                             confidence = -1 if value.value_path == 'DERIVED' else 1
+                             node.tag(
+                                 tag_to_apply=tag,
+                                 value=value.normalized_text,
+                                 tag_uuid=tag_uuid,
+                                 cell_index=self.cell_index,
+                                 selector="//word",
+                                 confidence=confidence,
+                                 group_uuid=self.group_uuid,
+                                 parent_group_uuid=parent_group_uuid,
+                                 owner_uri=f"assistant://{assistant.id}" if assistant else "model://taxonomy-llm",
+                             )
+                         except Exception:
+                             logger.error(f"Error tagging node {node.uuid} with tag {tag}")
+                     else:
+                         current_value = node.get_feature_values("tag", tag)
+                         new_tag = Tag(cell_index=self.cell_index,
+                                       uuid=tag_uuid,
+                                       value=value.normalized_text,
+                                       confidence=-1,
+                                       group_uuid=self.group_uuid,
+                                       parent_group_uuid=parent_group_uuid,
+                                       owner_uri=f"assistant://{assistant.id}" if assistant else "model://taxonomy-llm")
+                         current_value.append(new_tag)
+                         node.remove_feature("tag", tag)
+                         node.add_feature("tag", tag, current_value, single=False)
+
+             logger.info(f"Applied label {tag} to {len(nodes_to_label)} nodes")
+         if isinstance(value, LLMDataObject):
+             # We need to apply the labels to the document for this object
+             value.apply_labels(document, parent_group_uuid=self.group_uuid)
+             # logger.info(f"Applied labels to data object {value.group_uuid}")
+
+
+ def find_nearby_word_to_tag(node, tag):
+     logger.info(f"find_nearby_word_to_tag: {tag}")
+     # Create an ordered list of the lines on the page, sorted by distance from the target node
+     target_line_index = node.index if node.node_type == 'line' else node.select('parent::line')[0].index
+     all_lines_on_page = node.select('parent::page')[0].select('//line')
+
+     logger.debug(f"{target_line_index} {len(all_lines_on_page)} {all_lines_on_page}")
+     sorted_lines = sorted(all_lines_on_page, key=lambda line: abs(target_line_index - line.index))
+     # Find the first word that isn't yet tagged by this tag
+     for line in sorted_lines:
+         for word in line.select('//word'):
+             if not word.has_tag(tag):
+                 return word
+     return None
+
+
+ def get_template_env():
+     """Get the Jinja2 template environment used to render the data class templates.
+
+     Returns:
+         jinja2.Environment: the configured template environment
+     """
+     package_location = os.path.dirname(os.path.abspath(__file__))
+     template_loader = jinja2.FileSystemLoader([os.getcwd(), package_location])
+     env = jinja2.Environment(loader=template_loader, autoescape=True)
+     env.globals["snake_to_camel"] = snake_to_camel
+     env.globals["to_snake"] = to_snake
+     env.globals['taxon_to_property_name'] = taxon_to_property_name
+     env.globals['taxon_to_class_name'] = taxon_to_class_name
+     env.globals['taxon_to_group_path'] = taxon_to_group_path
+     return env
+
+
+ def write_template(template, output_location, output_filename, context):
+     """
+     Write the given template out to a file
+
+     Args:
+         template: the name of the template
+         output_location: the location to write the output
+         output_filename: the name of the output file
+         context: the context
+     """
+     template = get_template_env().get_template(template)
+     processed_template = template.render(context)
+
+     from pathlib import Path
+
+     Path(output_location).mkdir(parents=True, exist_ok=True)
+     with open(output_location + "/" + output_filename, "w") as text_file:
+         text_file.write(processed_template)
+
+
+ def build_llm_data_classes_for_taxonomy(
+     taxonomy: Taxonomy, output_dir: str, output_file: str, use_labels: bool = False
+ ):
+     """
+     This function uses Jinja templates to build a set of classes that represent a taxonomy.
+     These classes extend LLMDataObject and therefore have the ability to take an LLM
+     response and map it to the Kodexa Document, identifying and labeling the nodes as needed.
+
+     :param taxonomy: the Taxonomy to generate classes for
+     :param output_dir: the directory to write the generated module to
+     :param output_file: the filename of the generated module
+     :param use_labels: passed through to the template context
+     :return:
+     """
+
+     # We will use a jinja template to build all the classes we need, to do this
+     # we will iterate over all the taxons in the taxonomy
+     def set_path(taxon: Taxon, parent_path: Optional[str] = None):
+         if parent_path is not None:
+             taxon.path = parent_path + "/" + taxon.name
+         else:
+             taxon.path = taxon.name
+         if taxon.children:
+             for child_taxon in taxon.children:
+                 set_path(child_taxon, taxon.path)
+
+     for taxon in taxonomy.taxons:
+         set_path(taxon, None)
+
+     def collect_group_taxons(taxons: list[Taxon]) -> list[Taxon]:
+         """
+         Recursively collects all group taxons from a list of taxons.
+
+         Args:
+             taxons (list[Taxon]): The list of taxons to collect group taxons from.
+
+         Returns:
+             list[Taxon]: A list of group taxons.
+
+         """
+         group_taxons = []
+         for taxon in taxons:
+             if taxon.group:
+                 group_taxons.append(taxon)
+             if taxon.children:
+                 group_taxons = group_taxons + collect_group_taxons(taxon.children)
+         return group_taxons
+
+     all_group_taxons = collect_group_taxons(taxonomy.taxons)
+     all_group_taxons.reverse()
+     context = {"taxons": all_group_taxons, "use_labels": use_labels}
+     write_template("llm_data_class.j2", output_dir, output_file, context)
+
+     # Let's log what we created
+     logger.info(f"Created the following classes in {output_dir}/{output_file}")
+     with open(f"{output_dir}/{output_file}", "r") as file:
+         logger.info(file.read())
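
For orientation, a minimal usage sketch (not taken from the package) of the module above. The taxonomy object is assumed to have been fetched from the Kodexa platform elsewhere, and the output paths are illustrative; the import path follows the kodexa/dataclasses/__init__.py entry added in the RECORD below.

from kodexa.dataclasses import build_llm_data_classes_for_taxonomy

# taxonomy: a kodexa.model.objects.Taxonomy obtained elsewhere (assumed)
build_llm_data_classes_for_taxonomy(
    taxonomy,
    output_dir="generated",
    output_file="data_classes.py",
    use_labels=False,
)

# The generated module defines one LLMDataObject subclass per group taxon; an
# instance populated from an LLM response can then write its values back onto a
# wrapped document with apply_labels(document, assistant=assistant).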
@@ -0,0 +1,21 @@
+ from typing import Optional, List
+ from kodexa_langchain.data_class import LLMDataAttribute, LLMDataObject
+ from kodexa_langchain.llm import deserialize_llm_data
+ from kodexa_langchain.document import KodexaDocumentLLMWrapper
+
+ {%- for taxon in taxons %}
+
+ class {{ taxon_to_class_name(taxon) }}(LLMDataObject):
+
+ {%- for child_taxon in taxon.children %}{%- if child_taxon.group %}
+     {{ taxon_to_property_name(child_taxon) }}: Optional[List[{{ taxon_to_class_name(child_taxon) }}]] = None
+ {%- else %}
+     {{ taxon_to_property_name(child_taxon) }}: Optional[LLMDataAttribute] = LLMDataAttribute(taxon_path='{{ child_taxon.path }}', data_type='{{ child_taxon.taxon_type.title() }}', value_path='{{ child_taxon.value_path.title() }}')
+ {%- endif %}
+ {%- endfor %}
+
+     def __init__(self, data: dict, document: Optional[KodexaDocumentLLMWrapper] = None, group_uuid=None, cell_index: int = 0, taxon=None, extraction_context=None):
+         super().__init__(group_uuid, cell_index)
+         deserialize_llm_data(self, data, document, f'{{ taxon_to_group_path(taxon) }}', group_uuid, taxon, extraction_context)
+
+ {%- endfor %}
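
To illustrate, for a hypothetical taxonomy with one group taxon labelled "Invoice" (group path "invoice") containing a single value taxon labelled "Invoice Number", the template above would render roughly the following class; the taxon path, data type, and value path shown are assumptions for the example:

class Invoice(LLMDataObject):

    invoice_number: Optional[LLMDataAttribute] = LLMDataAttribute(taxon_path='invoice/invoiceNumber', data_type='String', value_path='Value')

    def __init__(self, data: dict, document: Optional[KodexaDocumentLLMWrapper] = None, group_uuid=None, cell_index: int = 0, taxon=None, extraction_context=None):
        super().__init__(group_uuid, cell_index)
        deserialize_llm_data(self, data, document, f'invoice', group_uuid, taxon, extraction_context)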
@@ -0,0 +1,178 @@
+ """
+ This module provides a set of functions to manipulate and convert taxonomy objects for use within a data model.
+ It includes functions to convert taxonomy names to various naming conventions such as property names, class names,
+ and group paths. Additionally, it offers utility functions for string manipulation, like converting snake case strings
+ to camel case or title case, making string names safe for use as attribute names, converting strings to hexadecimal
+ color codes, estimating the token count of a text, and recursively finding all non-abstract subclasses of a given class.
+ """
+
+ import keyword
+ import logging
+ import re
+ from inspect import isabstract
+
+ from kodexa.model.objects import Taxon
+
+ logger = logging.getLogger(__name__)
+
+
+ def taxon_to_property_name(taxon: Taxon):
+     # Convert the taxon label to a snake_case property name and record it
+     # on the taxon as its external name
+     safe_property_name = to_snake(safe_name(taxon.label))
+     taxon.external_name = safe_property_name
+     return safe_property_name
+
+
+ def taxon_to_class_name(taxon: Taxon):
+     # Convert the taxon label to a CamelCase class name and record it
+     # on the taxon as its external name
+     safe_class_name = snake_to_camel(safe_name(taxon.label))
+     taxon.external_name = safe_class_name
+     return safe_class_name
+
+
+ def taxon_to_group_path(taxon: Taxon):
+     # We need to get the "group_name" from one of the taxons,
+     # which is the first part of the taxon path
+     return taxon.path.split('/')[0]
+
+
+ def snake_to_camel(snake_str):
+     components = snake_str.replace(" ", "_").split("_")
+     # Title-case each component so the result is CamelCase
+     return components[0].strip().title() + "".join(
+         x.strip().title() for x in components[1:]
+     )
+
+
+ def to_snake(base_str):
+     components = base_str.replace(" ", "_").replace("-", "_").split("_")
+
+     # if the base string starts with a number then we add n_ to the start
+     if components[0].isdigit():
+         components[0] = "n_" + components[0]
+
+     # Lower-case each component and join them with underscores
+     return "_".join(x.strip().lower() for x in components)
+
+
+ def make_safe_attribute_name(name):
+     # Replace invalid characters (anything not a letter, digit, or underscore) with an underscore
+     safe_name = ''.join(char if char.isalnum() or char == '_' else '_' for char in name)
+
+     # If the name starts with a digit, prepend an underscore
+     if safe_name[0].isdigit():
+         safe_name = '_' + safe_name
+
+     # Append an underscore if the name is a Python keyword
+     if keyword.iskeyword(safe_name):
+         safe_name += '_'
+
+     return safe_name
+
+
+ def safe_name(string):
+     """
+     Removes invalid characters from a string, replaces spaces with underscores, removes
+     leading/trailing underscores and hyphens, and makes the string lowercase. If the
+     resulting string starts with a number, it prefixes it with "n_".
+
+     :param string: The string to be transformed.
+     :return: The transformed string.
+     """
+     # Remove invalid characters
+
+     # trim the string
+     string = string.strip()
+
+     string = re.sub(r"[^\w\s-]", "", string)
+
+     # Replace spaces with underscores
+     string = re.sub(r"\s+", "_", string)
+
+     # Remove leading/trailing underscores and hyphens
+     string = string.strip("_-")
+
+     # Make it lowercase
+     string = string.lower()
+
+     if string[0].isdigit():
+         # can't have things starting with a number
+         string = "n_" + string
+
+     # make sure we don't collide with a python keyword
+     return make_safe_attribute_name(string)
+
+
+ def snake_case_to_title_case(snake_case_string):
+     words = snake_case_string.split("_")
+     title_case_words = [word.capitalize() for word in words]
+     return " ".join(title_case_words)
+
+
+ def string_to_hex_color(string):
+     # Remove any leading or trailing whitespace from the string
+     string = string.strip()
+
+     # Calculate the hash value of the string
+     hash_value = hash(string)
+
+     # Convert the hash value to a 24-bit hexadecimal color code
+     hex_color = "#{:06x}".format(hash_value & 0xFFFFFF)
+
+     return hex_color
+
+
+ def get_is_square_bracket_first(string):
+     first_square_bracket = string.find("[")
+     first_bracket = string.find("{")
+     # Check if both "{" and "[" exist in the string
+     if first_bracket != -1 and first_square_bracket != -1:
+         # Compare their indices to determine which appears first
+         if first_bracket < first_square_bracket:
+             return False
+         else:
+             return True
+     # If only one of them exists, return the one that appears
+     elif first_bracket != -1:
+         return False
+     elif first_square_bracket != -1:
+         return True
+     else:
+         return None
+
+
+ def cosine_similarity(v1, v2):
+     """Compute the cosine similarity between two vectors."""
+     dot_product = sum(a * b for a, b in zip(v1, v2))
+     norm_a = sum(a * a for a in v1) ** 0.5
+     norm_b = sum(b * b for b in v2) ** 0.5
+     return dot_product / (norm_a * norm_b)
+
+
+ def estimate_token_count(text, avg_token_length=1):
+     # Removing spaces to focus on characters that form tokens
+     char_count = len(text.replace(" ", ""))
+     # Estimating token count
+     estimated_tokens = char_count / avg_token_length
+     return round(estimated_tokens)
+
+
+ def get_all_concrete_subclasses(cls):
+     """
+     Recursively find all non-abstract subclasses of a given class.
+
+     Parameters:
+         cls (class): The parent class to find subclasses for.
+
+     Returns:
+         list: A list of all non-abstract subclasses of cls.
+     """
+     concrete_subclasses = []
+     for subclass in cls.__subclasses__():
+         if not isabstract(subclass):
+             concrete_subclasses.append(subclass)
+         concrete_subclasses.extend(get_all_concrete_subclasses(subclass))
+     return concrete_subclasses
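
A quick sanity-check sketch (not part of the package) of what the naming helpers above produce; the inputs are made up and the expected values follow directly from the implementations:

from kodexa.utils import safe_name, snake_to_camel, snake_case_to_title_case, to_snake

to_snake("Invoice Number")                  # "invoice_number"
snake_to_camel("invoice_number")            # "InvoiceNumber"
snake_case_to_title_case("invoice_number")  # "Invoice Number"
safe_name("2023 Total ($)")                 # "n_2023_total"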
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: kodexa
- Version: 7.0.10350737552
+ Version: 7.0.10402571165
  Summary: Python SDK for the Kodexa Platform
  Author: Austin Redenbaugh
  Author-email: austin@kodexa.com
@@ -3,6 +3,8 @@ kodexa/assistant/__init__.py,sha256=nlXm_YnV_50hgn0TIT2Fkc2fQ-86OjmctY_j8My9nc4,
  kodexa/assistant/assistant.py,sha256=5KFdbqFSLIZJyDRyZdpcfr448fT-CW4JhYu9A6B9DGY,14663
  kodexa/connectors/__init__.py,sha256=WF6G_MUeU32TlKSUKkpNoNX7dq8iBPliFMep4E8BmZc,328
  kodexa/connectors/connectors.py,sha256=FpUZDkSyHld2b9eYRuVOWzaFtuGoaRuPXXicJB7THbc,10413
+ kodexa/dataclasses/__init__.py,sha256=gM1meK2rltv3OO9oJGtuLG7It0L-JS8rMmSAg44Wbp8,12815
+ kodexa/dataclasses/llm_data_class.j2,sha256=1l30_Men0_cPEd6FCzbwsrWUi1QZidNEFXR06WudYlk,1127
  kodexa/model/__init__.py,sha256=rtLXYJBxB-rnukhslN9rlqoB3--1H3253HyHGbD_Gc8,796
  kodexa/model/base.py,sha256=CaZK8nMhT1LdCpt4aLhebJGcorjq9qRID1FjnXnP14M,521
  kodexa/model/entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -39,7 +41,8 @@ kodexa/testing/test_components.py,sha256=g5lP-GY0nTHuH5cIEw45vIejEeBaWkPKQGHL36j
  kodexa/testing/test_utils.py,sha256=DrLCkHxdb6AbZ-X3WmTMbQmnVIm55VEBL8MjtUK9POs,14021
  kodexa/training/__init__.py,sha256=xs2L62YpRkIRfslQwtQZ5Yxjhm7sLzX2TrVX6EuBnZQ,52
  kodexa/training/train_utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- kodexa-7.0.10350737552.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
- kodexa-7.0.10350737552.dist-info/METADATA,sha256=SFvyMuqsVbNSBZ7tal88dBHT4temLjiySjugrvxE2Bs,3533
- kodexa-7.0.10350737552.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
- kodexa-7.0.10350737552.dist-info/RECORD,,
+ kodexa/utils/__init__.py,sha256=Pnim1o9_db5YEnNvDTxpM7HG-qTlL6n8JwFwOafU9wo,5928
+ kodexa-7.0.10402571165.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+ kodexa-7.0.10402571165.dist-info/METADATA,sha256=_A6adX98QOX0rTPw7Mq_5Ev-ssdlQAXeTorZJx9Eyh4,3533
+ kodexa-7.0.10402571165.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+ kodexa-7.0.10402571165.dist-info/RECORD,,