UniTok 4.2.5.tar.gz → 4.3.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. {UniTok-4.2.5 → UniTok-4.3.1}/PKG-INFO +4 -5
  2. {UniTok-4.2.5 → UniTok-4.3.1}/README.md +3 -4
  3. {UniTok-4.2.5 → UniTok-4.3.1}/UniTok.egg-info/PKG-INFO +4 -5
  4. {UniTok-4.2.5 → UniTok-4.3.1}/setup.py +1 -1
  5. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/__init__.py +2 -0
  6. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/job.py +17 -2
  7. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/meta.py +2 -1
  8. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/__init__.py +3 -1
  9. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/base_tokenizer.py +7 -1
  10. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/glove_tokenizer.py +1 -1
  11. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/unitok.py +7 -7
  12. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/hub/hub.py +1 -1
  13. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/vocabulary/vocabulary.py +22 -5
  14. {UniTok-4.2.5 → UniTok-4.3.1}/LICENSE +0 -0
  15. {UniTok-4.2.5 → UniTok-4.3.1}/UniTok.egg-info/SOURCES.txt +0 -0
  16. {UniTok-4.2.5 → UniTok-4.3.1}/UniTok.egg-info/dependency_links.txt +0 -0
  17. {UniTok-4.2.5 → UniTok-4.3.1}/UniTok.egg-info/entry_points.txt +0 -0
  18. {UniTok-4.2.5 → UniTok-4.3.1}/UniTok.egg-info/requires.txt +0 -0
  19. {UniTok-4.2.5 → UniTok-4.3.1}/UniTok.egg-info/top_level.txt +0 -0
  20. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/__init__.py +0 -0
  21. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/__main__.py +0 -0
  22. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/analysis/__init__.py +0 -0
  23. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/analysis/lengths.py +0 -0
  24. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/analysis/plot.py +0 -0
  25. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/cols.py +0 -0
  26. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/column.py +0 -0
  27. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/fut.py +0 -0
  28. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/global_setting.py +0 -0
  29. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/meta.py +0 -0
  30. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/__init__.py +0 -0
  31. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/bert_tok.py +0 -0
  32. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/ent_tok.py +0 -0
  33. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/id_tok.py +0 -0
  34. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/number_tok.py +0 -0
  35. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/seq_tok.py +0 -0
  36. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/split_tok.py +0 -0
  37. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/tok/tok.py +0 -0
  38. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/unidep.py +0 -0
  39. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/unitok.py +0 -0
  40. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/vocab.py +0 -0
  41. {UniTok-4.2.5 → UniTok-4.3.1}/UniTokv3/vocabs.py +0 -0
  42. {UniTok-4.2.5 → UniTok-4.3.1}/setup.cfg +0 -0
  43. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/__main__.py +0 -0
  44. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/selector.py +0 -0
  45. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/status.py +0 -0
  46. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/digit_tokenizer.py +0 -0
  47. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/entity_tokenizer.py +0 -0
  48. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/split_tokenizer.py +0 -0
  49. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/transformers_tokenizer.py +0 -0
  50. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/union_tokenizer.py +0 -0
  51. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/tokenizer/unknown_tokenizer.py +0 -0
  52. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/__init__.py +0 -0
  53. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/class_pool.py +0 -0
  54. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/data.py +0 -0
  55. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/function.py +0 -0
  56. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/handler/__init__.py +0 -0
  57. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/handler/json_handler.py +0 -0
  58. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/handler/pkl_handler.py +0 -0
  59. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/hub/__init__.py +0 -0
  60. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/hub/param_hub.py +0 -0
  61. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/index_set/__init__.py +0 -0
  62. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/index_set/index_set.py +0 -0
  63. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/index_set/job_set.py +0 -0
  64. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/index_set/tokenizer_set.py +0 -0
  65. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/index_set/vocabulary_set.py +0 -0
  66. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/instance.py +0 -0
  67. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/map.py +0 -0
  68. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/space.py +0 -0
  69. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/symbol.py +0 -0
  70. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/utils/verbose.py +0 -0
  71. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/vocabulary/__init__.py +0 -0
  72. {UniTok-4.2.5 → UniTok-4.3.1}/unitok/vocabulary/counter.py +0 -0
```diff
--- UniTok-4.2.5/PKG-INFO
+++ UniTok-4.3.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: UniTok
-Version: 4.2.5
+Version: 4.3.1
 Summary: Unified Tokenizer
 Home-page: https://github.com/Jyonn/UnifiedTokenizer
 Author: Jyonn Liu
@@ -13,17 +13,16 @@ License-File: LICENSE
 
 # UniTok V4
 
-If you want to use the old version, please refer to [v3](README_v3.md) in Chinese.
+The documentation for v3, old version, can be found [here](README_v3.md) in Chinese.
 
 ## Overview
 
 [![PyPI version](https://badge.fury.io/py/unitok.svg)](https://badge.fury.io/py/unitok)
 
-Welcome to the UniTok documentation!
+Welcome to the UniTok v4!
 This library provides a unified preprocessing solution for machine learning datasets, handling diverse data types like text, categorical features, and numerical values.
-It introduces **SQL-like** data table combinations and a modular workflow that transitions datasets through three states: `initialized`, `tokenized`, and `organized`.
 
-UniTok is designed to simplify preprocessing by offering reusable components such as tokenizers and vocabularies, making it flexible for various datasets and scenarios.
+Please refer to [UniTok Handbook](https://unitok.qijiong.work) for more detailed information.
 
 ## Road from V3 to V4
 
```
```diff
--- UniTok-4.2.5/README.md
+++ UniTok-4.3.1/README.md
@@ -1,16 +1,15 @@
 # UniTok V4
 
-If you want to use the old version, please refer to [v3](README_v3.md) in Chinese.
+The documentation for v3, old version, can be found [here](README_v3.md) in Chinese.
 
 ## Overview
 
 [![PyPI version](https://badge.fury.io/py/unitok.svg)](https://badge.fury.io/py/unitok)
 
-Welcome to the UniTok documentation!
+Welcome to the UniTok v4!
 This library provides a unified preprocessing solution for machine learning datasets, handling diverse data types like text, categorical features, and numerical values.
-It introduces **SQL-like** data table combinations and a modular workflow that transitions datasets through three states: `initialized`, `tokenized`, and `organized`.
 
-UniTok is designed to simplify preprocessing by offering reusable components such as tokenizers and vocabularies, making it flexible for various datasets and scenarios.
+Please refer to [UniTok Handbook](https://unitok.qijiong.work) for more detailed information.
 
 ## Road from V3 to V4
 
```
```diff
--- UniTok-4.2.5/UniTok.egg-info/PKG-INFO
+++ UniTok-4.3.1/UniTok.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: UniTok
-Version: 4.2.5
+Version: 4.3.1
 Summary: Unified Tokenizer
 Home-page: https://github.com/Jyonn/UnifiedTokenizer
 Author: Jyonn Liu
@@ -13,17 +13,16 @@ License-File: LICENSE
 
 # UniTok V4
 
-If you want to use the old version, please refer to [v3](README_v3.md) in Chinese.
+The documentation for v3, old version, can be found [here](README_v3.md) in Chinese.
 
 ## Overview
 
 [![PyPI version](https://badge.fury.io/py/unitok.svg)](https://badge.fury.io/py/unitok)
 
-Welcome to the UniTok documentation!
+Welcome to the UniTok v4!
 This library provides a unified preprocessing solution for machine learning datasets, handling diverse data types like text, categorical features, and numerical values.
-It introduces **SQL-like** data table combinations and a modular workflow that transitions datasets through three states: `initialized`, `tokenized`, and `organized`.
 
-UniTok is designed to simplify preprocessing by offering reusable components such as tokenizers and vocabularies, making it flexible for various datasets and scenarios.
+Please refer to [UniTok Handbook](https://unitok.qijiong.work) for more detailed information.
 
 ## Road from V3 to V4
 
```
```diff
--- UniTok-4.2.5/setup.py
+++ UniTok-4.3.1/setup.py
@@ -6,7 +6,7 @@ long_description = (this_directory / "README.md").read_text(encoding='utf8')
 
 setup(
     name='UniTok',
-    version='4.2.5',
+    version='4.3.1',
     keywords=['token', 'tokenizer', 'NLP', 'transformers', 'glove', 'bert', 'llama'],
     description='Unified Tokenizer',
     long_description=long_description,
```
```diff
--- UniTok-4.2.5/unitok/__init__.py
+++ UniTok-4.3.1/unitok/__init__.py
@@ -9,6 +9,7 @@ from unitok.tokenizer import BaseTokenizer, TokenizerHub
 from unitok.tokenizer import EntityTokenizer, EntitiesTokenizer
 from unitok.tokenizer import TransformersTokenizer, BertTokenizer
 from unitok.tokenizer import SplitTokenizer, DigitTokenizer, DigitsTokenizer
+from unitok.tokenizer import GloVeTokenizer
 from unitok.job import Job, JobHub
 
 from unitok.utils.index_set import IndexSet, VocabSet, TokenizerSet, JobSet
@@ -29,6 +30,7 @@ __all__ = [
     'EntityTokenizer', 'EntitiesTokenizer',
     'TransformersTokenizer', 'BertTokenizer',
     'SplitTokenizer', 'DigitTokenizer', 'DigitsTokenizer',
+    'GloVeTokenizer',
    'Job', 'JobHub',
    'IndexSet', 'VocabSet', 'TokenizerSet', 'JobSet',
    'Meta',
```
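With the re-export in place, `GloVeTokenizer` can now be imported from the package root as well as from `unitok.tokenizer`:

```python
# Both import paths work as of 4.3.1; previously only the submodule path did.
from unitok import GloVeTokenizer
from unitok.tokenizer import GloVeTokenizer
```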
```diff
--- UniTok-4.2.5/unitok/job.py
+++ UniTok-4.3.1/unitok/job.py
@@ -1,3 +1,6 @@
+from typing import Union
+
+from unitok import TokenizerHub, VocabHub
 from unitok.tokenizer.union_tokenizer import UnionTokenizer
 
 from unitok.tokenizer import BaseTokenizer
@@ -8,7 +11,7 @@ from unitok.utils.hub import Hub
 class Job:
     def __init__(
             self,
-            tokenizer: BaseTokenizer,
+            tokenizer: Union[BaseTokenizer, str],
             column: str,
             name: str = None,
             truncate: int = None,
@@ -16,7 +19,13 @@ class Job:
             key: bool = False,
             max_len: int = 0,
     ):
+        if isinstance(tokenizer, str):
+            if TokenizerHub.has(tokenizer):
+                tokenizer = TokenizerHub.get(tokenizer)
+            else:
+                raise ValueError(f"Tokenizer {tokenizer} not found in the tokenizer hub.")
         self.tokenizer: BaseTokenizer = tokenizer
+
         self.column: str = column
         self.name: str = name
         self.truncate: int = truncate
@@ -26,7 +35,8 @@
         self.max_len = max_len
         self.from_union = isinstance(self.tokenizer, UnionTokenizer)
 
-        JobHub.add(self.name, self)
+        JobHub.add(self)
+        VocabHub.add(self.tokenizer.vocab)
 
     @property
     def return_list(self):
@@ -77,3 +87,8 @@
 
 class JobHub(Hub[Job]):
     _instance = Instance(compulsory_space=True)
+
+    @classmethod
+    def add(cls, key, obj: Job = None):
+        key, obj = key.name, key
+        return super().add(key, obj)
```
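A hedged usage sketch of the new string form. Only the hub lookup and the `ValueError` text come from the diff above; the `EntityTokenizer` constructor arguments and the `with UniTok()` space management are assumptions about the surrounding API, not shown in this diff.

```python
from unitok import EntityTokenizer, Job, UniTok

# Assumed setup: JobHub uses Instance(compulsory_space=True), which suggests
# jobs must be declared inside an active UniTok space; the `with` form and
# the EntityTokenizer arguments are assumptions.
with UniTok() as ut:
    tok = EntityTokenizer(vocab='user')                                # assumed constructor
    Job(tokenizer=tok.get_tokenizer_id(), column='user_id', key=True)  # str ID now accepted

    try:
        Job(tokenizer='no-such-id', column='city')                     # unregistered ID
    except ValueError as err:
        print(err)  # Tokenizer no-such-id not found in the tokenizer hub.
```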
```diff
--- UniTok-4.2.5/unitok/meta.py
+++ UniTok-4.3.1/unitok/meta.py
@@ -15,7 +15,7 @@ from unitok.vocabulary import Vocab, VocabHub
 
 
 class Meta:
-    version = 'unidep-v4beta'
+    version = 'unidep-v4'
 
     def __init__(self):
         self.note = ('Not compatible with unitok-v3 or lower version, '
@@ -116,6 +116,7 @@ class Meta:
         meta.vocabularies = VocabSet({cls.parse_vocabulary(**v).load(save_dir) for v in kwargs.get('vocabularies')})
         meta.tokenizers = TokenizerSet({cls.parse_tokenizer(**t) for t in kwargs.get('tokenizers')})
         meta.jobs = JobSet({cls.parse_job(**j) for j in kwargs.get('jobs')})
+        meta.version = kwargs.get('version')
 
         return meta
 
```
```diff
--- UniTok-4.2.5/unitok/tokenizer/__init__.py
+++ UniTok-4.3.1/unitok/tokenizer/__init__.py
@@ -1,5 +1,6 @@
 from unitok.tokenizer.base_tokenizer import BaseTokenizer, TokenizerHub
 from unitok.tokenizer.entity_tokenizer import EntityTokenizer, EntitiesTokenizer
+from unitok.tokenizer.glove_tokenizer import GloVeTokenizer
 from unitok.tokenizer.transformers_tokenizer import TransformersTokenizer, BertTokenizer
 from unitok.tokenizer.split_tokenizer import SplitTokenizer
 from unitok.tokenizer.digit_tokenizer import DigitTokenizer, DigitsTokenizer
@@ -14,5 +15,6 @@ __all__ = [
     SplitTokenizer,
     DigitTokenizer,
     DigitsTokenizer,
-    TokenizerHub
+    GloVeTokenizer,
+    TokenizerHub,
 ]
```
```diff
--- UniTok-4.2.5/unitok/tokenizer/base_tokenizer.py
+++ UniTok-4.3.1/unitok/tokenizer/base_tokenizer.py
@@ -28,7 +28,8 @@ class BaseTokenizer(abc.ABC):
 
         self._tokenizer_id = tokenizer_id
 
-        TokenizerHub.add(self.get_tokenizer_id(), self)
+        TokenizerHub.add(self)
+        VocabHub.add(self.vocab)
 
     def get_tokenizer_id(self):
         if self._tokenizer_id is None:
@@ -81,3 +82,8 @@
 
 class TokenizerHub(Hub[BaseTokenizer]):
     _instance = Instance()
+
+    @classmethod
+    def add(cls, key, obj: BaseTokenizer = None):
+        key, obj = key.get_tokenizer_id(), key
+        return super().add(key, obj)
```
```diff
--- UniTok-4.2.5/unitok/tokenizer/glove_tokenizer.py
+++ UniTok-4.3.1/unitok/tokenizer/glove_tokenizer.py
@@ -17,5 +17,5 @@ class GloVeTokenizer(BaseTokenizer):
         self.language = language
 
     def __call__(self, obj):
-        objs = nltk.tokenize.word_tokenize(obj.lower())
+        objs = nltk.tokenize.word_tokenize(obj.lower(), language=self.language)
         return [self.vocab[o] for o in objs if o in self.vocab]
```
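The one-line fix threads the configured `self.language` through to NLTK; previously `word_tokenize` ran with its default English models regardless of the `language` setting. `nltk.tokenize.word_tokenize` does accept a `language` argument naming a Punkt model; a minimal check, assuming the Punkt data is installed:

```python
import nltk

nltk.download('punkt', quiet=True)  # Punkt models; newer NLTK may also need 'punkt_tab'

# Mirrors the tokenizer's internal call after the fix:
tokens = nltk.tokenize.word_tokenize("C'est une phrase d'exemple.".lower(), language='french')
print(tokens)  # exact splits depend on the NLTK version and installed models
```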
```diff
--- UniTok-4.2.5/unitok/unitok.py
+++ UniTok-4.3.1/unitok/unitok.py
@@ -51,7 +51,7 @@ class UniTok(Status):
         if self._union_type is None:
             self._union_type = union_type
         elif self._union_type != union_type:
-            raise ValueError(f'union type is already set: {self._union_type}')
+            raise ValueError(f'Union type is already set: {self._union_type}')
 
     @Status.require_not_initialized
     def init_indices(self):
@@ -173,7 +173,7 @@
         if tokenizer.return_list:
             raise AttributeError('Column content of the key job should be tokenized into atomic value')
         if self.key_job:
-            raise ValueError(f'key key already exists: {self.key_job.name}')
+            raise ValueError(f'Key column already exists: {self.key_job.name}')
         self.key_job = job
 
     @Status.require_not_organized
@@ -282,7 +282,10 @@
 
         # Prepare introduction header
         introduction_header = Text.assemble(
-            (f"UniTok ({self.meta.parse_version(self.meta.version)})\n", "bold cyan"),
+            (
+                f"UniTok (v{self.meta.parse_version(Meta.version)}), "
+                f"Data (v{self.meta.parse_version(self.meta.version)})\n",
+                "bold cyan"),
             (f"Sample Size: {self._sample_size}\n", "green"),
             (f"ID Column: {self.key_job.name}\n", "magenta"),
             style="dim"
@@ -337,6 +340,7 @@
             sample[job.name] = self.data[job.name][index]
         return sample
 
+    @Status.require_not_initialized
     def pack(self, index):
         if self.is_soft_union:
             return self._pack_soft_union(index)
@@ -390,10 +394,6 @@
         selector = Selector(self.meta, *selector)
         return selector(sample)
 
-    def get_sample_by_id(self, key_id):
-        index = self.key_job.tokenizer.vocab[key_id]
-        return self[index]
-
     def __len__(self):
         return len(self._legal_indices)
 
```
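`get_sample_by_id` is removed in 4.3.1. Callers that resolved a sample by its key value can inline the two steps the removed method wrapped; a minimal sketch, where `UniTok.load(...)` is an assumed loader API and `'N12345'` is a hypothetical key value:

```python
from unitok import UniTok

ut = UniTok.load('path/to/dataset')           # assumed loader API

# Equivalent of the removed ut.get_sample_by_id('N12345'):
index = ut.key_job.tokenizer.vocab['N12345']  # key value -> integer index
sample = ut[index]                            # index -> sample via __getitem__
```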
```diff
--- UniTok-4.2.5/unitok/utils/hub/hub.py
+++ UniTok-4.3.1/unitok/utils/hub/hub.py
@@ -11,7 +11,7 @@ class Hub(abc.ABC, Generic[T]):
     _instance: Instance
 
     @classmethod
-    def add(cls, key, obj: T):
+    def add(cls, key, obj: T = None):
         instance = cls._instance.current()
         if key in instance and instance[key] is not obj:
             raise ValueError(f'Conflict object declaration: {obj} and {instance[key]}')
```
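This base-class change explains the pattern recurring across `JobHub`, `TokenizerHub`, and `VocabularyHub` in this release: `obj` becomes optional, and each subclass overrides `add` to derive the registry key from the object itself, so callers pass just the object. A minimal self-contained sketch of the convention, using stand-in classes rather than the library's own (the real `Hub` stores entries via `cls._instance.current()`, not a plain dict):

```python
from typing import Dict, Generic, TypeVar

T = TypeVar('T')


class Hub(Generic[T]):
    """Stand-in for unitok.utils.hub.Hub; models only the add() convention."""
    _store: Dict[str, T]

    @classmethod
    def add(cls, key, obj: T = None):
        if key in cls._store and cls._store[key] is not obj:
            raise ValueError(f'Conflict object declaration: {obj} and {cls._store[key]}')
        cls._store[key] = obj
        return obj


class Item:
    def __init__(self, name: str):
        self.name = name


class ItemHub(Hub[Item]):
    _store: Dict[str, Item] = {}

    @classmethod
    def add(cls, key, obj: Item = None):
        # Callers now pass the object itself as `key`; the subclass swaps in
        # the real registry key (here the item's name) before delegating.
        key, obj = key.name, key
        return super().add(key, obj)


item = ItemHub.add(Item('glove'))  # registered under the key 'glove'
```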
```diff
--- UniTok-4.2.5/unitok/vocabulary/vocabulary.py
+++ UniTok-4.3.1/unitok/vocabulary/vocabulary.py
@@ -1,4 +1,5 @@
 import os
+from typing import Optional, Union
 
 from unitok import PickleHandler
 from unitok.utils import Map, Instance
@@ -18,7 +19,7 @@ class Vocabulary:
         self._editable = True  # whether vocab is editable
         self.counter = Counter()
 
-        VocabularyHub.add(self.name, self)
+        VocabularyHub.add(self)
 
     def equals(self, other: 'Vocabulary'):
         return self.name == other.name and len(self) == len(other)
@@ -43,7 +44,7 @@
         """
         return [self.append(obj) for obj in objs]
 
-    def append(self, obj, oov_token=None):
+    def append(self, obj, oov_token: Optional[Union[int, str]] = None):
         obj = str(obj)
         if obj not in self.o2i:
             if '\n' in obj:
@@ -52,7 +53,11 @@
             if not self._editable:
                 if oov_token is None:
                     raise ValueError(f'the fixed vocab {self.name} is not allowed to add new token ({obj})')
-                return oov_token
+                if isinstance(oov_token, str):
+                    return self[oov_token]
+                if len(self) > oov_token >= 0:
+                    return oov_token
+                raise ValueError(f'oov_token ({oov_token}) is not in the vocab')
 
             index = len(self)
             self.o2i[obj] = index
@@ -81,6 +86,9 @@
             return self.i2o[item]
         return self.o2i[item]
 
+    def __contains__(self, item: str):
+        return item in self.o2i
+
     def __str__(self):
         return f'Vocabulary({self.name}, vocab_size={len(self)})'
 
@@ -88,6 +96,10 @@
     Editable Methods
     """
 
+    @property
+    def editable(self):
+        return self._editable
+
     def allow_edit(self):
         self._editable = True
         return self
@@ -113,8 +125,8 @@
     Save & Load Methods
     """
 
-    def filepath(self, store_dir):
-        return os.path.join(store_dir, self.filename)
+    def filepath(self, save_dir):
+        return os.path.join(save_dir, self.filename)
 
     @property
     def filename(self):
@@ -147,3 +159,8 @@
 
 class VocabularyHub(Hub[Vocabulary]):
     _instance = Instance()
+
+    @classmethod
+    def add(cls, key, obj: Vocabulary = None):
+        key, obj = key.name, key
+        return super().add(key, obj)
```
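A hedged sketch of the new OOV behavior on a non-editable vocabulary. The `append` semantics, `__contains__`, and the `editable` property come from the diff above; the `Vocab` constructor call and the direct `_editable` assignment are assumptions standing in for the real construction and freezing APIs.

```python
from unitok.vocabulary import Vocab

vocab = Vocab(name='topic')                  # assumed constructor signature
vocab.extend(['sports', 'finance'])          # 'sports' -> 0, 'finance' -> 1
vocab._editable = False                      # stand-in for the real freezing call

print(vocab.append('health', oov_token=0))         # 0: an in-range int is returned as-is
print(vocab.append('health', oov_token='sports'))  # new in 4.3.1: a str is looked up -> 0
print('sports' in vocab)                           # True -- __contains__ is new in 4.3.1
print(vocab.editable)                              # False -- new read-only property
```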