arekit 0.25.0__py3-none-any.whl → 0.25.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. arekit/common/data/storages/base.py +4 -15
  2. arekit/common/docs/parser.py +3 -30
  3. arekit/common/pipeline/items/base.py +1 -1
  4. arekit/common/utils.py +11 -8
  5. arekit/contrib/utils/data/storages/jsonl_based.py +2 -1
  6. arekit/contrib/utils/data/storages/pandas_based.py +2 -17
  7. arekit/contrib/utils/data/storages/row_cache.py +2 -1
  8. arekit/contrib/utils/data/storages/sqlite_based.py +2 -1
  9. arekit/contrib/utils/pipelines/text_opinion/extraction.py +5 -4
  10. {arekit-0.25.0.dist-info → arekit-0.25.1.dist-info}/METADATA +4 -5
  11. {arekit-0.25.0.dist-info → arekit-0.25.1.dist-info}/RECORD +15 -88
  12. arekit/common/data/input/repositories/__init__.py +0 -0
  13. arekit/common/data/input/repositories/base.py +0 -68
  14. arekit/common/data/input/repositories/sample.py +0 -22
  15. arekit/common/data/views/__init__.py +0 -0
  16. arekit/common/data/views/samples.py +0 -26
  17. arekit/common/service/__init__.py +0 -0
  18. arekit/common/service/sqlite.py +0 -36
  19. arekit/contrib/networks/__init__.py +0 -0
  20. arekit/contrib/networks/embedding.py +0 -149
  21. arekit/contrib/networks/embedding_io.py +0 -18
  22. arekit/contrib/networks/input/__init__.py +0 -0
  23. arekit/contrib/networks/input/const.py +0 -6
  24. arekit/contrib/networks/input/ctx_serialization.py +0 -28
  25. arekit/contrib/networks/input/embedding/__init__.py +0 -0
  26. arekit/contrib/networks/input/embedding/matrix.py +0 -29
  27. arekit/contrib/networks/input/embedding/offsets.py +0 -55
  28. arekit/contrib/networks/input/formatters/__init__.py +0 -0
  29. arekit/contrib/networks/input/formatters/pos_mapper.py +0 -22
  30. arekit/contrib/networks/input/providers/__init__.py +0 -0
  31. arekit/contrib/networks/input/providers/sample.py +0 -129
  32. arekit/contrib/networks/input/providers/term_connotation.py +0 -23
  33. arekit/contrib/networks/input/providers/text.py +0 -24
  34. arekit/contrib/networks/input/rows_parser.py +0 -47
  35. arekit/contrib/networks/input/term_types.py +0 -13
  36. arekit/contrib/networks/input/terms_mapping.py +0 -60
  37. arekit/contrib/networks/vectorizer.py +0 -6
  38. arekit/contrib/utils/data/readers/__init__.py +0 -0
  39. arekit/contrib/utils/data/readers/base.py +0 -7
  40. arekit/contrib/utils/data/readers/csv_pd.py +0 -38
  41. arekit/contrib/utils/data/readers/jsonl.py +0 -15
  42. arekit/contrib/utils/data/readers/sqlite.py +0 -14
  43. arekit/contrib/utils/data/service/__init__.py +0 -0
  44. arekit/contrib/utils/data/service/balance.py +0 -50
  45. arekit/contrib/utils/data/writers/csv_native.py +0 -63
  46. arekit/contrib/utils/data/writers/csv_pd.py +0 -40
  47. arekit/contrib/utils/data/writers/json_opennre.py +0 -132
  48. arekit/contrib/utils/data/writers/sqlite_native.py +0 -114
  49. arekit/contrib/utils/embeddings/__init__.py +0 -0
  50. arekit/contrib/utils/embeddings/rusvectores.py +0 -58
  51. arekit/contrib/utils/embeddings/tokens.py +0 -30
  52. arekit/contrib/utils/io_utils/embedding.py +0 -72
  53. arekit/contrib/utils/np_utils/__init__.py +0 -0
  54. arekit/contrib/utils/np_utils/embedding.py +0 -22
  55. arekit/contrib/utils/np_utils/npz_utils.py +0 -13
  56. arekit/contrib/utils/np_utils/vocab.py +0 -20
  57. arekit/contrib/utils/pipelines/items/sampling/__init__.py +0 -0
  58. arekit/contrib/utils/pipelines/items/sampling/base.py +0 -94
  59. arekit/contrib/utils/pipelines/items/sampling/networks.py +0 -55
  60. arekit/contrib/utils/pipelines/items/text/frames_lemmatized.py +0 -36
  61. arekit/contrib/utils/pipelines/items/text/frames_negation.py +0 -33
  62. arekit/contrib/utils/pipelines/items/text/tokenizer.py +0 -105
  63. arekit/contrib/utils/pipelines/items/text/translator.py +0 -136
  64. arekit/contrib/utils/processing/languages/__init__.py +0 -0
  65. arekit/contrib/utils/processing/languages/mods.py +0 -12
  66. arekit/contrib/utils/processing/languages/pos.py +0 -23
  67. arekit/contrib/utils/processing/languages/ru/__init__.py +0 -0
  68. arekit/contrib/utils/processing/languages/ru/cases.py +0 -78
  69. arekit/contrib/utils/processing/languages/ru/constants.py +0 -6
  70. arekit/contrib/utils/processing/languages/ru/mods.py +0 -13
  71. arekit/contrib/utils/processing/languages/ru/number.py +0 -23
  72. arekit/contrib/utils/processing/languages/ru/pos_service.py +0 -36
  73. arekit/contrib/utils/processing/lemmatization/__init__.py +0 -0
  74. arekit/contrib/utils/processing/lemmatization/mystem.py +0 -51
  75. arekit/contrib/utils/processing/pos/__init__.py +0 -0
  76. arekit/contrib/utils/processing/pos/base.py +0 -12
  77. arekit/contrib/utils/processing/pos/mystem_wrap.py +0 -134
  78. arekit/contrib/utils/processing/pos/russian.py +0 -10
  79. arekit/contrib/utils/processing/text/__init__.py +0 -0
  80. arekit/contrib/utils/processing/text/tokens.py +0 -127
  81. arekit/contrib/utils/serializer.py +0 -42
  82. arekit/contrib/utils/vectorizers/__init__.py +0 -0
  83. arekit/contrib/utils/vectorizers/bpe.py +0 -93
  84. arekit/contrib/utils/vectorizers/random_norm.py +0 -39
  85. {arekit-0.25.0.data → arekit-0.25.1.data}/data/logo.png +0 -0
  86. {arekit-0.25.0.dist-info → arekit-0.25.1.dist-info}/LICENSE +0 -0
  87. {arekit-0.25.0.dist-info → arekit-0.25.1.dist-info}/WHEEL +0 -0
  88. {arekit-0.25.0.dist-info → arekit-0.25.1.dist-info}/top_level.txt +0 -0
arekit/contrib/utils/data/readers/csv_pd.py
@@ -1,38 +0,0 @@
- import importlib
-
- from arekit.contrib.utils.data.readers.base import BaseReader
- from arekit.contrib.utils.data.storages.pandas_based import PandasBasedRowsStorage
-
-
- class PandasCsvReader(BaseReader):
-     """ Represents a CSV-based reader, implmented via pandas API.
-     """
-
-     def __init__(self, sep='\t', header='infer', compression='infer', encoding='utf-8', col_types=None,
-                  custom_extension=None):
-         self.__sep = sep
-         self.__compression = compression
-         self.__encoding = encoding
-         self.__header = header
-         self.__custom_extension = custom_extension
-
-         # Special assignation of types for certain columns.
-         self.__col_types = col_types
-         if self.__col_types is None:
-             self.__col_types = dict()
-
-     def extension(self):
-         return ".tsv.gz" if self.__custom_extension is None else self.__custom_extension
-
-     def __from_csv(self, filepath):
-         pd = importlib.import_module("pandas")
-         return pd.read_csv(filepath,
-                            sep=self.__sep,
-                            encoding=self.__encoding,
-                            compression=self.__compression,
-                            dtype=self.__col_types,
-                            header=self.__header)
-
-     def read(self, target):
-         df = self.__from_csv(filepath=target)
-         return PandasBasedRowsStorage(df)
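Note for downstream users: the removed PandasCsvReader above was a thin wrapper around pandas.read_csv. A minimal sketch of an equivalent direct call, assuming the same defaults the class used (tab separator, utf-8 encoding, inferred header and compression for a ".tsv.gz" target); the file name is hypothetical:

    import pandas as pd

    # Roughly mirrors the removed reader's defaults; not an official replacement API.
    df = pd.read_csv("samples.tsv.gz", sep="\t", header="infer",
                     compression="infer", encoding="utf-8")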
arekit/contrib/utils/data/readers/jsonl.py
@@ -1,15 +0,0 @@
- from arekit.contrib.utils.data.readers.base import BaseReader
- from arekit.contrib.utils.data.storages.jsonl_based import JsonlBasedRowsStorage
-
-
- class JsonlReader(BaseReader):
-
-     def extension(self):
-         return ".jsonl"
-
-     def read(self, target):
-         rows = []
-         with open(target, "r") as f:
-             for line in f.readlines():
-                 rows.append(line)
-         return JsonlBasedRowsStorage(rows)
arekit/contrib/utils/data/readers/sqlite.py
@@ -1,14 +0,0 @@
- from arekit.contrib.utils.data.readers.base import BaseReader
- from arekit.contrib.utils.data.storages.sqlite_based import SQliteBasedRowsStorage
-
-
- class SQliteReader(BaseReader):
-
-     def __init__(self, table_name):
-         self.__table_name = table_name
-
-     def extension(self):
-         return ".sqlite"
-
-     def read(self, target):
-         return SQliteBasedRowsStorage(path=target, table_name=self.__table_name)
arekit/contrib/utils/data/service/__init__.py
File without changes
arekit/contrib/utils/data/service/balance.py
@@ -1,50 +0,0 @@
- import gc
- import importlib
- from arekit.contrib.utils.data.storages.pandas_based import PandasBasedRowsStorage
-
-
- class PandasBasedStorageBalancing(object):
-
-     @staticmethod
-     def create_balanced_from(storage, column_name, free_origin=True):
-         """ Performs oversampled balancing.
-
-         Note: it is quite important to remove previously created storage
-         in order to avoid memory leaking.
-
-         storage: PandasBasedRowsStorage
-             storage contents to be balanced.
-
-         column_name: str
-             column utilized for balancing.
-
-         free_origin: bool
-             indicates whether there is a need to release the resources
-             utilized for the original storage.
-         """
-         assert(isinstance(storage, PandasBasedRowsStorage))
-
-         original_df = storage.DataFrame
-
-         max_size = original_df[column_name].value_counts().max()
-
-         dframes = []
-         for class_index, group in original_df.groupby(column_name):
-             dframes.append(group.sample(max_size - len(group), replace=True))
-
-         # Clear resources.
-         pd = importlib.import_module("pandas")
-         balanced_df = pd.concat(dframes + [original_df])
-
-         # Removing temporary created dataframe.
-         for df in dframes:
-             del df
-
-         # Marking the original dataframe as released
-         # in terms of the allocated memory for it.
-         if free_origin:
-             storage.free()
-
-         gc.collect()
-
-         return PandasBasedRowsStorage(df=balanced_df)
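The removed create_balanced_from oversamples every class group by max_size - len(group) rows drawn with replacement and concatenates those extras with the original frame, so each class reaches the majority-class size. A toy sketch of that arithmetic, using a hypothetical label column:

    import pandas as pd

    df = pd.DataFrame({"label": ["pos"] * 10 + ["neg"] * 4})
    max_size = df["label"].value_counts().max()          # 10
    extra = [g.sample(max_size - len(g), replace=True)   # 0 extra rows for "pos", 6 for "neg"
             for _, g in df.groupby("label")]
    balanced = pd.concat(extra + [df])                   # both classes now count 10 rows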
arekit/contrib/utils/data/writers/csv_native.py
@@ -1,63 +0,0 @@
- import csv
- import os
- from os.path import dirname
-
- from arekit.common.data.storages.base import BaseRowsStorage
- from arekit.contrib.utils.data.storages.row_cache import RowCacheStorage
- from arekit.contrib.utils.data.writers.base import BaseWriter
-
-
- class NativeCsvWriter(BaseWriter):
-
-     def __init__(self, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL, header=True):
-         self.__target_f = None
-         self.__writer = None
-         self.__create_writer_func = lambda f: csv.writer(
-             f, delimiter=delimiter, quotechar=quotechar, quoting=quoting)
-         self.__header = header
-         self.__header_written = None
-
-     def extension(self):
-         return ".csv"
-
-     @staticmethod
-     def __iter_storage_column_names(storage):
-         """ Iter only those columns that existed in storage.
-         """
-         for col_name in storage.iter_column_names():
-             if col_name in storage.RowCache:
-                 yield col_name
-
-     def open_target(self, target):
-         os.makedirs(dirname(target), exist_ok=True)
-         self.__target_f = open(target, "w")
-         self.__writer = self.__create_writer_func(self.__target_f)
-         self.__header_written = not self.__header
-
-     def close_target(self):
-         self.__target_f.close()
-
-     def commit_line(self, storage):
-         assert(isinstance(storage, RowCacheStorage))
-         assert(self.__writer is not None)
-
-         if not self.__header_written:
-             self.__writer.writerow(list(self.__iter_storage_column_names(storage)))
-             self.__header_written = True
-
-         line_data = list(map(lambda col_name: storage.RowCache[col_name],
-                              self.__iter_storage_column_names(storage)))
-         self.__writer.writerow(line_data)
-
-     def write_all(self, storage, target):
-         """ Writes all the `storage` rows
-             into the `target` filepath, formatted as CSV.
-         """
-         assert(isinstance(storage, BaseRowsStorage))
-
-         with open(target, "w") as f:
-             writer = self.__create_writer_func(f)
-             for _, row in storage:
-                 #content = [row[col_name] for col_name in storage.iter_column_names()]
-                 content = [v for v in row]
-                 writer.writerow(content)
arekit/contrib/utils/data/writers/csv_pd.py
@@ -1,40 +0,0 @@
- import logging
-
- from arekit.common.data.input.providers.columns.base import BaseColumnsProvider
- from arekit.common.utils import create_dir_if_not_exists
- from arekit.contrib.utils.data.storages.pandas_based import PandasBasedRowsStorage
- from arekit.contrib.utils.data.writers.base import BaseWriter
-
- logger = logging.getLogger(__name__)
- logging.basicConfig(level=logging.INFO)
-
-
- class PandasCsvWriter(BaseWriter):
-
-     def __init__(self, write_header):
-         super(PandasCsvWriter, self).__init__()
-         self.__write_header = write_header
-
-     def extension(self):
-         return ".tsv.gz"
-
-     def write_all(self, storage, target):
-         assert(isinstance(storage, PandasBasedRowsStorage))
-         assert(isinstance(target, str))
-
-         create_dir_if_not_exists(target)
-
-         # Temporary hack, remove it in future.
-         df = storage.DataFrame
-
-         logger.info("Saving... {length}: {filepath}".format(length=len(storage), filepath=target))
-         df.to_csv(target,
-                   sep='\t',
-                   encoding='utf-8',
-                   columns=[c for c in df.columns if c != BaseColumnsProvider.ROW_ID],
-                   index=False,
-                   float_format="%.0f",
-                   compression='gzip',
-                   header=self.__write_header)
-
-         logger.info("Saving completed!")
arekit/contrib/utils/data/writers/json_opennre.py
@@ -1,132 +0,0 @@
- import json
- import logging
- import os
- from os.path import dirname
-
- from arekit.common.data import const
- from arekit.common.data.storages.base import BaseRowsStorage
- from arekit.contrib.utils.data.storages.row_cache import RowCacheStorage
- from arekit.contrib.utils.data.writers.base import BaseWriter
-
- logger = logging.getLogger(__name__)
-
-
- class OpenNREJsonWriter(BaseWriter):
-     """ This is a bag-based writer for the samples.
-         Project page: https://github.com/thunlp/OpenNRE
-
-         Every bag presented as follows:
-             {
-                 'text' or 'token': ...,
-                 'h': {'pos': [start, end], 'id': ... },
-                 't': {'pos': [start, end], 'id': ... }
-                 'id': "id_of_the_text_opinion"
-             }
-
-         In terms of the linked opinions (i0, i1, etc.) we consider id of the first opinion in linkage.
-         During the dataset reading stage via OpenNRE, these linkages automaticaly groups into bags.
-     """
-
-     def __init__(self, text_columns, encoding="utf-8", na_value="NA", keep_extra_columns=True,
-                  skip_extra_existed=True):
-         """ text_columns: list
-                 column names that expected to be joined into a single (token) column.
-         """
-         assert(isinstance(text_columns, list))
-         assert(isinstance(encoding, str))
-         self.__text_columns = text_columns
-         self.__encoding = encoding
-         self.__target_f = None
-         self.__keep_extra_columns = keep_extra_columns
-         self.__na_value = na_value
-         self.__skip_extra_existed = skip_extra_existed
-
-     def extension(self):
-         return ".jsonl"
-
-     @staticmethod
-     def __format_row(row, na_value, text_columns, keep_extra_columns, skip_extra_existed):
-         """ Formatting that is compatible with the OpenNRE.
-         """
-         assert(isinstance(na_value, str))
-
-         sample_id = row[const.ID]
-         s_ind = int(row[const.S_IND])
-         t_ind = int(row[const.T_IND])
-         bag_id = str(row[const.OPINION_ID])
-
-         # Gather tokens.
-         tokens = []
-         for text_col in text_columns:
-             if text_col in row:
-                 tokens.extend(row[text_col].split())
-
-         # Filtering JSON row.
-         formatted_data = {
-             "id": bag_id,
-             "id_orig": sample_id,
-             "token": tokens,
-             "h": {"pos": [s_ind, s_ind + 1], "id": str(bag_id + "s")},
-             "t": {"pos": [t_ind, t_ind + 1], "id": str(bag_id + "t")},
-             "relation": str(int(row[const.LABEL_UINT])) if const.LABEL_UINT in row else na_value
-         }
-
-         # Register extra fields (optionally).
-         if keep_extra_columns:
-             for key, value in row.items():
-                 if key not in formatted_data and key not in text_columns:
-                     formatted_data[key] = value
-                 else:
-                     if not skip_extra_existed:
-                         raise Exception(f"key `{key}` is already exist in formatted data "
-                                         f"or a part of the text columns list: {text_columns}")
-
-         return formatted_data
-
-     def open_target(self, target):
-         os.makedirs(dirname(target), exist_ok=True)
-         self.__target_f = open(target, "w")
-         pass
-
-     def close_target(self):
-         self.__target_f.close()
-
-     def commit_line(self, storage):
-         assert(isinstance(storage, RowCacheStorage))
-
-         # Collect existed columns.
-         row_data = {}
-         for col_name in storage.iter_column_names():
-             if col_name not in storage.RowCache:
-                 continue
-             row_data[col_name] = storage.RowCache[col_name]
-
-         bag = self.__format_row(row_data, text_columns=self.__text_columns,
-                                 keep_extra_columns=self.__keep_extra_columns,
-                                 na_value=self.__na_value,
-                                 skip_extra_existed=self.__skip_extra_existed)
-
-         self.__write_bag(bag=bag, json_file=self.__target_f)
-
-     @staticmethod
-     def __write_bag(bag, json_file):
-         assert(isinstance(bag, dict))
-         json.dump(bag, json_file, separators=(",", ":"), ensure_ascii=False)
-         json_file.write("\n")
-
-     def write_all(self, storage, target):
-         assert(isinstance(storage, BaseRowsStorage))
-         assert(isinstance(target, str))
-
-         logger.info("Saving... {rows}: {filepath}".format(rows=(len(storage)), filepath=target))
-
-         os.makedirs(os.path.dirname(target), exist_ok=True)
-         with open(target, "w", encoding=self.__encoding) as json_file:
-             for row_index, row in storage:
-                 self.__write_bag(bag=self.__format_row(row, text_columns=self.__text_columns,
-                                                        keep_extra_columns=self.__keep_extra_columns,
-                                                        na_value=self.__na_value,
-                                                        skip_extra_existed=self.__skip_extra_existed),
-                                  json_file=json_file)
-
-         logger.info("Saving completed!")
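For orientation, each line emitted by the removed OpenNREJsonWriter is one bag assembled by __format_row along these lines; the values below are illustrative only, not taken from a real sample:

    # One OpenNRE-style bag (illustrative values).
    bag = {
        "id": "o12",                         # OPINION_ID; OpenNRE groups lines by this id into bags
        "id_orig": "s345",                   # original sample id
        "token": ["the", "entity", "..."],   # joined text columns, split on whitespace
        "h": {"pos": [3, 4], "id": "o12s"},  # source entity position [s_ind, s_ind + 1]
        "t": {"pos": [7, 8], "id": "o12t"},  # target entity position [t_ind, t_ind + 1]
        "relation": "2",                     # LABEL_UINT as string, or the NA value when absent
    }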
arekit/contrib/utils/data/writers/sqlite_native.py
@@ -1,114 +0,0 @@
- import os
- import sqlite3
- from os.path import dirname
-
- from arekit.common.data import const
- from arekit.contrib.utils.data.storages.row_cache import RowCacheStorage
- from arekit.contrib.utils.data.writers.base import BaseWriter
-
-
- class SQliteWriter(BaseWriter):
-     """ TODO. This implementation is dedicated for the writing concepts of the data
-         serialization pipeline. However we add the SQLite3 service, it would be
-         right to refactor and utlize some core functionality from the core/service/sqlite.py
-     """
-
-     def __init__(self, table_name="contents", index_column_names=None, skip_existed=False, clear_table=True):
-         """ index_column_names: list or None
-                 column names should be considered to build a unique index;
-                 if None, the default 'const.ID' will be considered for row indexation.
-         """
-         assert (isinstance(index_column_names, list) or index_column_names is None)
-         self.__index_column_names = index_column_names if index_column_names is not None else [const.ID]
-         self.__table_name = table_name
-         self.__conn = None
-         self.__cur = None
-         self.__need_init_table = True
-         self.__origin_column_names = None
-         self.__skip_existed = skip_existed
-         self.__clear_table = clear_table
-
-     def extension(self):
-         return ".sqlite"
-
-     @staticmethod
-     def __iter_storage_column_names(storage):
-         """ Iter only those columns that existed in storage.
-         """
-         assert (isinstance(storage, RowCacheStorage))
-         for col_name, col_type in zip(storage.iter_column_names(), storage.iter_column_types()):
-             if col_name in storage.RowCache:
-                 yield col_name, col_type
-
-     def __init_table(self, column_data):
-         # Compose column name with the related SQLITE type.
-         column_types = ",".join([" ".join([col_name, self.type_to_sqlite(col_type)])
-                                  for col_name, col_type in column_data])
-         # Create table if not exists.
-         self.__cur.execute(f"CREATE TABLE IF NOT EXISTS {self.__table_name}({column_types})")
-         # Table exists, however we may optionally remove the content from it.
-         if self.__clear_table:
-             self.__cur.execute(f"DELETE FROM {self.__table_name};")
-         # Create index.
-         index_name = f"i_{self.__table_name}_id"
-         self.__cur.execute(f"DROP INDEX IF EXISTS {index_name};")
-         self.__cur.execute("CREATE INDEX IF NOT EXISTS {index} ON {table}({columns})".format(
-             index=index_name,
-             table=self.__table_name,
-             columns=", ".join(self.__index_column_names)
-         ))
-         self.__origin_column_names = [col_name for col_name, _ in column_data]
-
-     @staticmethod
-     def type_to_sqlite(col_type):
-         """ This is a simple function that provides conversion from the
-             base numpy types to SQLITE.
-             NOTE: this method represent a quick implementation for supporting
-             types, however it is far away from the generalized implementation.
-         """
-         if isinstance(col_type, str):
-             if 'int' in col_type:
-                 return 'INTEGER'
-
-         return "TEXT"
-
-     def open_target(self, target):
-         os.makedirs(dirname(target), exist_ok=True)
-         self.__conn = sqlite3.connect(target)
-         self.__cur = self.__conn.cursor()
-
-     def commit_line(self, storage):
-         assert (isinstance(storage, RowCacheStorage))
-
-         column_data = list(self.__iter_storage_column_names(storage))
-
-         if self.__need_init_table:
-             self.__init_table(column_data)
-             self.__need_init_table = False
-
-         # Check whether the related row is already exist in SQLITE database.
-         row_id = storage.RowCache[const.ID]
-         top_row = self.__cur.execute(f"SELECT EXISTS(SELECT 1 FROM {self.__table_name} WHERE id='{row_id}');")
-         is_exists = top_row.fetchone()[0]
-         if is_exists == 1 and self.__skip_existed:
-             return
-
-         line_data = [storage.RowCache[col_name] for col_name, _ in column_data]
-         parameters = ",".join(["?"] * len(line_data))
-
-         assert (len(self.__origin_column_names) == len(line_data))
-
-         self.__cur.execute(
-             f"INSERT OR REPLACE INTO {self.__table_name} VALUES ({parameters})",
-             tuple(line_data))
-
-         self.__conn.commit()
-
-     def close_target(self):
-         self.__cur = None
-         self.__origin_column_names = None
-         self.__need_init_table = True
-         self.__conn.close()
-
-     def write_all(self, storage, target):
-         pass
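A database produced by the removed SQliteWriter can still be inspected with the standard sqlite3 module; a minimal sketch, assuming the writer's default table name "contents" and a hypothetical output path:

    import sqlite3

    with sqlite3.connect("samples.sqlite") as conn:
        for row in conn.execute("SELECT * FROM contents LIMIT 5"):
            print(row)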
arekit/contrib/utils/embeddings/__init__.py
File without changes
arekit/contrib/utils/embeddings/rusvectores.py
@@ -1,58 +0,0 @@
- from arekit.common.text.stemmer import Stemmer
- from arekit.contrib.networks.embedding import Embedding
-
-
- class RusvectoresEmbedding(Embedding):
-     """ Wrapper over models from the following resource.
-         https://rusvectores.org/ru/models/
-
-         NOTE: Usually these are embeddings for texts written in Russian.
-         for the better performance it is expected that we adopt stemmer.
-     """
-
-     def __init__(self, matrix, words, stemmer):
-         assert(isinstance(stemmer, Stemmer) or stemmer is None)
-         super(RusvectoresEmbedding, self).__init__(matrix=matrix, words=words)
-         self.__index_without_pos = self.__create_terms_without_pos()
-         self.__stemmer = stemmer
-         self.__lemmatize_by_default = stemmer is not None
-
-     def try_find_index_by_plain_word(self, word):
-         assert(isinstance(word, str))
-
-         temp = self.__lemmatize_by_default
-         self.__lemmatize_by_default = False
-         index = super(RusvectoresEmbedding, self).try_find_index_by_plain_word(word)
-         self.__lemmatize_by_default = temp
-
-         return index
-
-     def _handler(self, word):
-         return self.__try_find_word_index_pair_lemmatized(word, self.__lemmatize_by_default)
-
-     # region private methods
-
-     def __try_find_word_index_pair_lemmatized(self, term, lemmatize):
-         assert(isinstance(term, str))
-         assert(isinstance(lemmatize, bool))
-
-         if lemmatize:
-             term = self.__stemmer.lemmatize_to_str(term)
-
-         index = self.__index_without_pos[term] \
-             if term in self.__index_without_pos else None
-
-         return term, index
-
-     def __create_terms_without_pos(self):
-         d = {}
-         for word_with_pos, index in self.iter_vocabulary():
-             assert(isinstance(word_with_pos, str))
-             word = word_with_pos.split(u'_')[0]
-             if word in d:
-                 continue
-             d[word] = index
-
-         return d
-
-     # endregion
arekit/contrib/utils/embeddings/tokens.py
@@ -1,30 +0,0 @@
- import numpy as np
-
- from arekit.contrib.networks.embedding import Embedding
- from arekit.contrib.utils.processing.text.tokens import Tokens
-
-
- class TokenEmbedding(Embedding):
-     """ Embedding vectors for text punctuation, based on Tokens in parsed text
-     """
-
-     @classmethod
-     def from_supported_tokens(cls, vector_size, random_vector_func):
-         """
-         random_vector_func: func
-             function with parameters (vector_size, seed)
-         """
-         assert(isinstance(vector_size, int))
-         assert(callable(random_vector_func))
-
-         matrix = []
-         tokens_list = list(Tokens.iter_supported_tokens())
-
-         for token_index, _ in enumerate(tokens_list):
-
-             vector = random_vector_func(vector_size, token_index)
-
-             matrix.append(vector)
-
-         return cls(matrix=np.array(matrix),
-                    words=tokens_list)
arekit/contrib/utils/io_utils/embedding.py
@@ -1,72 +0,0 @@
- from os.path import join
-
- from arekit.contrib.networks.embedding_io import BaseEmbeddingIO
- from arekit.contrib.utils.io_utils.utils import check_targets_existence
- from arekit.contrib.utils.np_utils.embedding import NpzEmbeddingHelper
- from arekit.contrib.utils.np_utils.vocab import VocabRepositoryUtils
-
-
- class NpEmbeddingIO(BaseEmbeddingIO):
-     """ Npz-based IO utils for embedding and text-based for vocabulary.
-         This format represents a archived version of the numpy math data, i.e. vectors, numbers, etc.
-
-         Provides additional Input/Output paths generation functions for:
-             - embedding matrix;
-             - embedding vocabulary.
-     """
-
-     def __init__(self, target_dir, prefix_name="sample"):
-         assert(isinstance(target_dir, str))
-
-         self.__target_dir = target_dir
-         self.__term_emb_fn_template = "-".join([prefix_name, "term_embedding"])
-         self.__vocab_fn_template = "-".join([prefix_name, "term_embedding"])
-
-     # region Embedding-related data
-
-     def save_vocab(self, data):
-         target = self.__get_default_vocab_filepath()
-         return VocabRepositoryUtils.save(data=data, target=target)
-
-     def load_vocab(self):
-         source = self.___get_vocab_source()
-         return dict(VocabRepositoryUtils.load(source))
-
-     def save_embedding(self, data):
-         target = self.__get_default_embedding_filepath()
-         NpzEmbeddingHelper.save_embedding(data=data, target=target)
-
-     def load_embedding(self):
-         source = self.__get_term_embedding_source()
-         return NpzEmbeddingHelper.load_embedding(source)
-
-     def check_targets_existed(self):
-         targets = [
-             self.__get_default_vocab_filepath(),
-             self.__get_term_embedding_target()
-         ]
-         return check_targets_existence(targets=targets)
-
-     # endregion
-
-     # region embedding-related data
-
-     def ___get_vocab_source(self):
-         """ It is possible to load a predefined embedding from another experiment
-             using the related filepath provided by model_io.
-         """
-         return self.__get_default_vocab_filepath()
-
-     def __get_term_embedding_target(self):
-         return self.__get_default_embedding_filepath()
-
-     def __get_term_embedding_source(self):
-         return self.__get_default_embedding_filepath()
-
-     def __get_default_vocab_filepath(self):
-         return join(self.__target_dir, self.__vocab_fn_template)
-
-     def __get_default_embedding_filepath(self):
-         return join(self.__target_dir, self.__term_emb_fn_template)
-
-     # endregion
arekit/contrib/utils/np_utils/__init__.py
File without changes
arekit/contrib/utils/np_utils/embedding.py
@@ -1,22 +0,0 @@
- import logging
-
- from arekit.contrib.utils.np_utils.npz_utils import NpzRepositoryUtils
-
- logger = logging.getLogger(__name__)
- logging.basicConfig(level=logging.INFO)
-
-
- class NpzEmbeddingHelper:
-
-     @staticmethod
-     def save_embedding(data, target):
-         NpzRepositoryUtils.save(data=data, target=target)
-         logger.info("Saving embedding [size={shape}]: {filepath}".format(shape=data.shape,
-                                                                          filepath=target))
-
-     @staticmethod
-     def load_embedding(source):
-         embedding = NpzRepositoryUtils.load(source)
-         logger.info("Embedding read [size={size}]: {filepath}".format(size=embedding.shape,
-                                                                       filepath=source))
-         return embedding
arekit/contrib/utils/np_utils/npz_utils.py
@@ -1,13 +0,0 @@
- import numpy as np
-
-
- class NpzRepositoryUtils(object):
-
-     @staticmethod
-     def save(data, target):
-         np.savez(target, data)
-
-     @staticmethod
-     def load(source):
-         data = np.load(source)
-         return data['arr_0']
- return data['arr_0']