arekit 0.24.0__py3-none-any.whl → 0.25.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- arekit/common/docs/entities_grouping.py +2 -1
- arekit/common/docs/parser.py +52 -20
- arekit/common/pipeline/base.py +12 -16
- arekit/common/pipeline/batching.py +28 -0
- arekit/common/pipeline/context.py +5 -1
- arekit/common/pipeline/items/base.py +38 -1
- arekit/common/pipeline/items/flatten.py +5 -1
- arekit/common/pipeline/items/handle.py +2 -1
- arekit/common/pipeline/items/iter.py +2 -1
- arekit/common/pipeline/items/map.py +2 -1
- arekit/common/pipeline/items/map_nested.py +4 -0
- arekit/common/pipeline/utils.py +32 -0
- arekit/common/service/sqlite.py +36 -0
- arekit/common/text/{partitioning/str.py → partitioning.py} +14 -9
- arekit/common/utils.py +0 -44
- arekit/contrib/utils/data/contents/opinions.py +13 -3
- arekit/contrib/utils/data/readers/sqlite.py +14 -0
- arekit/contrib/utils/data/storages/row_cache.py +6 -1
- arekit/contrib/utils/data/storages/sqlite_based.py +17 -0
- arekit/contrib/utils/data/writers/sqlite_native.py +4 -0
- arekit/contrib/utils/io_utils/utils.py +1 -18
- arekit/contrib/utils/pipelines/items/sampling/base.py +7 -12
- arekit/contrib/utils/pipelines/items/sampling/networks.py +3 -2
- arekit/contrib/utils/pipelines/items/text/entities_default.py +2 -2
- arekit/contrib/utils/pipelines/items/text/frames.py +2 -3
- arekit/contrib/utils/pipelines/items/text/frames_lemmatized.py +2 -2
- arekit/contrib/utils/pipelines/items/text/frames_negation.py +2 -1
- arekit/contrib/utils/pipelines/items/text/tokenizer.py +2 -4
- arekit/contrib/utils/pipelines/items/text/translator.py +2 -1
- arekit/contrib/utils/pipelines/text_opinion/extraction.py +6 -9
- arekit/contrib/utils/serializer.py +1 -2
- arekit-0.25.0.data/data/logo.png +0 -0
- arekit-0.25.0.dist-info/METADATA +82 -0
- {arekit-0.24.0.dist-info → arekit-0.25.0.dist-info}/RECORD +38 -153
- {arekit-0.24.0.dist-info → arekit-0.25.0.dist-info}/WHEEL +1 -1
- arekit/common/docs/objects_parser.py +0 -37
- arekit/common/text/parser.py +0 -12
- arekit/common/text/partitioning/base.py +0 -4
- arekit/common/text/partitioning/terms.py +0 -35
- arekit/contrib/source/__init__.py +0 -0
- arekit/contrib/source/brat/__init__.py +0 -0
- arekit/contrib/source/brat/annot.py +0 -84
- arekit/contrib/source/brat/doc.py +0 -28
- arekit/contrib/source/brat/entities/__init__.py +0 -0
- arekit/contrib/source/brat/entities/compound.py +0 -13
- arekit/contrib/source/brat/entities/entity.py +0 -42
- arekit/contrib/source/brat/entities/parser.py +0 -53
- arekit/contrib/source/brat/opinions/__init__.py +0 -0
- arekit/contrib/source/brat/opinions/converter.py +0 -19
- arekit/contrib/source/brat/relation.py +0 -32
- arekit/contrib/source/brat/sentence.py +0 -69
- arekit/contrib/source/brat/sentences_reader.py +0 -128
- arekit/contrib/source/download.py +0 -41
- arekit/contrib/source/nerel/__init__.py +0 -0
- arekit/contrib/source/nerel/entities.py +0 -55
- arekit/contrib/source/nerel/folding/__init__.py +0 -0
- arekit/contrib/source/nerel/folding/fixed.py +0 -74
- arekit/contrib/source/nerel/io_utils.py +0 -62
- arekit/contrib/source/nerel/labels.py +0 -241
- arekit/contrib/source/nerel/reader.py +0 -46
- arekit/contrib/source/nerel/utils.py +0 -24
- arekit/contrib/source/nerel/versions.py +0 -12
- arekit/contrib/source/nerelbio/__init__.py +0 -0
- arekit/contrib/source/nerelbio/io_utils.py +0 -62
- arekit/contrib/source/nerelbio/labels.py +0 -265
- arekit/contrib/source/nerelbio/reader.py +0 -8
- arekit/contrib/source/nerelbio/versions.py +0 -8
- arekit/contrib/source/ruattitudes/__init__.py +0 -0
- arekit/contrib/source/ruattitudes/collection.py +0 -36
- arekit/contrib/source/ruattitudes/doc.py +0 -51
- arekit/contrib/source/ruattitudes/doc_brat.py +0 -44
- arekit/contrib/source/ruattitudes/entity/__init__.py +0 -0
- arekit/contrib/source/ruattitudes/entity/parser.py +0 -7
- arekit/contrib/source/ruattitudes/io_utils.py +0 -56
- arekit/contrib/source/ruattitudes/labels_fmt.py +0 -12
- arekit/contrib/source/ruattitudes/opinions/__init__.py +0 -0
- arekit/contrib/source/ruattitudes/opinions/base.py +0 -28
- arekit/contrib/source/ruattitudes/opinions/converter.py +0 -37
- arekit/contrib/source/ruattitudes/reader.py +0 -268
- arekit/contrib/source/ruattitudes/sentence.py +0 -73
- arekit/contrib/source/ruattitudes/synonyms.py +0 -17
- arekit/contrib/source/ruattitudes/text_object.py +0 -59
- arekit/contrib/source/rusentiframes/__init__.py +0 -0
- arekit/contrib/source/rusentiframes/collection.py +0 -157
- arekit/contrib/source/rusentiframes/effect.py +0 -24
- arekit/contrib/source/rusentiframes/io_utils.py +0 -19
- arekit/contrib/source/rusentiframes/labels_fmt.py +0 -22
- arekit/contrib/source/rusentiframes/polarity.py +0 -35
- arekit/contrib/source/rusentiframes/role.py +0 -15
- arekit/contrib/source/rusentiframes/state.py +0 -24
- arekit/contrib/source/rusentiframes/types.py +0 -42
- arekit/contrib/source/rusentiframes/value.py +0 -2
- arekit/contrib/source/rusentrel/__init__.py +0 -0
- arekit/contrib/source/rusentrel/const.py +0 -3
- arekit/contrib/source/rusentrel/docs_reader.py +0 -51
- arekit/contrib/source/rusentrel/entities.py +0 -26
- arekit/contrib/source/rusentrel/io_utils.py +0 -125
- arekit/contrib/source/rusentrel/labels_fmt.py +0 -12
- arekit/contrib/source/rusentrel/opinions/__init__.py +0 -0
- arekit/contrib/source/rusentrel/opinions/collection.py +0 -30
- arekit/contrib/source/rusentrel/opinions/converter.py +0 -40
- arekit/contrib/source/rusentrel/opinions/provider.py +0 -54
- arekit/contrib/source/rusentrel/opinions/writer.py +0 -42
- arekit/contrib/source/rusentrel/synonyms.py +0 -17
- arekit/contrib/source/sentinerel/__init__.py +0 -0
- arekit/contrib/source/sentinerel/entities.py +0 -52
- arekit/contrib/source/sentinerel/folding/__init__.py +0 -0
- arekit/contrib/source/sentinerel/folding/factory.py +0 -31
- arekit/contrib/source/sentinerel/folding/fixed.py +0 -70
- arekit/contrib/source/sentinerel/io_utils.py +0 -87
- arekit/contrib/source/sentinerel/labels.py +0 -53
- arekit/contrib/source/sentinerel/labels_scaler.py +0 -30
- arekit/contrib/source/sentinerel/reader.py +0 -42
- arekit/contrib/source/synonyms/__init__.py +0 -0
- arekit/contrib/source/synonyms/utils.py +0 -19
- arekit/contrib/source/zip_utils.py +0 -47
- arekit/contrib/utils/connotations/__init__.py +0 -0
- arekit/contrib/utils/connotations/rusentiframes_sentiment.py +0 -23
- arekit/contrib/utils/download.py +0 -77
- arekit/contrib/utils/io_utils/opinions.py +0 -37
- arekit/contrib/utils/io_utils/samples.py +0 -79
- arekit/contrib/utils/lexicons/__init__.py +0 -0
- arekit/contrib/utils/lexicons/lexicon.py +0 -41
- arekit/contrib/utils/lexicons/relation.py +0 -42
- arekit/contrib/utils/lexicons/rusentilex.py +0 -37
- arekit/contrib/utils/nn/__init__.py +0 -0
- arekit/contrib/utils/nn/rows.py +0 -83
- arekit/contrib/utils/pipelines/items/text/terms_splitter.py +0 -10
- arekit/contrib/utils/pipelines/sources/__init__.py +0 -0
- arekit/contrib/utils/pipelines/sources/nerel/__init__.py +0 -0
- arekit/contrib/utils/pipelines/sources/nerel/doc_provider.py +0 -27
- arekit/contrib/utils/pipelines/sources/nerel/extract_text_relations.py +0 -65
- arekit/contrib/utils/pipelines/sources/nerel/labels_fmt.py +0 -60
- arekit/contrib/utils/pipelines/sources/nerel_bio/__init__.py +0 -0
- arekit/contrib/utils/pipelines/sources/nerel_bio/doc_provider.py +0 -29
- arekit/contrib/utils/pipelines/sources/nerel_bio/extrat_text_relations.py +0 -64
- arekit/contrib/utils/pipelines/sources/nerel_bio/labels_fmt.py +0 -79
- arekit/contrib/utils/pipelines/sources/ruattitudes/__init__.py +0 -0
- arekit/contrib/utils/pipelines/sources/ruattitudes/doc_provider.py +0 -56
- arekit/contrib/utils/pipelines/sources/ruattitudes/entity_filter.py +0 -20
- arekit/contrib/utils/pipelines/sources/ruattitudes/extract_text_opinions.py +0 -65
- arekit/contrib/utils/pipelines/sources/rusentrel/__init__.py +0 -0
- arekit/contrib/utils/pipelines/sources/rusentrel/doc_provider.py +0 -21
- arekit/contrib/utils/pipelines/sources/rusentrel/extract_text_opinions.py +0 -107
- arekit/contrib/utils/pipelines/sources/sentinerel/__init__.py +0 -0
- arekit/contrib/utils/pipelines/sources/sentinerel/doc_provider.py +0 -29
- arekit/contrib/utils/pipelines/sources/sentinerel/entity_filter.py +0 -62
- arekit/contrib/utils/pipelines/sources/sentinerel/extract_text_opinions.py +0 -180
- arekit/contrib/utils/pipelines/sources/sentinerel/labels_fmt.py +0 -50
- arekit/contrib/utils/pipelines/text_opinion/annot/predefined.py +0 -88
- arekit/contrib/utils/resources.py +0 -25
- arekit/contrib/utils/sources/__init__.py +0 -0
- arekit/contrib/utils/sources/sentinerel/__init__.py +0 -0
- arekit/contrib/utils/sources/sentinerel/text_opinion/__init__.py +0 -0
- arekit/contrib/utils/sources/sentinerel/text_opinion/prof_per_org_filter.py +0 -63
- arekit/download_data.py +0 -11
- arekit-0.24.0.dist-info/METADATA +0 -23
- /arekit/common/{text/partitioning → service}/__init__.py +0 -0
- {arekit-0.24.0.dist-info → arekit-0.25.0.dist-info}/LICENSE +0 -0
- {arekit-0.24.0.dist-info → arekit-0.25.0.dist-info}/top_level.txt +0 -0
arekit/common/docs/entities_grouping.py
CHANGED

@@ -4,8 +4,9 @@ from arekit.common.pipeline.items.base import BasePipelineItem


 class EntitiesGroupingPipelineItem(BasePipelineItem):

-    def __init__(self, value_to_group_id_func):
+    def __init__(self, value_to_group_id_func, **kwargs):
         assert(callable(value_to_group_id_func))
+        super(EntitiesGroupingPipelineItem, self).__init__(**kwargs)
         self.__value_to_group_id_func = value_to_group_id_func

     def apply_core(self, input_data, pipeline_ctx):
arekit/common/docs/parser.py
CHANGED

@@ -1,34 +1,66 @@
+from tqdm import tqdm
 from arekit.common.docs.base import Document
 from arekit.common.docs.parsed.base import ParsedDocument
+from arekit.common.pipeline.base import BasePipelineLauncher
+from arekit.common.pipeline.batching import BatchingPipelineLauncher
 from arekit.common.pipeline.context import PipelineContext
-from arekit.common.
+from arekit.common.pipeline.utils import BatchIterator
+from arekit.common.text.parsed import BaseParsedText


-class
+class DocumentParsers(object):

     @staticmethod
-    def
-
-
-
-    def parse(doc, text_parser, parent_ppl_ctx=None):
+    def parse(doc, pipeline_items, parent_ppl_ctx=None, src_key="input", show_progress=False):
+        """ This document parser is based on single text parts (sentences)
+            that passes sequentially through the pipeline of transformations.
+        """
         assert(isinstance(doc, Document))
-        assert(isinstance(
+        assert(isinstance(pipeline_items, list))
         assert(isinstance(parent_ppl_ctx, PipelineContext) or parent_ppl_ctx is None)

-        parsed_sentences = [
-
-
-
+        parsed_sentences = []
+
+        data_it = range(doc.SentencesCount)
+        progress_it = tqdm(data_it, disable=not show_progress)
+
+        for sent_ind in progress_it:

-
-
+            # Composing the context from a single sentence.
+            ctx = PipelineContext({src_key: doc.get_sentence(sent_ind)}, parent_ctx=parent_ppl_ctx)
+
+            # Apply all the operations.
+            BasePipelineLauncher.run(pipeline=pipeline_items, pipeline_ctx=ctx, src_key=src_key)
+
+            # Collecting the result.
+            parsed_sentences.append(BaseParsedText(terms=ctx.provide("result")))
+
+        return ParsedDocument(doc_id=doc.ID, parsed_sentences=parsed_sentences)

     @staticmethod
-    def
+    def parse_batch(doc, pipeline_items, batch_size, parent_ppl_ctx=None, src_key="input", show_progress=False):
+        """ This document parser is based on batch of sentences.
+        """
+        assert(isinstance(batch_size, int) and batch_size > 0)
         assert(isinstance(doc, Document))
-
-
-
-
-
+        assert(isinstance(pipeline_items, list))
+        assert(isinstance(parent_ppl_ctx, PipelineContext) or parent_ppl_ctx is None)
+
+        parsed_sentences = []
+
+        data_it = BatchIterator(data_iter=iter(range(doc.SentencesCount)), batch_size=batch_size)
+        progress_it = tqdm(data_it, total=round(doc.SentencesCount / batch_size), disable=not show_progress)
+
+        for batch in progress_it:
+
+            # Composing the context from a single sentence.
+            ctx = PipelineContext({src_key: [doc.get_sentence(s_ind) for s_ind in batch]},
+                                  parent_ctx=parent_ppl_ctx)
+
+            # Apply all the operations.
+            BatchingPipelineLauncher.run(pipeline=pipeline_items, pipeline_ctx=ctx, src_key=src_key)
+
+            # Collecting the result.
+            parsed_sentences += [BaseParsedText(terms=result) for result in ctx.provide("result")]
+
+        return ParsedDocument(doc_id=doc.ID, parsed_sentences=parsed_sentences)
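Usage-wise, both entry points now take a plain list of pipeline items instead of the former text_parser object. A hedged sketch of the batched variant; `my_doc` and `my_pipeline_items` are placeholders, not part of the diff:

    from arekit.common.docs.parser import DocumentParsers

    # my_doc: an arekit Document; my_pipeline_items: a list of BasePipelineItem
    # instances whose final "result" key holds the terms of each sentence.
    parsed_doc = DocumentParsers.parse_batch(doc=my_doc,
                                             pipeline_items=my_pipeline_items,
                                             batch_size=16,
                                             show_progress=True)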
arekit/common/pipeline/base.py
CHANGED

@@ -2,24 +2,20 @@ from arekit.common.pipeline.context import PipelineContext
 from arekit.common.pipeline.items.base import BasePipelineItem


-class
+class BasePipelineLauncher:

-
+    @staticmethod
+    def run(pipeline, pipeline_ctx, src_key=None, has_input=True):
         assert(isinstance(pipeline, list))
-
+        assert(isinstance(pipeline_ctx, PipelineContext))
+        assert(isinstance(src_key, str) or src_key is None)

-
-        assert(isinstance(params_dict, dict) or params_dict is None)
-
-        pipeline_ctx = PipelineContext(d=params_dict if params_dict is not None else dict(),
-                                       parent_ctx=parent_ctx)
-
-        for item in filter(lambda itm: itm is not None, self.__pipeline):
+        for ind, item in enumerate(filter(lambda itm: itm is not None, pipeline)):
             assert(isinstance(item, BasePipelineItem))
-
-
-
+            do_force_key = src_key is not None and ind == 0
+            input_data = item.get_source(pipeline_ctx, force_key=src_key if do_force_key else None) \
+                if has_input or ind > 0 else None
+            item_result = item.apply(input_data=input_data, pipeline_ctx=pipeline_ctx)
+            pipeline_ctx.update(param=item.ResultKey, value=item_result, is_new_key=False)

-
-        assert(isinstance(item, BasePipelineItem))
-        self.__pipeline.append(item)
+        return pipeline_ctx
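The launcher is now stateless: it consumes a plain list of items plus an externally created PipelineContext, and `src_key` forces the first item to read its input from a chosen context key. A minimal sketch, assuming `apply()` forwards to `apply_core()` as in prior releases; the `LowercaseItem` is hypothetical:

    from arekit.common.pipeline.base import BasePipelineLauncher
    from arekit.common.pipeline.context import PipelineContext
    from arekit.common.pipeline.items.base import BasePipelineItem

    class LowercaseItem(BasePipelineItem):
        # Hypothetical item: lower-cases whatever string it receives.
        def apply_core(self, input_data, pipeline_ctx):
            return input_data.lower()

    ctx = PipelineContext(d={"input": "Hello World"})
    BasePipelineLauncher.run(pipeline=[LowercaseItem()], pipeline_ctx=ctx, src_key="input")
    print(ctx.provide("result"))  # "hello world"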
arekit/common/pipeline/batching.py
ADDED

@@ -0,0 +1,28 @@
+from arekit.common.pipeline.context import PipelineContext
+from arekit.common.pipeline.items.base import BasePipelineItem
+
+
+class BatchingPipelineLauncher:
+
+    @staticmethod
+    def run(pipeline, pipeline_ctx, src_key=None):
+        assert(isinstance(pipeline, list))
+        assert(isinstance(pipeline_ctx, PipelineContext))
+        assert(isinstance(src_key, str) or src_key is None)
+
+        for ind, item in enumerate(filter(lambda itm: itm is not None, pipeline)):
+            assert (isinstance(item, BasePipelineItem))
+
+            # Handle the content of the batch or batch itself.
+            content = item.get_source(pipeline_ctx, call_func=False, force_key=src_key if ind == 0 else None)
+            handled_batch = [item._src_func(i) if item._src_func is not None else i for i in content]
+
+            if item.SupportBatching:
+                batch_result = list(item.apply(input_data=handled_batch, pipeline_ctx=pipeline_ctx))
+            else:
+                batch_result = [item.apply(input_data=input_data, pipeline_ctx=pipeline_ctx)
+                                for input_data in handled_batch]
+
+            pipeline_ctx.update(param=item.ResultKey, value=batch_result, is_new_key=False)
+
+        return pipeline_ctx
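Since `SupportBatching` defaults to False, this launcher falls back to applying an item element-by-element; an item that opts in receives the whole batch at once. A hedged sketch of such an item (hypothetical, not from the diff):

    from arekit.common.pipeline.items.base import BasePipelineItem

    class BatchLowercaseItem(BasePipelineItem):

        @property
        def SupportBatching(self):
            # Opt in: apply() will receive the complete batch list.
            return True

        def apply_core(self, input_data, pipeline_ctx):
            # input_data is the whole batch here, not a single element.
            return [text.lower() for text in input_data]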
arekit/common/pipeline/context.py
CHANGED

@@ -13,6 +13,8 @@ class PipelineContext(object):
         self._d[PARENT_CTX] = parent_ctx

     def __provide(self, param):
+        if param not in self._d:
+            raise Exception(f"Key `{param}` is not in dictionary.\n{self._d}")
         return self._d[param]

     # region public

@@ -23,7 +25,9 @@ class PipelineContext(object):
     def provide_or_none(self, param):
         return self.__provide(param) if param in self._d else None

-    def update(self, param, value):
+    def update(self, param, value, is_new_key=False):
+        if is_new_key and param in self._d:
+            raise Exception(f"Key `{param}` is already presented in pipeline context dictionary.")
         self._d[param] = value

     # endregion
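The context now behaves like a guarded dictionary: `provide` raises on a missing key, and `update(..., is_new_key=True)` raises on an existing one. For instance:

    from arekit.common.pipeline.context import PipelineContext

    ctx = PipelineContext(d={"input": "raw text"})
    ctx.update(param="result", value=["raw", "text"], is_new_key=True)
    print(ctx.provide("result"))           # ['raw', 'text']
    print(ctx.provide_or_none("missing"))  # None; provide("missing") would raise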
arekit/common/pipeline/items/base.py
CHANGED

@@ -1,9 +1,46 @@
+from arekit.common.pipeline.context import PipelineContext
+
+
 class BasePipelineItem(object):
     """ Single pipeline item that might be instatiated and embedded into pipeline.
     """

+    def __init__(self, src_key="result", result_key="result", src_func=None):
+        assert(isinstance(src_key, str) or src_key is None)
+        assert(callable(src_func) or src_func is None)
+        self.__src_key = src_key
+        self._src_func = src_func
+        self.__result_key = result_key
+
+    @property
+    def ResultKey(self):
+        return self.__result_key
+
+    @property
+    def SupportBatching(self):
+        """ By default pipeline item is not designed for batching.
+        """
+        return False
+
+    def get_source(self, src_ctx, call_func=True, force_key=None):
+        """ Extract input element for processing.
+        """
+        assert(isinstance(src_ctx, PipelineContext))
+
+        # If there is no information about key, then we consider absence of the source.
+        if self.__src_key is None:
+            return None
+
+        # Extracting actual source.
+        src_data = src_ctx.provide(self.__src_key if force_key is None else force_key)
+        if self._src_func is not None and call_func:
+            src_data = self._src_func(src_data)
+
+        return src_data
+
     def apply_core(self, input_data, pipeline_ctx):
-
+        """By default we do nothing."""
+        pass

     def apply(self, input_data, pipeline_ctx=None):
         """ Performs input processing an update it for a further pipeline items.
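Every item now declares where it reads from (`src_key`), where it writes (`result_key`), and an optional `src_func` applied to the fetched value first. A hedged configuration sketch; the `EchoItem` is hypothetical:

    from arekit.common.pipeline.items.base import BasePipelineItem

    class EchoItem(BasePipelineItem):
        # Hypothetical pass-through item.
        def apply_core(self, input_data, pipeline_ctx):
            return input_data

    # Reads "input", strips it via src_func, publishes under "echo".
    item = EchoItem(src_key="input", result_key="echo", src_func=str.strip)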
arekit/common/pipeline/items/flatten.py
CHANGED

@@ -5,10 +5,14 @@ class FlattenIterPipelineItem(BasePipelineItem):
     """ Considered to flat iterations of items that represent iterations.
     """

+    def __init__(self, **kwargs):
+        super(FlattenIterPipelineItem, self).__init__(**kwargs)
+        pass
+
     def __flat_iter(self, iter_data):
         for iter_item in iter_data:
             for item in iter_item:
                 yield item

     def apply_core(self, input_data, pipeline_ctx):
-        return self.__flat_iter(input_data)
+        return self.__flat_iter(input_data)
arekit/common/pipeline/items/handle.py
CHANGED

@@ -3,8 +3,9 @@ from arekit.common.pipeline.items.base import BasePipelineItem

 class HandleIterPipelineItem(BasePipelineItem):

-    def __init__(self, handle_func=None):
+    def __init__(self, handle_func=None, **kwargs):
         assert(callable(handle_func))
+        super(HandleIterPipelineItem, self).__init__(**kwargs)
         self.__handle_func = handle_func

     def __updated_data(self, items_iter):
arekit/common/pipeline/items/iter.py
CHANGED

@@ -3,8 +3,9 @@ from arekit.common.pipeline.items.base import BasePipelineItem

 class FilterPipelineItem(BasePipelineItem):

-    def __init__(self, filter_func=None):
+    def __init__(self, filter_func=None, **kwargs):
         assert(callable(filter_func))
+        super(FilterPipelineItem, self).__init__(**kwargs)
         self.__filter_func = filter_func

     def apply_core(self, input_data, pipeline_ctx):
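Because the constructors now forward `**kwargs` to the base class, existing items can be re-keyed without subclassing. A sketch, assuming `apply_core` applies filter() lazily (its body is not shown in the diff):

    from arekit.common.pipeline.base import BasePipelineLauncher
    from arekit.common.pipeline.context import PipelineContext
    from arekit.common.pipeline.items.iter import FilterPipelineItem

    ctx = PipelineContext(d={"numbers": [1, 2, 3, 4]})
    item = FilterPipelineItem(filter_func=lambda n: n % 2 == 0,
                              src_key="numbers", result_key="even")
    BasePipelineLauncher.run(pipeline=[item], pipeline_ctx=ctx)
    print(list(ctx.provide("even")))  # [2, 4]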
arekit/common/pipeline/items/map.py
CHANGED

@@ -3,8 +3,9 @@ from arekit.common.pipeline.items.base import BasePipelineItem

 class MapPipelineItem(BasePipelineItem):

-    def __init__(self, map_func=None):
+    def __init__(self, map_func=None, **kwargs):
         assert(callable(map_func))
+        super(MapPipelineItem, self).__init__(**kwargs)
         self._map_func = map_func

     def apply_core(self, input_data, pipeline_ctx):
arekit/common/pipeline/items/map_nested.py
CHANGED

@@ -9,5 +9,9 @@ class MapNestedPipelineItem(MapPipelineItem):
         suppose to be mapped with the passed pipeline context.
     """

+    def __init__(self, **kwargs):
+        super(MapNestedPipelineItem, self).__init__(**kwargs)
+        pass
+
     def apply_core(self, input_data, pipeline_ctx):
         return map(lambda item: self._map_func(item, pipeline_ctx), input_data)
arekit/common/pipeline/utils.py
ADDED

@@ -0,0 +1,32 @@
+class BatchIterator:
+
+    def __init__(self, data_iter, batch_size, end_value=None):
+        assert(isinstance(batch_size, int) and batch_size > 0)
+        assert(callable(end_value) or end_value is None)
+        self.__data_iter = data_iter
+        self.__index = 0
+        self.__batch_size = batch_size
+        self.__end_value = end_value
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        buffer = []
+        while True:
+            try:
+                data = next(self.__data_iter)
+            except StopIteration:
+                break
+            buffer.append(data)
+            if len(buffer) == self.__batch_size:
+                break
+
+        if len(buffer) > 0:
+            self.__index += 1
+            return buffer
+
+        if self.__end_value is None:
+            raise StopIteration
+        else:
+            return self.__end_value()
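The iterator simply buffers up to `batch_size` elements per step, so its behavior can be checked directly:

    from arekit.common.pipeline.utils import BatchIterator

    for batch in BatchIterator(data_iter=iter(range(7)), batch_size=3):
        print(batch)  # [0, 1, 2] then [3, 4, 5] then [6]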
arekit/common/service/sqlite.py
ADDED

@@ -0,0 +1,36 @@
+import sqlite3
+
+
+class SQLiteProvider(object):
+
+    @staticmethod
+    def write(data_it, target, data2col_func, table_name, columns, sqlite3_column_types,
+              id_column_name="id", id_column_type="TEXT"):
+        assert(callable(data2col_func))
+        assert(isinstance(columns, list))
+        assert(isinstance(sqlite3_column_types, list))
+        assert(len(columns) == len(sqlite3_column_types))
+
+        with sqlite3.connect(target) as con:
+            cur = con.cursor()
+
+            # Provide the ID column.
+            columns = [id_column_name] + columns
+            sqlite3_column_types = [id_column_type] + sqlite3_column_types
+
+            # Compose the whole columns list.
+            content = ", ".join([" ".join(item) for item in zip(columns, sqlite3_column_types)])
+            cur.execute(f"CREATE TABLE IF NOT EXISTS {table_name}({content})")
+            cur.execute(f"CREATE INDEX IF NOT EXISTS i_id ON {table_name}({id_column_name})")
+
+            for uid, data in data_it:
+                r = cur.execute(f"SELECT EXISTS(SELECT 1 FROM {table_name} WHERE {id_column_name}='{uid}');")
+                ans = r.fetchone()[0]
+                if ans == 1:
+                    continue
+
+                params = ", ".join(tuple(['?'] * (len(columns))))
+                cur.execute(f"INSERT INTO {table_name} VALUES ({params})", [str(uid)] + data2col_func(data))
+                con.commit()
+
+            cur.close()
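In short, `write` creates the table on demand, indexes the id column, and skips ids that already exist. A minimal sketch with an illustrative one-column layout and file name:

    from arekit.common.service.sqlite import SQLiteProvider

    docs = [("d1", "first text"), ("d2", "second text")]
    SQLiteProvider.write(data_it=iter(docs),
                         target="contents.sqlite",
                         data2col_func=lambda text: [text],
                         table_name="contents",
                         columns=["text"],
                         sqlite3_column_types=["TEXT"])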
arekit/common/text/{partitioning/str.py → partitioning.py}
RENAMED

@@ -1,28 +1,34 @@
 from collections.abc import Iterable

 from arekit.common.bound import Bound
-from arekit.common.text.partitioning.base import BasePartitioning


-class
+class Partitioning(object):
     """ NOTE: considering that provided parts
         has no intersections between each other
     """

+    list_reg_types = {
+        "str": lambda p, item: p.append(item),
+        "list": lambda p, item: p.extend(item)
+    }
+
+    def __init__(self, text_fmt):
+        assert(isinstance(text_fmt, str) and text_fmt in self.list_reg_types)
+        self.__reg_part = self.list_reg_types[text_fmt]
+
     def provide(self, text, parts_it):
-        assert(isinstance(text, str))
         assert(isinstance(parts_it, Iterable))

-        start = 0
         parts = []
+        start = 0
+
         for value, bound in parts_it:
             assert(isinstance(bound, Bound))
             assert(bound.Position >= start)

             # Release everything till the current value position.
-
-
-            parts.append(part)
+            self.__reg_part(p=parts, item=text[start:bound.Position])

             # Release the entity value.
             parts.extend([value])

@@ -30,7 +36,6 @@ class StringPartitioning(BasePartitioning):
             start = bound.Position + bound.Length

         # Release everything after the last entity.
-
-        parts.extend([last_part])
+        self.__reg_part(p=parts, item=text[start:len(text)])

         return parts
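The merged class covers both the former string- and terms-based implementations via `text_fmt`. A sketch over a raw string; the `Bound(pos, length)` constructor is assumed here:

    from arekit.common.bound import Bound
    from arekit.common.text.partitioning import Partitioning

    text = "an apple a day"
    p = Partitioning(text_fmt="str")
    # Replace the span covering "apple" (offset 3, length 5) with a value.
    parts = p.provide(text=text, parts_it=[("[FRUIT]", Bound(pos=3, length=5))])
    print(parts)  # ['an ', '[FRUIT]', ' a day']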
arekit/common/utils.py
CHANGED

@@ -1,6 +1,5 @@
 import sys
 import os
-import requests
 from tqdm import tqdm


@@ -84,46 +83,3 @@ def progress_bar_iter(iterable, desc="", unit='it'):
                leave=True,
                ncols=120,
                unit=unit)
-
-
-def get_default_download_dir():
-    """ Refered to NLTK toolkit approach
-        https://github.com/nltk/nltk/blob/8e771679cee1b4a9540633cc3ea17f4421ffd6c0/nltk/downloader.py#L1051
-    """
-
-    # On Windows, use %APPDATA%
-    if sys.platform == "win32" and "APPDATA" in os.environ:
-        homedir = os.environ["APPDATA"]
-
-    # Otherwise, install in the user's home directory.
-    else:
-        homedir = os.path.expanduser("~/")
-        if homedir == "~/":
-            raise ValueError("Could not find a default download directory")
-
-    return os.path.join(homedir, ".arekit")
-
-
-def download(dest_file_path, source_url):
-    """ Refered to https://github.com/nicolay-r/ner-bilstm-crf-tensorflow/blob/master/ner/utils.py
-        Simple http file downloader
-    """
-    print(('Downloading from {src} to {dest}'.format(src=source_url, dest=dest_file_path)))
-
-    sys.stdout.flush()
-    datapath = os.path.dirname(dest_file_path)
-
-    if not os.path.exists(datapath):
-        os.makedirs(datapath, mode=0o755)
-
-    dest_file_path = os.path.abspath(dest_file_path)
-
-    r = requests.get(source_url, stream=True)
-    total_length = int(r.headers.get('content-length', 0))
-
-    with open(dest_file_path, 'wb') as f:
-        pbar = tqdm(total=total_length, unit='B', unit_scale=True)
-        for chunk in r.iter_content(chunk_size=32 * 1024):
-            if chunk:  # filter out keep-alive new chunks
-                pbar.update(len(chunk))
-                f.write(chunk)
arekit/contrib/utils/data/contents/opinions.py
CHANGED

@@ -2,7 +2,8 @@ from arekit.common.data.input.providers.const import IDLE_MODE
 from arekit.common.data.input.providers.contents import ContentsProvider
 from arekit.common.linkage.base import LinkedDataWrapper
 from arekit.common.linkage.text_opinions import TextOpinionsLinkage
-from arekit.common.pipeline.base import
+from arekit.common.pipeline.base import BasePipelineLauncher
+from arekit.common.pipeline.context import PipelineContext
 from arekit.common.text_opinions.base import TextOpinion


@@ -13,7 +14,7 @@ class InputTextOpinionProvider(ContentsProvider):
         results in a TextOpinionLinkage instances.
         pipeline: id -> ... -> TextOpinionLinkage[]
         """
-        assert(isinstance(pipeline,
+        assert(isinstance(pipeline, list))
         self.__pipeline = pipeline
         self.__current_id = None

@@ -30,7 +31,16 @@ class InputTextOpinionProvider(ContentsProvider):

     def from_doc_ids(self, doc_ids, idle_mode=False):
         self.__current_id = 0
-
+
+        ctx = PipelineContext(d={
+            "result": doc_ids,
+            IDLE_MODE: idle_mode
+        })
+
+        # Launching pipeline with the passed context
+        BasePipelineLauncher.run(pipeline=self.__pipeline, pipeline_ctx=ctx)
+
+        for linkage in ctx.provide("result"):
             assert(isinstance(linkage, LinkedDataWrapper))
             if isinstance(linkage, TextOpinionsLinkage):
                 self.__assign_ids(linkage)
arekit/contrib/utils/data/readers/sqlite.py
ADDED

@@ -0,0 +1,14 @@
+from arekit.contrib.utils.data.readers.base import BaseReader
+from arekit.contrib.utils.data.storages.sqlite_based import SQliteBasedRowsStorage
+
+
+class SQliteReader(BaseReader):
+
+    def __init__(self, table_name):
+        self.__table_name = table_name
+
+    def extension(self):
+        return ".sqlite"
+
+    def read(self, target):
+        return SQliteBasedRowsStorage(path=target, table_name=self.__table_name)
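Together with the new storage class this closes the round trip: tables produced via SQLiteProvider (or SQliteWriter) can be read back row by row. A sketch; `_iter_rows` is the internal iterator shown below, so the public storage API may wrap it differently:

    from arekit.contrib.utils.data.readers.sqlite import SQliteReader

    reader = SQliteReader(table_name="contents")
    storage = reader.read(target="contents.sqlite")
    for row_index, row_dict in storage._iter_rows():
        print(row_index, row_dict)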
arekit/contrib/utils/data/storages/row_cache.py
CHANGED

@@ -33,7 +33,12 @@ class RowCacheStorage(BaseRowsStorage):

         # Expand with columns that are forced to be provided.
         existed_set = set(self.__column_names)
-
+
+        # Calculate extension: columns that were not mentioned in column names list.
+        extension = [c for c in self.__force_collect_columns if c not in existed_set]
+
+        self.__column_names += extension
+        self.__column_types += [str] * len(extension)

     def iter_column_names(self):
         return iter(self.__column_names)
arekit/contrib/utils/data/storages/sqlite_based.py
ADDED

@@ -0,0 +1,17 @@
+import sqlite3
+from arekit.common.data.storages.base import BaseRowsStorage
+
+
+class SQliteBasedRowsStorage(BaseRowsStorage):
+
+    def __init__(self, path, table_name):
+        self.__path = path
+        self.__table_name = table_name
+        self.__conn = None
+
+    def _iter_rows(self):
+        with sqlite3.connect(self.__path) as conn:
+            cursor = conn.execute(f"select * from {self.__table_name}")
+            for row_index, row in enumerate(cursor.fetchall()):
+                row_dict = {cursor.description[i][0]: value for i, value in enumerate(row)}
+                yield row_index, row_dict
arekit/contrib/utils/data/writers/sqlite_native.py
CHANGED

@@ -8,6 +8,10 @@ from arekit.contrib.utils.data.writers.base import BaseWriter


 class SQliteWriter(BaseWriter):
+    """ TODO. This implementation is dedicated for the writing concepts of the data
+        serialization pipeline. However we add the SQLite3 service, it would be
+        right to refactor and utlize some core functionality from the core/service/sqlite.py
+    """

     def __init__(self, table_name="contents", index_column_names=None, skip_existed=False, clear_table=True):
         """ index_column_names: list or None
arekit/contrib/utils/io_utils/utils.py
CHANGED

@@ -1,29 +1,12 @@
 from collections.abc import Iterable
 import logging
-from os.path import
-
-from arekit.common.experiment.data_type import DataType
+from os.path import exists


 logger = logging.getLogger(__name__)
 logging.basicConfig(level=logging.INFO)


-def join_dir_with_subfolder_name(subfolder_name, dir):
-    """ Returns subfolder in in directory
-    """
-    assert(isinstance(subfolder_name, str))
-    assert(isinstance(dir, str))
-
-    target_dir = join(dir, "{}/".format(subfolder_name))
-    return target_dir
-
-
-def filename_template(data_type):
-    assert(isinstance(data_type, DataType))
-    return "{data_type}-0".format(data_type=data_type.name.lower())
-
-
 def check_targets_existence(targets):
     assert (isinstance(targets, Iterable))
arekit/contrib/utils/pipelines/items/sampling/base.py
CHANGED

@@ -2,15 +2,13 @@ from arekit.common.data.input.providers.rows.samples import BaseSampleRowProvider
 from arekit.common.data.storages.base import BaseRowsStorage
 from arekit.common.experiment.api.base_samples_io import BaseSamplesIO
 from arekit.common.experiment.data_type import DataType
-from arekit.common.pipeline.base import BasePipeline
-from arekit.common.pipeline.context import PipelineContext
 from arekit.common.pipeline.items.base import BasePipelineItem
 from arekit.contrib.utils.serializer import InputDataSerializationHelper


 class BaseSerializerPipelineItem(BasePipelineItem):

-    def __init__(self, rows_provider, samples_io, save_labels_func, storage):
+    def __init__(self, rows_provider, samples_io, save_labels_func, storage, **kwargs):
         """ sample_rows_formatter:
             how we format input texts for a BERT model, for example:
             - single text

@@ -23,6 +21,7 @@ class BaseSerializerPipelineItem(BasePipelineItem):
         assert(isinstance(samples_io, BaseSamplesIO))
         assert(callable(save_labels_func))
         assert(isinstance(storage, BaseRowsStorage))
+        super(BaseSerializerPipelineItem, self).__init__(**kwargs)

         self._rows_provider = rows_provider
         self._samples_io = samples_io

@@ -31,7 +30,7 @@ class BaseSerializerPipelineItem(BasePipelineItem):

     def _serialize_iteration(self, data_type, pipeline, data_folding, doc_ids):
         assert(isinstance(data_type, DataType))
-        assert(isinstance(pipeline,
+        assert(isinstance(pipeline, list))
         assert(isinstance(data_folding, dict) or data_folding is None)
         assert(isinstance(doc_ids, list) or doc_ids is None)
         assert(doc_ids is not None or data_folding is not None)

@@ -89,11 +88,7 @@ class BaseSerializerPipelineItem(BasePipelineItem):
         doc_ids: optional
             this parameter allows to limit amount of documents considered for sampling
         """
-        assert(
-
-
-
-        self._handle_iteration(data_type_pipelines=input_data.provide("data_type_pipelines"),
-                               doc_ids=input_data.provide_or_none("doc_ids"),
-                               data_folding=data_folding)
+        assert("data_type_pipelines" in pipeline_ctx)
+        self._handle_iteration(data_type_pipelines=pipeline_ctx.provide("data_type_pipelines"),
+                               doc_ids=pipeline_ctx.provide_or_none("doc_ids"),
+                               data_folding=pipeline_ctx.provide_or_none("data_folding"))