SinaTools 0.1.35__py2.py3-none-any.whl → 0.1.37__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sinatools/ner/data.py DELETED
@@ -1,124 +0,0 @@
- from torch.utils.data import DataLoader
- from torchtext.vocab import vocab
- from collections import Counter, namedtuple
- import logging
- import re
- import itertools
- from sinatools.ner.helpers import load_object
- from sinatools.ner.datasets import Token
- from sinatools.utils.tokenizers_words import simple_word_tokenize
-
- logger = logging.getLogger(__name__)
-
-
- def conll_to_segments(filename):
-     """
-     Convert a CoNLL file to segments. Returns a list of segments, where each
-     segment is a list of (token, tag) tuples.
-     :param filename: Path
-     :return: list[[tuple]] - [[(token, tag), (token, tag), ...], [(token, tag), ...]]
-     """
-     segments, segment = list(), list()
-
-     with open(filename, "r") as fh:
-         for token in fh.read().splitlines():
-             if not token.strip():
-                 segments.append(segment)
-                 segment = list()
-             else:
-                 parts = token.split()
-                 token = Token(text=parts[0], gold_tag=parts[1:])
-                 segment.append(token)
-
-         segments.append(segment)
-
-     return segments
-
-
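For reference, a minimal usage sketch of this reader; the file name and its two-column contents are hypothetical:

    # "train.conll" is a hypothetical file with one "token tag" pair per line
    # and a blank line between segments.
    segments = conll_to_segments("train.conll")
    print(len(segments))                                # number of segments
    print([(t.text, t.gold_tag) for t in segments[0]])  # Token fields of the first segment
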
- def parse_conll_files(data_paths):
-     """
-     Parse CoNLL-formatted files, returning the list of segments for each file and
-     indexing the vocabs and tags across all data_paths
-     :param data_paths: tuple(Path) - tuple of filenames
-     :return: tuple( [[(token, tag), ...], [(token, tag), ...]], -> segments for data_paths[i]
-                     [[(token, tag), ...], [(token, tag), ...]], -> segments for data_paths[i+1],
-                     ...
-                   )
-              List of segments for each dataset; each segment holds a list of (token, tag) pairs
-     """
-     vocabs = namedtuple("Vocab", ["tags", "tokens"])
-     datasets, tags, tokens = list(), list(), list()
-
-     for data_path in data_paths:
-         dataset = conll_to_segments(data_path)
-         datasets.append(dataset)
-         tokens += [token.text for segment in dataset for token in segment]
-         tags += [token.gold_tag for segment in dataset for token in segment]
-
-     # Flatten the list of tag lists (each token may carry multiple gold tags)
-     tags = list(itertools.chain(*tags))
-
-     # Generate vocabs for tags and tokens
-     tag_vocabs = tag_vocab_by_type(tags)
-     tag_vocabs.insert(0, vocab(Counter(tags)))
-     vocabs = vocabs(tokens=vocab(Counter(tokens), specials=["UNK"]), tags=tag_vocabs)
-     return tuple(datasets), vocabs
-
-
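A sketch of the shapes this returns; the paths are hypothetical:

    datasets, vocabs = parse_conll_files(("train.conll", "val.conll"))
    assert len(datasets) == 2           # one list of segments per input file
    unk_index = vocabs.tokens["UNK"]    # the token vocab carries an UNK special
    joint_tag_vocab = vocabs.tags[0]    # index 0: vocab over all tags; 1..n: one vocab per tag type
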
- def tag_vocab_by_type(tags):
-     vocabs = list()
-     c = Counter(tags)
-     tag_names = c.keys()
-     tag_types = sorted(list(set([tag.split("-", 1)[1] for tag in tag_names if "-" in tag])))
-
-     for tag_type in tag_types:
-         r = re.compile(".*-" + tag_type)
-         t = list(filter(r.match, tags)) + ["O"]
-         vocabs.append(vocab(Counter(t), specials=["<pad>"]))
-
-     return vocabs
-
-
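An illustrative call, showing the per-type vocabularies it builds:

    per_type = tag_vocab_by_type(["B-PERS", "I-PERS", "B-LOC", "O"])
    # Tag types are sorted alphabetically, so per_type[0] covers LOC
    # ("<pad>", "B-LOC", "O") and per_type[1] covers PERS
    # ("<pad>", "B-PERS", "I-PERS", "O").
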
- def text2segments(text):
-     """
-     Convert raw text to a dataset of segments and index the tokens
-     """
-     list_of_tokens = simple_word_tokenize(text)
-     dataset = [[Token(text=token, gold_tag=["O"]) for token in list_of_tokens]]
-     tokens = [token.text for segment in dataset for token in segment]
-
-     # Generate a vocab for the tokens
-     segment_vocab = vocab(Counter(tokens), specials=["UNK"])
-     return dataset, segment_vocab
-
-
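A usage sketch; the sentence is illustrative:

    dataset, segment_vocab = text2segments("ذهب محمد إلى القدس")
    print([token.text for token in dataset[0]])  # tokens, each initialized with gold_tag ["O"]
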
- def get_dataloaders(
-     datasets, vocab, data_config, batch_size=32, num_workers=0, shuffle=(True, False, False)
- ):
-     """
-     Generate a DataLoader for each of the given datasets
-     :param datasets: list - list of datasets; each dataset is a list of segments of tokens
-     :param batch_size: int
-     :param num_workers: int
-     :param shuffle: tuple(boolean) - whether to shuffle each corresponding dataset
-     :return: List[torch.utils.data.DataLoader]
-     """
-     dataloaders = list()
-
-     for i, examples in enumerate(datasets):
-         data_config["kwargs"].update({"examples": examples, "vocab": vocab})
-         dataset = load_object("sinatools." + data_config["fn"], data_config["kwargs"])
-
-         dataloader = DataLoader(
-             dataset=dataset,
-             shuffle=shuffle[i],
-             batch_size=batch_size,
-             num_workers=num_workers,
-             collate_fn=dataset.collate_fn,
-         )
-
-         logger.info("%s batches found", len(dataloader))
-         dataloaders.append(dataloader)
-
-     return dataloaders
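A sketch of the expected call. The "fn" value here is hypothetical and must name a dataset class that actually ships in the sinatools package (it is resolved via load_object); train_segments and val_segments stand for datasets as returned by parse_conll_files:

    data_config = {
        "fn": "ner.datasets.Dataset",  # hypothetical dotted path
        "kwargs": {},
    }
    train_loader, val_loader = get_dataloaders(
        (train_segments, val_segments), vocabs, data_config,
        batch_size=16, shuffle=(True, False),
    )
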
@@ -1,201 +0,0 @@
- import torch
- import json
- from urllib.request import Request, urlopen
- from sinatools.ner.entity_extractor import extract
- from . import pipe
-
-
- # ============================ Extract entities and their types ========================
- def jsons_to_list_of_lists(json_list):
-     return [[d['token'], d['tags']] for d in json_list]
-
- def entities_and_types(sentence):
-     output_list = jsons_to_list_of_lists(extract(sentence))
-     json_short = distill_entities(output_list)
-
-     entities = {}
-     for entity in json_short:
-         name = entity[0]
-         entity_type = entity[1]
-         entities[name] = entity_type
-
-     return entities
-
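A usage sketch; both the sentence and the returned mapping are illustrative:

    entities = entities_and_types("اعتقل الجيش طفلاً في القدس")
    # e.g. {"الجيش": "ORG", "طفلاً": "PERS", "القدس": "GPE"}

Note that because the result is keyed by surface form, repeated mentions of the same string collapse into a single entry.
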
- def distill_entities(entities):
-     # Output list of distilled entity spans
-     list_output = list()
-
-     # Sort the tags of each entity first
-     temp_entities = sortTags(entities)
-
-     # Working list used to build the output; each slot holds [text, tag, start, end]
-     temp_list = list()
-
-     # Initialize the working list
-     temp_list.append(["", "", 0, 0])
-     word_position = 0
-
-     # For each entity, convert its IOB tags into distilled spans.
-     for entity in temp_entities:
-         # Counter over this entity's tags
-         counter_tag = 0
-         # For each tag
-         for tag in str(entity[1]).split():
-             # If the tag counter reached the end of the working list, append an empty slot
-             if counter_tag >= len(temp_list):
-                 temp_list.append(["", "", 0, 0])
-
-             # If the tag is O and the word position is not zero, flush every
-             # non-empty slot of the working list into the output list
-             if "O" == tag and word_position != 0:
-                 for j in range(0, len(temp_list)):
-                     if temp_list[j][1] != "":
-                         list_output.append([temp_list[j][0].strip(), temp_list[j][1], temp_list[j][2], temp_list[j][3]])
-                     temp_list[j][0] = ""
-                     temp_list[j][1] = ""
-                     temp_list[j][2] = word_position
-                     temp_list[j][3] = word_position
-             # If the tag is not O, splits on '-' into exactly two parts, and begins with B
-             elif "O" != tag and len(tag.split("-")) == 2 and tag.split("-")[0] == "B":
-                 # If the slot at this counter is not empty, flush it to the output list,
-                 # then re-initialize it with the new text and tag
-                 if temp_list[counter_tag][1] != "":
-                     list_output.append([temp_list[counter_tag][0].strip(), temp_list[counter_tag][1], temp_list[counter_tag][2], temp_list[counter_tag][3]])
-                 temp_list[counter_tag][0] = str(entity[0]) + " "
-                 temp_list[counter_tag][1] = str(tag).split("-")[1]
-                 temp_list[counter_tag][2] = word_position
-                 temp_list[counter_tag][3] = word_position
-
-             # If the tag is not O, splits on '-' into exactly two parts, and begins with I
-             elif "O" != tag and len(tag.split("-")) == 2 and tag.split("-")[0] == "I" and word_position != 0:
-                 # Scan the working list from this slot onwards: if a slot carries the same
-                 # type and was not already extended at this position, extend it; otherwise
-                 # flush the slot to the output list and reset it
-                 for j in range(counter_tag, len(temp_list)):
-                     if temp_list[j][1] == tag[2:] and temp_list[j][3] != word_position:
-                         temp_list[j][0] += str(entity[0]) + " "
-                         temp_list[j][3] += 1
-                         break
-                     else:
-                         if temp_list[j][1] != "":
-                             list_output.append([temp_list[j][0].strip(), temp_list[j][1], temp_list[j][2], temp_list[j][3]])
-                         temp_list[j][0] = ""
-                         temp_list[j][1] = ""
-                         temp_list[j][2] = word_position
-                         temp_list[j][3] = word_position
-             counter_tag += 1
-         word_position += 1
-     # Flush whatever is still held in the working list into the output list
-     for j in range(0, len(temp_list)):
-         if temp_list[j][1] != "":
-             list_output.append([temp_list[j][0].strip(), temp_list[j][1], temp_list[j][2], temp_list[j][3]])
-     return sorted(list_output, key=lambda x: (x[2]))
-
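A sketch of the data shapes: (token, tags) pairs in, [text, type, start, end] spans out; the values are illustrative:

    rows = [["صورة", "O"], ["اعتقال", "B-EVENT"], ["طفل", "B-PERS"], ["فلسطيني", "I-PERS"]]
    print(distill_entities(rows))
    # [["اعتقال", "EVENT", 1, 1], ["طفل فلسطيني", "PERS", 2, 3]]
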
- def sortTags(entities):
-     temp_entities = entities
-     temp_counter = 0
-     # For each entity, sort its tags: first check whether the previous entity's tags
-     # carry the same count of each tag type, then sort the tags and fix their order
-     for entity in temp_entities:
-         tags = entity[1].split()
-         for tag in tags:
-             # Only applies from the second entity onwards
-             if temp_counter != 0:
-                 # If this tag starts with I-, count how many tags of this type occur
-                 # in this entity's tags and in the previous entity's tags
-                 if "I-" == tag[0:2]:
-                     counter_of_this_tag = 0
-                     counter_of_previous_tag = 0
-                     for word in tags:
-                         if tag.split("-")[1] in word:
-                             counter_of_this_tag += 1
-                     for word in temp_entities[temp_counter-1][1].split():
-                         if tag.split("-")[1] in word:
-                             counter_of_previous_tag += 1
-                     # If the previous count is larger, carry an extra I-tag of this
-                     # type into this entity's tags
-                     if counter_of_previous_tag > counter_of_this_tag:
-                         tags.append("I-" + tag.split("-")[1])
-         # Sort the tags
-         tags.sort()
-         # Reverse the tags because the I- tags should come first
-         tags.reverse()
-         # Only applies from the second entity onwards
-         if temp_counter != 0:
-             this_tags = tags
-             previous_tags = temp_entities[temp_counter - 1][1].split()
-             sorted_tags = list()
-
-             # Only reorder when neither this entity's tags nor the previous entity's
-             # tags contain O; otherwise leave the tags as they are
-             if "O" not in this_tags and "O" not in previous_tags:
-                 index = 0
-                 # Align this entity's I- tags with the order of the previous entity's
-                 # tags; B- tags keep their position
-                 for i in previous_tags:
-                     j = 0
-                     while this_tags and j < len(this_tags):
-                         if this_tags[j][0:2] == "I-" and this_tags[j][2:] == i[2:]:
-                             sorted_tags.insert(index, this_tags.pop(j))
-                             break
-                         elif this_tags[j][0:2] == "B-":
-                             break
-                         j += 1
-                     index += 1
-                 sorted_tags += this_tags
-                 tags = sorted_tags
-         str_tag = " "
-         str_tag = str_tag.join(tags)
-         str_tag = str_tag.strip()
-         temp_entities[temp_counter][1] = str_tag
-         temp_counter += 1
-     return temp_entities
-
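An illustrative call on a nested (multi-label) sequence, showing a continuation tag being moved to match the previous token's tag order:

    rows = [["جامعة", "B-ORG"], ["بيرزيت", "B-GPE I-ORG"]]
    print(sortTags(rows))  # [["جامعة", "B-ORG"], ["بيرزيت", "I-ORG B-GPE"]]
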
- # ============= Prepare Templates and Categorize Extracted Entities ================
- # Arabic relation templates: 'مكان حدوث' = "place of occurrence of",
- # 'أحد المتأثرين في' = "one of those affected in", 'تاريخ حدوث' = "date of occurrence of"
- temp03 = {'location': 'مكان حدوث', 'agent': 'أحد المتأثرين في', 'happened at': 'تاريخ حدوث'}
- categories = {
-     'agent': ['PERS', 'NORP', 'OCC', 'ORG'],
-     'location': ['LOC', 'FAC', 'GPE'],
-     'happened at': ['DATE', 'TIME']
- }
-
- def get_entity_category(entity_type, categories):
-     for category, types in categories.items():
-         if entity_type in types:
-             return category
-     return None
-
-
- # ============ Extract entities, their types, and categorize them ===============
- def relation_extraction(sentence):
-     entities = entities_and_types(sentence)
-
-     event_indices = [i for i, (_, entity_type) in enumerate(entities.items()) if entity_type == 'EVENT']
-     arg_event_indices = [i for i, (_, entity_type) in enumerate(entities.items()) if entity_type != 'EVENT']
-
-     output_list = []
-
-     for i in event_indices:
-         event_entity = list(entities.keys())[i]
-         for j in arg_event_indices:
-             arg_name = list(entities.keys())[j]
-             arg_type = entities[arg_name]
-             category = get_entity_category(arg_type, categories)
-
-             if category in temp03:
-                 relation_sentence = f"[CLS] {sentence} [SEP] {event_entity} {temp03[category]} {arg_name}"
-                 predicted_relation = pipe(relation_sentence)
-                 score = predicted_relation[0][0]['score']
-                 if score > 0.50:
-                     output_list.append(f"Event:{event_entity}, Relation:{category}, Argument:{arg_name}")
-
-             else:
-                 output_list.append(f"Event:{event_entity}, Relation:No relation, Argument:{arg_name}")
-
-     return output_list
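
A usage sketch; the sentence is illustrative and the printed relations depend on the scores produced by the pipe classifier:

    for relation in relation_extraction("صورة إعتقال طفل فلسطيني خلال انتفاضة الأقصى ."):
        print(relation)  # e.g. "Event:إعتقال, Relation:agent, Argument:طفل فلسطيني"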