opener-opinion-detector-base 2.0.1 → 2.1.2

Files changed (67)
  1. checksums.yaml +4 -4
  2. data/core/python-scripts/README.md +78 -3
  3. data/core/python-scripts/classify_kaf_naf_file.py +94 -94
  4. data/core/python-scripts/models.cfg +1 -0
  5. data/core/python-scripts/scripts/config_manager.py +3 -0
  6. data/core/python-scripts/scripts/extract_features.py +0 -3
  7. data/core/python-scripts/scripts/relation_classifier.py +1 -1
  8. data/core/vendor/src/crfsuite/crfsuite.sln +42 -42
  9. data/core/vendor/src/liblbfgs/lbfgs.sln +26 -26
  10. data/ext/hack/Rakefile +5 -2
  11. data/lib/opener/opinion_detectors/base.rb +19 -15
  12. data/lib/opener/opinion_detectors/base/version.rb +1 -1
  13. data/lib/opener/opinion_detectors/configuration_creator.rb +6 -8
  14. data/lib/opener/opinion_detectors/de.rb +1 -1
  15. data/lib/opener/opinion_detectors/es.rb +7 -0
  16. data/lib/opener/opinion_detectors/fr.rb +7 -0
  17. data/opener-opinion-detector-base.gemspec +0 -1
  18. data/pre_install_requirements.txt +3 -0
  19. metadata +41 -85
  20. data/core/packages/KafNafParser-1.4.tar.gz +0 -0
  21. data/core/packages/VUA_pylib-1.5.tar.gz +0 -0
  22. data/core/site-packages/pre_build/KafNafParser-1.4-py2.7.egg-info/PKG-INFO +0 -10
  23. data/core/site-packages/pre_build/KafNafParser-1.4-py2.7.egg-info/SOURCES.txt +0 -22
  24. data/core/site-packages/pre_build/KafNafParser-1.4-py2.7.egg-info/dependency_links.txt +0 -1
  25. data/core/site-packages/pre_build/KafNafParser-1.4-py2.7.egg-info/installed-files.txt +0 -47
  26. data/core/site-packages/pre_build/KafNafParser-1.4-py2.7.egg-info/top_level.txt +0 -1
  27. data/core/site-packages/pre_build/KafNafParser/KafNafParserMod.py +0 -390
  28. data/core/site-packages/pre_build/KafNafParser/__init__.py +0 -14
  29. data/core/site-packages/pre_build/KafNafParser/constituency_data.py +0 -125
  30. data/core/site-packages/pre_build/KafNafParser/coreference_data.py +0 -52
  31. data/core/site-packages/pre_build/KafNafParser/dependency_data.py +0 -78
  32. data/core/site-packages/pre_build/KafNafParser/entity_data.py +0 -59
  33. data/core/site-packages/pre_build/KafNafParser/external_references_data.py +0 -41
  34. data/core/site-packages/pre_build/KafNafParser/feature_extractor/__init__.py +0 -2
  35. data/core/site-packages/pre_build/KafNafParser/feature_extractor/constituency.py +0 -205
  36. data/core/site-packages/pre_build/KafNafParser/feature_extractor/dependency.py +0 -309
  37. data/core/site-packages/pre_build/KafNafParser/features_data.py +0 -131
  38. data/core/site-packages/pre_build/KafNafParser/header_data.py +0 -127
  39. data/core/site-packages/pre_build/KafNafParser/opinion_data.py +0 -211
  40. data/core/site-packages/pre_build/KafNafParser/references_data.py +0 -23
  41. data/core/site-packages/pre_build/KafNafParser/span_data.py +0 -63
  42. data/core/site-packages/pre_build/KafNafParser/term_data.py +0 -111
  43. data/core/site-packages/pre_build/KafNafParser/term_sentiment_data.py +0 -42
  44. data/core/site-packages/pre_build/KafNafParser/text_data.py +0 -99
  45. data/core/site-packages/pre_build/VUA_pylib-1.5-py2.7.egg-info/PKG-INFO +0 -10
  46. data/core/site-packages/pre_build/VUA_pylib-1.5-py2.7.egg-info/SOURCES.txt +0 -14
  47. data/core/site-packages/pre_build/VUA_pylib-1.5-py2.7.egg-info/dependency_links.txt +0 -1
  48. data/core/site-packages/pre_build/VUA_pylib-1.5-py2.7.egg-info/installed-files.txt +0 -23
  49. data/core/site-packages/pre_build/VUA_pylib-1.5-py2.7.egg-info/top_level.txt +0 -1
  50. data/core/site-packages/pre_build/VUA_pylib/__init__.py +0 -1
  51. data/core/site-packages/pre_build/VUA_pylib/common/__init__.py +0 -1
  52. data/core/site-packages/pre_build/VUA_pylib/common/common.py +0 -28
  53. data/core/site-packages/pre_build/VUA_pylib/corpus_reader/__init__.py +0 -1
  54. data/core/site-packages/pre_build/VUA_pylib/corpus_reader/google_web_nl.py +0 -156
  55. data/core/site-packages/pre_build/VUA_pylib/io_utils/__init__.py +0 -1
  56. data/core/site-packages/pre_build/VUA_pylib/io_utils/feature_file.py +0 -121
  57. data/core/site-packages/pre_build/VUA_pylib/lexicon/__init__.py +0 -1
  58. data/core/site-packages/pre_build/VUA_pylib/lexicon/lexicon.py +0 -72
  59. data/core/site-packages/pre_build/VUKafParserPy-1.0-py2.7.egg-info/PKG-INFO +0 -10
  60. data/core/site-packages/pre_build/VUKafParserPy-1.0-py2.7.egg-info/SOURCES.txt +0 -7
  61. data/core/site-packages/pre_build/VUKafParserPy-1.0-py2.7.egg-info/dependency_links.txt +0 -1
  62. data/core/site-packages/pre_build/VUKafParserPy-1.0-py2.7.egg-info/installed-files.txt +0 -11
  63. data/core/site-packages/pre_build/VUKafParserPy-1.0-py2.7.egg-info/top_level.txt +0 -1
  64. data/core/site-packages/pre_build/VUKafParserPy/KafDataObjectsMod.py +0 -165
  65. data/core/site-packages/pre_build/VUKafParserPy/KafParserMod.py +0 -439
  66. data/core/site-packages/pre_build/VUKafParserPy/__init__.py +0 -7
  67. data/pre_build_requirements.txt +0 -3
data/core/site-packages/pre_build/KafNafParser/feature_extractor/dependency.py
@@ -1,309 +0,0 @@
- from operator import itemgetter
- from VUA_pylib.common import get_max_distr_dict
- import sys
-
- class Cdependency_extractor:
-     def __init__(self,knaf_obj):
-         self.naf = knaf_obj
-         self.relations_for_term = {}
-         self.reverse_relations_for_term = {}
-         self.prefix_for_reverse = ''
-
-
-         already_linked = {}
-         for dep in knaf_obj.get_dependencies():
-             term_from = dep.get_from()
-             term_to = dep.get_to()
-             rfunc = dep.get_function()
-
-             # Dependencies reversed are skipped...
-             #if rfunc.startswith('rhd/') or rfunc.startswith('whd/'):
-             #    continue
-
-             # For detecting cycles like:
-             # <!-- rhd/body(geef,wat) -->
-             # <dep from="t19" to="t15" rfunc="rhd/body"/>
-             # <!-- hd/su(wat,geef) -->
-             # <dep from="t15" to="t19" rfunc="hd/su"/>
-
-             '''
-             if term_from in already_linked and term_to in already_linked[term_from]:
-                 #There could be a cycle, skip this
-                 print>>sys.stderr,'Skipped from',term_from,'to',term_to,'func',rfunc,' cycle detected'
-                 continue
-             else:
-                 #Include term_from as linked with term_to for future...
-                 if term_to not in already_linked:
-                     already_linked[term_to] = set()
-                 already_linked[term_to].add(term_from)
-             '''
-
-
-
-
-             if term_from in self.relations_for_term:
-                 self.relations_for_term[term_from].append((rfunc,term_to))
-             else:
-                 self.relations_for_term[term_from] = [(rfunc,term_to)]
-
-             if term_to in self.reverse_relations_for_term:
-                 self.reverse_relations_for_term[term_to].append((self.prefix_for_reverse+rfunc,term_from))
-             else:
-                 self.reverse_relations_for_term[term_to] = [(self.prefix_for_reverse+rfunc,term_from)]
-
-
-         self.paths_for_termid={}
-         self.sentence_for_termid={}
-         self.top_relation_for_term = {} ## termid --> (relation,topnode)
-         self.root_for_sentence = {} ## sentenceid --> termid
-
-         for term_obj in knaf_obj.get_terms():
-             termid = term_obj.get_id()
-
-             #Calculating the sentence id for the term id
-             span_ids = term_obj.get_span().get_span_ids()
-             token_obj = knaf_obj.get_token(span_ids[0])
-             if token_obj is None:
-                 continue
-
-             sentence = token_obj.get_sent()
-
-             self.sentence_for_termid[termid] = sentence
-             ###########################################
-
-             #paths = self.__propagate_node(termid,[])
-             #inversed = self.__reverse_propagate_node(termid)
-
-             ## Due to the change on direction of dependencies...
-             inversed = self.__propagate_node(termid,already_propagated=[])
-             paths = self.__reverse_propagate_node(termid,already_propagated=[])
-
-             ##Calculate the top relation for the node, the relation with the main root of the tree
-             if len(inversed) != 0:
-                 for ip in inversed:
-                     if len(ip)!=0:
-                         self.top_relation_for_term[termid] = ip[-1] ## ex. ('NMOD', 't2')
-                         root = ip[-1][1]
-                         if sentence not in self.root_for_sentence:
-                             self.root_for_sentence[sentence] = {}
-
-                         if root not in self.root_for_sentence[sentence]:
-                             self.root_for_sentence[sentence][root]=0
-                         else:
-                             self.root_for_sentence[sentence][root]+=1
-                         break
-
-             self.paths_for_termid[termid] = paths + inversed
-
-             '''
-             print termid
-             print 'DIRECT RELS'
-             for p in paths:
-                 print ' ',p
-
-             print 'INDIRECT RELS'
-             for p in inversed:
-                 print ' ',p
-             '''
-             ####
-
-         for sent_id, distr in self.root_for_sentence.items():
-             ## get_max_distr_dict imported from VUA_pylib.common
-             most_freq,c = get_max_distr_dict(distr)
-             self.root_for_sentence[sent_id] = most_freq
-
-
-
-
-     def __propagate_node(self,node,already_propagated=[]):
-         paths = []
-
-         relations = self.relations_for_term.get(node)
-         #print 'Propagate ',node,relations
-         if relations is None: ##Case base
-             paths = [[]]
-         elif node in already_propagated:
-             paths = [[]]
-
-         else:
-             already_propagated.append(node)
-             for func, target_node in relations:
-                 new_paths = self.__propagate_node(target_node, already_propagated)
-                 for new_path in new_paths:
-                     new_path.insert(0,(func,target_node))
-                     paths.append(new_path)
-         return paths
-
-     def __reverse_propagate_node(self,node,already_propagated=[]):
-         paths = []
-         relations = self.reverse_relations_for_term.get(node)
-         #print 'Propagate reverse',node,relations,already_propagated
-         if relations is None: ##Case base
-             paths = [[]]
-         elif node in already_propagated:
-             paths = [[]]
-         else:
-             already_propagated.append(node)
-             for func, target_node in relations:
-                 new_paths = self.__reverse_propagate_node(target_node,already_propagated)
-                 for new_path in new_paths:
-                     new_path.insert(0,(func,target_node))
-                     paths.append(new_path)
-         return paths
-
-
-     # Get the shortest path between 2 term ids
-     def get_shortest_path(self,term1,term2):
-         dep_path = None
-         if term1 == term2: dep_path = []
-         else:
-             paths1 = self.paths_for_termid[term1]
-             paths2 = self.paths_for_termid[term2]
-
-             ##Check if term2 is on paths1
-             hits = [] ## list of (common_id,idx1,idx2,numpath1,numpath2)
-             for num1, p1 in enumerate(paths1):
-                 ids1 = [ my_id for my_func, my_id in p1]
-                 if term2 in ids1:
-                     idx1 = ids1.index(term2)
-                     hits.append((term2,idx1+0,idx1,0,num1,None))
-
-             for num2,p2 in enumerate(paths2):
-                 ids2 = [ my_id for my_func, my_id in p2]
-                 if term1 in ids2:
-                     idx2=ids2.index(term1)
-                     hits.append((term1,0+idx2,0,idx2,None,num2))
-
-             #Pair by pair
-             for num1, p1 in enumerate(paths1):
-                 #print 'Path1',term1, p1
-                 ids1 = [ my_id for my_func, my_id in p1]
-                 #print 'IDS1',ids1
-                 for num2, p2 in enumerate(paths2):
-                     #print '\t',term2,p2
-                     ids2 = [ my_id for my_func, my_id in p2]
-                     #print ' IDS2',ids2
-                     common_ids = set(ids1) & set(ids2)
-                     #print ' cmmon',common_ids
-                     for common_id in common_ids:
-                         idx1 = ids1.index(common_id)
-                         idx2 = ids2.index(common_id)
-                         hits.append((common_id,idx1+idx2,idx1,idx2,num1,num2))
-
-
-             if len(hits) != 0:
-                 dep_path = []
-                 hits.sort(key=itemgetter(1))
-                 best_hit = hits[0]
-                 common_id, _, idx1, idx2, numpath1, numpath2 = best_hit
-
-                 if numpath2 is None: #term2 is in one of the paths of t1
-                     path1 = paths1[numpath1]
-                     my_rels1 = path1[:idx1+1]
-                     ##complete_path = ''
-                     ##complete_path_ids = ''
-                     for func,node in my_rels1:
-                         dep_path.append(func)
-                         ##complete_path+=func+'#'
-                         ##complete_path_ids+=node+'#'
-
-                     #===========================================================
-                     # print 'CASE1',best_hit
-                     # print complete_path
-                     # print complete_path_ids
-                     #===========================================================
-                 elif numpath1 is None: #term1 is in one of the paths of t2
-                     path2 = paths2[numpath2]
-                     my_rels2 = path2[:idx2+1]
-                     ##complete_path = ''
-                     ##complete_path_ids = ''
-                     for func,node in my_rels2:
-                         dep_path.append(func)
-                         #complete_path+=func+'#'
-                         #complete_path_ids+=node+'#'
-
-                     #===========================================================
-                     # print 'CASE2',best_hit
-                     # print complete_path
-                     # print complete_path_ids
-                     #===========================================================
-                 else: #There is a common node linking both
-                     path1 = paths1[numpath1]
-                     my_rels1 = path1[:idx1+1]
-
-                     path2 = paths2[numpath2]
-                     my_rels2 = path2[:idx2+1]
-
-                     ##complete_path = ''
-                     #complete_path_ids = ''
-                     for func,node in my_rels1:
-                         dep_path.append(func)
-                         ##complete_path+=func+'#'
-                         #complete_path_ids+=func+'->'+self.naf.get_term(node).get_lemma()+'->'
-
-                     for func,node in my_rels2[-1::-1]:
-                         dep_path.append(func)
-                         ##complete_path+=func+'#'
-                         #complete_path_ids+=func+'->'+self.naf.get_term(node).get_lemma()+'->'
-                     #===========================================================
-                     #
-                     # print complete_path
-                     # print complete_path_ids
-                     # print path2
-                     # print my_rels1
-                     # print my_rels2
-                     # print 'CASE3',best_hit
-                     #===========================================================
-         return dep_path
-
-     ## Get the shortest dependency path between 2 sets of spans
-     def get_shortest_path_spans(self,span1,span2):
-         shortest_path = None
-
-         for term1 in span1:
-             for term2 in span2:
-                 this_path = self.get_shortest_path(term1, term2)
-                 #print term1,term2, this_path
-                 if shortest_path is None or (this_path is not None and len(this_path)<len(shortest_path)):
-                     shortest_path = this_path
-         return shortest_path
-
-     # Get the dependency path to the sentence root for a term id
-     def get_path_to_root(self,termid):
-         # Get the sentence for the term
-         root = None
-         sentence = self.sentence_for_termid.get(termid)
-
-         if sentence is None: #try with the top node
-             top_node = self.top_relation_for_term.get(termid)
-             if top_node is not None:
-                 root = top_node[1]
-             else:
-                 return None
-         else:
-             if sentence in self.root_for_sentence:
-                 root = self.root_for_sentence[sentence]
-             else:
-                 ##There is no root for this sentence
-                 return None
-         # In this point top_node should be properly set
-         path = self.get_shortest_path(termid, root)
-         return path
-
-     # Get the shortest dependency path to the sentence root for a span of ids
-     # extractor.get_shortest_path_to_root_span(['t444','t445','t446'])
-     def get_shortest_path_to_root_span(self,span):
-         shortest_path = None
-         for termid in span:
-             this_path = self.get_path_to_root(termid)
-             ## In case of , or . or whatever, the path to the root usually is None, there are no dependencies...
-             if shortest_path is None or (this_path is not None and len(this_path) < len(shortest_path)):
-                 shortest_path = this_path
-         return shortest_path
-
-
-
-
-
-
-
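For orientation: the removed Cdependency_extractor walks the KAF/NAF dependency layer and computes shortest dependency paths between terms and up to sentence roots. Below is a minimal usage sketch, not taken from this gem; the import paths, input file name, and term ids are illustrative assumptions based on the bundled (and now removed) KafNafParser package shown above.

# Illustrative sketch only; import paths and ids are assumptions, not part of the diff.
from KafNafParser import KafNafParser                               # assumed import path
from KafNafParser.feature_extractor import Cdependency_extractor    # assumed import path

knaf_obj = KafNafParser('input.kaf')          # hypothetical KAF/NAF input document
extractor = Cdependency_extractor(knaf_obj)

# Shortest dependency path (a list of rfunc labels) between two term ids
print(extractor.get_shortest_path('t15', 't19'))

# Shortest path between two spans, and from a span up to the sentence root
print(extractor.get_shortest_path_spans(['t1', 't2'], ['t7', 't8']))
print(extractor.get_shortest_path_to_root_span(['t444', 't445', 't446']))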
data/core/site-packages/pre_build/KafNafParser/features_data.py
@@ -1,131 +0,0 @@
- from lxml import etree
- from lxml.objectify import dump
- from references_data import *
-
-
-
- class Cproperty:
-     def __init__(self,node=None,type='NAF'):
-         self.type = type
-         if node is None:
-             self.node = etree.Element('property')
-         else:
-             self.node = node
-
-     def get_node(self):
-         return self.node
-
-     def get_id(self):
-         if self.type == 'KAF':
-             return self.node.get('pid')
-         elif self.type == 'NAF':
-             return self.node.get('id')
-
-     def set_id(self,pid):
-         if self.type == 'KAF':
-             return self.node.set('pid',pid)
-         elif self.type == 'NAF':
-             return self.node.set('id',pid)
-
-     def get_type(self):
-         return self.node.get('lemma')
-
-     def set_type(self,t):
-         return self.node.set('lemma',t)
-
-     def get_references(self):
-         for ref_node in self.node.findall('references'):
-             yield Creferences(ref_node)
-
-     def set_reference(self,ref):
-         self.node.append(ref.get_node())
-
-
-
- class Cproperties:
-     def __init__(self,node=None,type='NAF'):
-         self.type=type
-         if node is None:
-             self.node = etree.Element('properties')
-         else:
-             self.node = node
-
-     def get_node(self):
-         return self.node
-
-     def __iter__(self):
-         for prop_node in self.node.findall('property'):
-             yield Cproperty(prop_node,self.type)
-
-     def add_property(self,pid, label,term_span):
-         new_property = Cproperty(type=self.type)
-         self.node.append(new_property.get_node())
-         ##Set the id
-         if pid is None:
-             ##Generate a new pid
-             existing_pids = [property.get_id() for property in self]
-             n = 0
-             new_pid = ''
-             while True:
-                 new_pid = 'p'+str(n)
-                 if new_pid not in existing_pids: break
-                 n += 1
-             pid = new_pid
-         new_property.set_id(pid)
-
-         new_property.set_type(label)
-
-         new_ref = Creferences()
-         new_ref.add_span(term_span)
-         new_property.set_reference(new_ref)
-
-
-
- class Cfeatures:
-     def __init__(self,node=None,type='NAF'):
-         self.type = type
-         if node is None:
-             self.node = etree.Element('features')
-         else:
-             self.node = node
-
-     def get_node(self):
-         return self.node
-
-     def to_kaf(self):
-         if self.type == 'NAF':
-             ##convert all the properties
-             for node in self.node.findall('properties/property'):
-                 node.set('pid',node.get('id'))
-                 del node.attrib['id']
-
-     def to_naf(self):
-         if self.type == 'KAF':
-             ##convert all the properties
-             for node in self.node.findall('properties/property'):
-                 node.set('id',node.get('pid'))
-                 del node.attrib['pid']
-
-     def add_property(self,pid, label,term_span):
-         node_prop = self.node.find('properties')
-         if node_prop is None:
-             properties = Cproperties(type=self.type)
-             self.node.append(properties.get_node())
-         else:
-             properties = Cproperties(node=node_prop,type=self.type)
-
-         properties.add_property(pid, label,term_span)
-
-
-     def get_properties(self):
-         node_prop = self.node.find('properties')
-         if node_prop is not None:
-             obj_properties = Cproperties(node_prop,self.type)
-             for prop in obj_properties:
-                 yield prop
-
-     def remove_properties(self):
-         node_prop = self.node.find('properties')
-         if node_prop is not None:
-             self.node.remove(node_prop)
-
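The removed features_data.py wrapped the KAF/NAF features layer: each property carries a label (stored in the lemma attribute) and a references element spanning term ids. A short sketch of that API as it appears above; the import path and the property label are assumptions for illustration only.

# Illustrative sketch only; the import path and label are assumptions.
from lxml import etree
from KafNafParser.features_data import Cfeatures   # assumed import path

features = Cfeatures(type='NAF')                   # starts from an empty <features> element
# Passing pid=None lets add_property generate the next free id ('p0', 'p1', ...)
features.add_property(None, 'hotel_quality', ['t3', 't4'])
print(etree.tostring(features.get_node(), pretty_print=True))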
data/core/site-packages/pre_build/KafNafParser/header_data.py
@@ -1,127 +0,0 @@
- # Modified to KAF / NAF
-
- from lxml import etree
- import time
-
- class CfileDesc:
-     def __init__(self,node=None):
-         self.type = 'KAF/NAF'
-         if node is None:
-             self.node = etree.Element('fileDesc')
-         else:
-             self.node = node
-
-         #self.title='' #self.author='' #self.creationtime='' #self.filename='' #self.filetype='' #self.pages=''
-
-
- class Cpublic:
-     def __init__(self,node=None):
-         self.type = 'KAF/NAF'
-         if node is None:
-             self.node = etree.Element('public')
-         else:
-             self.node = node
-
-         #self.publicId = ''
-         #slf.uri = ''
-
-
- class Clp:
-     def __init__(self,node=None,name="",version="",timestamp=None):
-         self.type = 'KAF/NAF'
-         if node is None:
-             self.node = etree.Element('lp')
-             self.set_name(name)
-             self.set_version(version)
-             self.set_timestamp(timestamp)
-         else:
-             self.node = node
-
-     def set_name(self,name):
-         self.node.set('name',name)
-
-     def set_version(self,version):
-         self.node.set('version',version)
-
-     def set_timestamp(self,timestamp=None):
-         if timestamp is None:
-             import time
-             timestamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z')
-         self.node.set('timestamp',timestamp)
-
-
-     def get_node(self):
-         return self.node
-
-
- class ClinguisticProcessors:
-     def __init__(self,node=None):
-         self.type = 'KAF/NAF'
-         if node is None:
-             self.node = etree.Element('linguisticProcessors')
-         else:
-             self.node = node
-
-     def get_layer(self):
-         return self.node.get('layer')
-
-     def set_layer(self,layer):
-         self.node.set('layer',layer)
-
-     def add_linguistic_processor(self,my_lp):
-         self.node.append(my_lp.get_node())
-
-     def get_node(self):
-         return self.node
-
-
- class CHeader:
-     def __init__(self,node=None,type='NAF'):
-         self.type = type
-         if node is None:
-             if self.type == 'NAF':
-                 self.node = etree.Element('nafHeader')
-             elif self.type == 'KAF':
-                 self.node = etree.Element('kafHeader')
-         else:
-             self.node = node
-
-     def to_kaf(self):
-         if self.type == 'NAF':
-             self.node.tag = 'kafHeader'
-             self.type = 'KAF'
-
-     def to_naf(self):
-         if self.type == 'KAF':
-             self.node.tag = 'nafHeader'
-             self.type = 'NAF'
-
-     def add_linguistic_processors(self,linpro):
-         self.node.append(linpro.get_node())
-
-     def remove_lp(self,layer):
-         for this_node in self.node.findall('linguisticProcessors'):
-             if this_node.get('layer') == layer:
-                 self.node.remove(this_node)
-                 break
-
-
-     def add_linguistic_processor(self, layer ,my_lp):
-         ## Locate the linguisticProcessor element for taht layer
-         found_lp_obj = None
-         for this_lp in self.node.findall('linguisticProcessors'):
-             lp_obj = ClinguisticProcessors(this_lp)
-             if lp_obj.get_layer() == layer:
-                 found_lp_obj = lp_obj
-                 break
-
-         if found_lp_obj is None: #Not found
-             found_lp_obj = ClinguisticProcessors()
-             found_lp_obj.set_layer(layer)
-             self.add_linguistic_processors(found_lp_obj)
-
-         found_lp_obj.add_linguistic_processor(my_lp)
-
-
-
-
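Finally, the removed header_data.py held the KAF/NAF header wrappers that components use to register themselves as linguistic processors. A sketch of that registration flow under stated assumptions; the import path and the 'opinions' layer name are illustrative, not taken from the diff.

# Illustrative sketch only; import path and layer name are assumptions.
from KafNafParser.header_data import CHeader, Clp   # assumed import path

header = CHeader(type='NAF')                              # creates an empty <nafHeader> element
lp = Clp(name='opinion_detector_base', version='2.1.2')   # timestamp defaults to the current time
header.add_linguistic_processor('opinions', lp)           # appends an <lp> under the 'opinions' layer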