proiel-cli 1.2.1 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/README.md +11 -3
- data/bin/proiel +1 -1
- data/lib/proiel/cli/commands/build.rb +91 -0
- data/lib/proiel/cli/commands/convert.rb +7 -2
- data/lib/proiel/cli/commands/dictionary.rb +46 -0
- data/lib/proiel/cli/commands/info.rb +1 -1
- data/lib/proiel/cli/commands/shell.rb +34 -0
- data/lib/proiel/cli/commands/tokenize.rb +2 -2
- data/lib/proiel/cli/commands/validate.rb +1 -1
- data/lib/proiel/cli/commands/visualize.rb +14 -11
- data/lib/proiel/cli/converters/conll-u/morphology.rb +162 -72
- data/lib/proiel/cli/converters/conll-u/syntax.rb +108 -62
- data/lib/proiel/cli/converters/conll-u.rb +648 -548
- data/lib/proiel/cli/converters/conll-x.rb +67 -52
- data/lib/proiel/cli/converters/lexc.rb +21 -23
- data/lib/proiel/cli/converters/proielxml.rb +173 -132
- data/lib/proiel/cli/converters/text.rb +69 -71
- data/lib/proiel/cli/converters/tiger.rb +110 -114
- data/lib/proiel/cli/converters/tiger2.rb +139 -141
- data/lib/proiel/cli/converters/tnt.rb +19 -15
- data/lib/proiel/cli/version.rb +1 -1
- data/lib/proiel/cli.rb +26 -1
- metadata +43 -58
- data/bin/setup +0 -8
- data/contrib/proiel-tnt-train +0 -15
- data/lib/proiel/cli/commands.rb +0 -28
|
# Converter that outputs the CoNLL-X format as described on
# http://ilk.uvt.nl/conll/#dataformat.
#
# The conversion removes empty tokens. PRO tokens are completely ignored,
# while null C and null V tokens are eliminated by attaching their
# dependents to the first non-null ancestor and labelling them with a
# concatenation of dependency relations.
#
# Sequences of whitespace in forms and lemmas are represented by '.'.
module PROIEL
  module Converter
    class CoNLLX
      class << self
        # Entry point: emits one CoNLL-X block per sentence in every source
        # of the treebank +tb+. The second argument (options) is unused.
        def process(tb, _)
          tb.sources.each do |source|
            source.sentences.each do |sentence|
              process_sentence(tb, sentence)
            end
          end
        end

        def process_sentence(tb, sentence)
          tokens = sentence.tokens

          # Generate a 1-based contiguous numbering of overt tokens with
          # null V and null C tokens appended at the end. We do this
          # manually to ensure that the numbering is correct whatever the
          # sequence is in the treebank. The Hash default block assigns the
          # next free number on first lookup.
          id_map = Hash.new { |h, k| h[k] = h.keys.length + 1 }
          tokens.select(&:has_content?).each { |t| id_map[t] } # these lookups have side-effects
          tokens.reject(&:has_content?).reject(&:pro?).each { |t| id_map[t] }

          # Iterate overt tokens and print one formatted line per token.
          tokens.select(&:has_content?).each do |token|
            this_number = id_map[token]
            head_number, relation = find_lexical_head_and_relation(id_map, tb, token)
            form = format_text(token.form)
            lemma = format_text(token.lemma)
            pos_major, pos_full = format_pos(token)
            morphology = format_morphology(token)

            puts [this_number, form, lemma, pos_major, pos_full,
                  morphology, head_number, relation, '_', '_'].join("\t")
          end

          # Separate sentences by an empty line.
          puts
        end

        # Replaces each run of whitespace in +s+ with a single '.'.
        def format_text(s)
          s.gsub(/[[:space:]]+/, '.')
        end

        # Returns the coarse (major) and fine-grained part-of-speech tags.
        def format_pos(token)
          [token.part_of_speech_hash[:major], token.part_of_speech]
        end

        # Joins morphology fields as FIELDvalue pairs separated by '|'
        # (e.g. "PERS3|NUMBs"). Field names are truncated to four characters.
        def format_morphology(token)
          token.morphology_hash.map do |k, v|
            # Remove the inflection tag except when set to inflecting.
            if k == :inflection and v == 'i'
              nil
            else
              "#{k.upcase[0..3]}#{v}"
            end
          end.compact.join('|')
        end

        # Walks up the dependency tree from +t+ until an overt head (or the
        # root) is found, accumulating the intermediate relations so that
        # eliminated null nodes remain recoverable from the label.
        def find_lexical_head_and_relation(id_map, tb, t, rel = '')
          new_relation = rel + t.relation

          if t.is_root?
            [0, new_relation]
          elsif t.head.has_content?
            [id_map[t.head], new_relation]
          else
            find_lexical_head_and_relation(id_map, tb, t.head, "#{new_relation}(#{id_map[t.head]})")
          end
        end
      end
    end
  end
end
|
# Converter that outputs a lexc file with part of speech and morphology.
module PROIEL
  module Converter
    class Lexc
      class << self
        # Collects every overt token in the treebank +tb+ into a lexicon of
        # form => tag strings and prints it as a single lexc "LEXICON Root"
        # section on stdout. When options['morphology'] is set, the
        # morphology tag is appended to the part-of-speech tag.
        def process(tb, options)
          lexicon = {}

          tb.sources.each do |source|
            source.divs.each do |div|
              div.sentences.each do |sentence|
                sentence.tokens.each do |token|
                  unless token.is_empty?
                    lexicon[token.form] ||= []
                    if options['morphology']
                      lexicon[token.form] << [token.lemma, [token.part_of_speech, token.morphology].join].join(',')
                    else
                      lexicon[token.form] << [token.lemma, token.part_of_speech].join(',')
                    end
                  end
                end
              end
            end
          end

          puts 'LEXICON Root'
          # Sort entries by form, then emit each distinct tag once per form.
          lexicon.sort.each do |form, tags|
            tags.sort.uniq.each do |tag|
              puts ' %s:%s #;' % [tag, form]
            end
          end
        end
      end
    end
  end
end
|
# Converter that outputs PROIEL XML. This is primarily useful for filtering,
# merging or splitting PROIEL XML data. It is also useful for "upgrading"
# PROIEL XML to a new version or for testing round tripping of data.
module PROIEL
  module Converter
    class PROIELXML
      class << self
        # Entry point: writes a complete PROIEL XML 2.1 document for the
        # treebank +tb+ to STDOUT. +options+ is a string-keyed hash of
        # remove-*/infer-* switches that control filtering.
        def process(tb, options)
          builder = Builder::XmlMarkup.new(target: STDOUT, indent: 2)
          builder.instruct! :xml, version: '1.0', encoding: 'UTF-8'
          builder.proiel('export-time' => DateTime.now.xmlschema, 'schema-version' => '2.1') do
            # Emit the annotation schema first: relations, parts of speech,
            # morphology fields and information statuses.
            builder.annotation do
              builder.relations do
                tb.annotation_schema.relation_tags.each do |tag, value|
                  attrs = { tag: tag }
                  attrs.merge!(grab_features(value, %i(summary primary secondary)))
                  builder.value(attrs)
                end
              end

              builder.tag! 'parts-of-speech' do
                tb.annotation_schema.part_of_speech_tags.each do |tag, value|
                  attrs = { tag: tag }
                  attrs.merge!(grab_features(value, %i(summary)))
                  builder.value(attrs)
                end
              end

              builder.morphology do
                tb.annotation_schema.morphology_tags.each do |cat_tag, cat_values|
                  builder.field(tag: cat_tag) do
                    cat_values.each do |tag, value|
                      attrs = { tag: tag }
                      attrs.merge!(grab_features(value, %i(summary)))
                      builder.value(attrs)
                    end
                  end
                end
              end

              builder.tag! 'information-statuses' do
                tb.annotation_schema.information_status_tags.each do |tag, value|
                  attrs = { tag: tag }
                  attrs.merge!(grab_features(value, %i(summary)))
                  builder.value(attrs)
                end
              end
            end

            tb.sources.each do |source|
              next if options['remove-unaligned-sources'] and source.alignment_id.nil?

              mandatory_features = %i(id language)
              optional_features = []
              optional_features += %i(alignment_id) unless options['remove-alignments']

              builder.source(grab_features(source, mandatory_features, optional_features)) do
                PROIEL::Treebank::METADATA_ELEMENTS.each do |field|
                  builder.tag!(field.to_s.gsub('_', '-'), source.send(field)) if source.send(field)
                end

                source.divs.each do |div|
                  if include_div?(div, options)
                    # Per-level attribute overrides, filled in as we descend.
                    overrides = {
                      div: {},
                      sentence: {},
                      token: {}
                    }

                    process_div(builder, tb, source, div, options, overrides)
                  end
                end
              end
            end
          end
        end

        # A div is kept unless 'remove-empty-divs' is set and none of its
        # sentences survive filtering.
        def include_div?(div, options)
          if options['remove-empty-divs']
            div.sentences.any? { |sentence| include_sentence?(sentence, options) }
          else
            true
          end
        end

        # Decides whether a sentence survives the status-based filters.
        def include_sentence?(sentence, options)
          case sentence.status
          when :reviewed
            not options['remove-reviewed'] and not options['remove-annotated']
          when :annotated
            not options['remove-not-reviewed'] and not options['remove-annotated']
          else
            not options['remove-not-reviewed'] and not options['remove-not-annotated']
          end
        end

        # PRO tokens are dropped when information structure is removed;
        # null C and null V tokens are dropped when syntax is removed.
        def include_token?(token, options)
          if options['remove-syntax'] and (token.empty_token_sort == 'C' or token.empty_token_sort == 'V')
            false
          elsif token.empty_token_sort == 'P' and options['remove-information-structure']
            false
          else
            true
          end
        end

        def process_div(builder, tb, source, div, options, overrides)
          mandatory_features = %i()

          optional_features = []
          optional_features += %i(presentation_before presentation_after)
          optional_features += %i(id alignment_id) unless options['remove-alignments']

          if options['infer-alignments'] and source.alignment_id
            aligned_source = tb.find_source(source.alignment_id)
            # FIXME: how to behave here? overwrite existing? what if nil? how to deal with multiple aligned divs?
            overrides[:div][:alignment_id] = div.alignment_id || div.inferred_alignment(aligned_source).map(&:id).join(',')
          end

          builder.div(grab_features(div, mandatory_features, optional_features, overrides[:div])) do
            builder.title div.title if div.title

            div.sentences.select do |sentence|
              include_sentence?(sentence, options)
            end.each do |sentence|
              process_sentence(builder, tb, sentence, options, overrides)
            end
          end
        end

        def process_sentence(builder, tb, sentence, options, overrides)
          mandatory_features = %i(id)

          optional_features = [] # we do it this way to preserve the order of status and presentation_* so that diffing files is easier
          optional_features += %i(status) unless options['remove-status']
          optional_features += %i(presentation_before presentation_after)
          optional_features += %i(alignment_id) unless options['remove-alignments']
          optional_features += %i(annotated_at) unless options['remove-annotator']
          optional_features += %i(reviewed_at) unless options['remove-reviewer']
          optional_features += %i(annotated_by) unless options['remove-annotator']
          optional_features += %i(reviewed_by) unless options['remove-reviewer']

          builder.sentence(grab_features(sentence, mandatory_features, optional_features)) do
            sentence.tokens.select do |token|
              include_token?(token, options)
            end.each do |token|
              process_token(builder, tb, token, options, overrides)
            end
          end
        end

        def process_token(builder, tb, token, options, overrides)
          mandatory_features = %i(id)

          optional_features = %i(citation_part)
          optional_features += %i(lemma part_of_speech morphology) unless options['remove-morphology']
          optional_features += %i(head_id relation) unless options['remove-syntax']
          optional_features += %i(antecedent_id information_status contrast_group) unless options['remove-information-structure']

          unless token.is_empty?
            mandatory_features << :form
            optional_features += %i(presentation_before presentation_after foreign_ids)
          else
            mandatory_features << :empty_token_sort
          end

          # Drop dangling antecedent references when the antecedent's
          # sentence may have been filtered away.
          # NOTE(review): options['remove-annotated'] is tested twice below;
          # the final clause was probably meant to be 'remove-reviewed' — confirm upstream.
          if options['remove-not-reviewed'] or options['remove-not-annotated'] or options['remove-annotated'] or options['remove-annotated']
            overrides[:token][:antecedent_id] =
              (token.antecedent_id and include_sentence?(tb.find_token(token.antecedent_id.to_i).sentence, options)) ? token.antecedent_id : nil
          end

          optional_features += %i(alignment_id) unless options['remove-alignments']

          attrs = grab_features(token, mandatory_features, optional_features, overrides[:token])

          unless token.slashes.empty? or options['remove-syntax'] # this extra test avoids <token></token> style XML
            builder.token(attrs) do
              token.slashes.each do |relation, target_id|
                builder.slash(:"target-id" => target_id, relation: relation)
              end
            end
          else
            unless options['remove-syntax'] and token.is_empty?
              builder.token(attrs)
            end
          end
        end

        # Builds an attribute hash from +obj+: mandatory features are always
        # included, optional features only when present and non-blank.
        # Values in +overrides+ win over the object's own attributes (a nil
        # override suppresses an optional feature). Underscores in feature
        # names become hyphens in attribute names.
        def grab_features(obj, mandatory_features, optional_features = [], overrides = {})
          attrs = {}

          mandatory_features.each do |f|
            v = overrides.key?(f) ? overrides[f] : obj.send(f)

            attrs[f.to_s.gsub('_', '-')] = v
          end

          optional_features.each do |f|
            v = overrides.key?(f) ? overrides[f] : obj.send(f)

            if v and v.to_s != ''
              attrs[f.to_s.gsub('_', '-')] = v
            end
          end

          attrs
        end
      end
    end
  end
end