aac-metrics 0.1.1 → 0.1.2
- checksums.yaml +4 -4
- data/lib/aac-metrics/loader.rb +75 -11
- data/lib/aac-metrics/metrics.rb +196 -26
- data/lib/aac-metrics.rb +1 -1
- data/sets/common_words.en.json +1507 -2837
- data/sets/l84f-e9fafa55d4.common.en.obfset +79170 -0
- data/sets/sentences.en.json +31 -0
- data/sets/synonyms.en.json +14 -2
- data/sets/wp108-c428d7f2dc.en.obfset +53075 -0
- data/sets/wp80-dad3aeda5e.common.en.obfset +47115 -0
- metadata +7 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5cfa37f885dec40c12ac7cbb86c44ac49ceec1138866b3de2e52b84facdddb87
+  data.tar.gz: e29f98ec3c9aa3840a513787003a5db196dd09b81699c3a81b252439929e4dcd
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3e54f361e67a70f8c68f85469ffcc0c9da6c8269928b707b2e6e4b5c353e5bc19f64de3d93d50c7c37304822924bf8ca862e35e3f6a4ed692ad2781c444403bb
+  data.tar.gz: c12274971262bf327f9d2e6c29286bd1147fed6e5cf3d7a022a869d9810669e796fff20bfabcf13b3e973984a480a936e33468c4d4b2b7ad8f82737bf4b1b33d
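
For reference, the new SHA256 values can be checked against an unpacked copy of the 0.1.2 gem. This is a minimal sketch, assuming the .gem file (a tar archive) has already been extracted so that metadata.gz and data.tar.gz sit in the working directory; it uses only Ruby's standard Digest library.

# sketch: verify the published SHA256 checksums of the unpacked gem members
require 'digest'

EXPECTED = {
  'metadata.gz' => '5cfa37f885dec40c12ac7cbb86c44ac49ceec1138866b3de2e52b84facdddb87',
  'data.tar.gz' => 'e29f98ec3c9aa3840a513787003a5db196dd09b81699c3a81b252439929e4dcd'
}

EXPECTED.each do |file, sha|
  actual = Digest::SHA256.file(file).hexdigest
  puts "#{file}: #{actual == sha ? 'OK' : 'MISMATCH'}"
end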
data/lib/aac-metrics/loader.rb
CHANGED
@@ -78,6 +78,7 @@ module AACMetrics::Loader
     words_path = File.expand_path(File.join(File.dirname(__FILE__), '..', '..', 'sets', "base_words"))
     words = nil
     do_ingest = true
+    relations_hash = {}
 
     while paths.length > 0
       path = paths.shift
@@ -130,17 +131,7 @@ module AACMetrics::Loader
             "id" => "btn#{btn_idx}",
             "label" => (btn['vocalization'] || '').length > 0 ? btn['vocalization'] : btn['label']
           }
-
-          str = new_btn['label'].downcase.sub(/^\s+/, '').sub(/\s+$/, '')
-          if str.scan(/\s+/).length < 2
-            word_hash = Digest::MD5.hexdigest(str)[0, 10]
-            raise "collision!" if words[word_hash] && words[word_hash] != str
-            if add_words || words[word_hash]
-              words[word_hash] = str
-              new_btn['label'] = "$#{word_hash}"
-            end
-          end
-        end
+          # record load_board reference
           btn_idx += 1
           if btn['load_board']
             if btn['load_board']['path']
@@ -167,6 +158,36 @@ module AACMetrics::Loader
             # treat action buttons for metrics
             new_btn = nil
           end
+          # temporarily save semantic_id and possible clone_id for later use
+          # 1. Buttons in the same location with the same
+          #    semantic_id should be marked in the obfset as having
+          #    the same semantic_id
+          # 2. Buttons in the same location with the same label & voc
+          #    and same load_board setting
+          #    should be marked in the obfset as having the same clone_id
+          ref = "#{new_json['grid']['rows']}x#{new_json['grid']['columns']}-#{row_ids}.#{col_id}"
+          if btn['semantic_id']
+            relations_hash["s#{ref}-#{btn['semantic_id']}"] ||= []
+            relations_hash["s#{ref}-#{btn['semantic_id']}"] << [new_json['id'], new_btn['id']]
+          end
+          if new_btn['label']
+            # TODO: currently doesn't enforce same-location on links, just whether it's a linked button or not
+            pre = new_btn['load_board'] ? 'cl' : 'c'
+            relations_hash["#{pre}#{ref}-#{new_btn['label']}"] ||= []
+            relations_hash["#{pre}#{ref}-#{new_btn['label']}"] ||= [new_json['id'], new_btn['id']]
+          end
+          if do_ingest && new_btn['label']
+            str = new_btn['label'].downcase.sub(/^\s+/, '').sub(/\s+$/, '')
+            if str.scan(/\s+/).length < 2
+              word_hash = Digest::MD5.hexdigest(str)[0, 10]
+              raise "collision!" if words[word_hash] && words[word_hash] != str
+              if add_words || words[word_hash]
+                words[word_hash] = str
+                new_btn['label'] = "$#{word_hash}"
+              end
+            end
+          end
+
         end
         new_row.push(new_btn ? new_btn['id'] : nil)
         new_json['buttons'].push(new_btn) if new_btn
@@ -176,6 +197,29 @@ module AACMetrics::Loader
         boards << new_json
       end
     end
+    # any semantic_id or clone_id repeats must be recorded
+    relations_hash.each do |id, btns|
+      if btns && btns.length > 0
+        btns.each do |brd_id, btn_id|
+          brd = boards.detect{|b| b['id'] == brd_id }
+          if brd && brd['buttons']
+            btn = brd['buttons'].detect{|b| b['id'] == btn_id }
+            if btn
+              if id.match(/^s/)
+                btn['semantic_id'] = id
+                brd['semantic_ids'] ||= []
+                brd['semantic_ids'] << id
+              elsif id.match(/^c/)
+                btn['clone_id'] = id
+                brd['clone_ids'] ||= []
+                brd['clone_ids'] << id
+              end
+            end
+          end
+        end
+        #
+      end
+    end
     boards.each do |brd|
       brd['buttons'].each do |btn|
         if btn['load_board'] && btn['load_board']['tmp_path']
@@ -184,9 +228,21 @@ module AACMetrics::Loader
         end
       end
     end
+    # TODO: record whether the board set is expected to have auto-home
     {boards: boards, words: words, words_path: words_path}
   end
 
+  # TODO: Qualitative assessments of common vocabularies,
+  # gather perspectives on what makes a "good" vocabulary
+  # and collect reviews from field experts, also free
+  # response sections.
+  # Some criteria:
+  # - works well for age group X, Y, Z
+  # - works well for a beginning communicator
+  # - allows long-term growth as-is
+  # - comprehensive core
+  # -
+
   def self.ingest(fn, token=nil)
     output = nil
     boards = nil
@@ -292,6 +348,14 @@ module AACMetrics::Loader
     @@synonyms[locale] = res
   end
 
+  def self.sentences(locale)
+    @@sentences ||= {}
+    return @@sentences[locale] if @@sentences[locale]
+    locale = locale.split(/-|_/)[0]
+    path = File.expand_path(File.join(File.dirname(__FILE__), '..', '..', 'sets', "sentences.#{locale}.json"))
+    res = JSON.parse(File.read(path))
+    @@sentences[locale] = res
+  end
 
   def self.base_words(locale)
     @@base_words ||= {}
data/lib/aac-metrics/metrics.rb
CHANGED
@@ -2,31 +2,68 @@
 # Scores for average effort level for word sets (spelling if that's th only way)
 # Effort scores for sentence corpus
 # Effort algorithms for scanning/eyes
-
 module AACMetrics::Metrics
-
+  # TODO:
+  # 1. When navigating from one board to the next, grid locations
+  #    with the same clone_id or semantic_id should result in a
+  #    discount to overall search based more on the number of
+  #    uncloned/unsemantic buttons than the number of total buttons
+  #    (perhaps also factoring in the percent of board with that
+  #    id present in the full board set)
+  # 2. When selecting a button with a semantic_id or clone_id,
+  #    a discount to both search and selection should
+  #    be applied based on the percent of boards that
+  #    contain the same id at that grid location
+  # 3.5 When selecting a button with a semantic_id or clone_id,
+  #    if the same id was present on the previous board,
+  #    an additional discount to search and selection should be applied
+  def self.analyze(obfset, output=true)
     locale = nil
     buttons = []
+    refs = {}
     total_boards = 1
-
+
     if obfset.is_a?(Hash) && obfset['buttons']
       locale = obfset['locale'] || 'en'
+      refs = obfset['reference_counts']
       buttons = []
       obfset['buttons'].each do |btn|
         buttons << {
           id: btn['id'],
           label: btn['label'],
           level: btn['level'],
-          effort: btn['effort']
+          effort: btn['effort'],
+          semantic_id: btn['semantic_id'],
+          clone_id: btn['clone_id']
         }
       end
       total_boards = obfset['total_boards']
     else
       visited_board_ids = {}
       to_visit = [{board: obfset[0], level: 0, entry_x: 1.0, entry_y: 1.0}]
+      refs = {}
+      obfset.each do |board|
+        # determine frequency within the board set
+        # for each semantic_id and clone_id
+        if board['clone_ids']
+          boards['clone_ids'].each do |id|
+            refs[id] ||= 0
+            refs[id] += 1
+          end
+        end
+        if board['semantic_ids']
+          boards['semantic_ids'].each do |id|
+            refs[id] ||= 0
+            refs[id] += 1
+          end
+        end
+      end
+      pcts = {}
+      refs.each do |id, cnt|
+        pcts[id] = cnt.to_f / obfset.length.to_f
+      end
       locale = obfset[0]['locale']
       known_buttons = {}
-      sqrt2 = Math.sqrt(2)
       while to_visit.length > 0
         board = to_visit.shift
         visited_board_ids[board[:board]['id']] = board[:level]
@@ -35,32 +72,63 @@ module AACMetrics::Metrics
         btn_width = 1.0 / board[:board]['grid']['columns'].to_f
         board_effort = 0
         # add effort for level of complexity when new board is rendered
-
+        button_size = button_size_effort(board[:board]['grid']['rows'], board[:board]['grid']['columns'])
+        board_effort += button_size
         # add effort for number of visible buttons
-
+        field_size = field_size_effort(board[:board]['grid']['order'].flatten.length)
+        board_effort += field_size
+        # decrease effort here for every button on the board
+        # whose semantic_id or clone_id is repeated in the board set
+        # -0.0025 (* pct of matching boards) for semantic_id
+        # -0.005 (* pct of matching boards) for clone_id
+        board[:board]['grid']['rows'].times do |row_idx|
+          board[:board]['grid']['columns'].times do |col_idx|
+            button_id = (board[:board]['grid']['order'][row_idx] || [])[col_idx]
+            button = board[:board]['buttons'].detect{|b| b['id'] == button_id }
+            if button && button['clone_id'] && pcts[button['clone_id']]
+              board_effort -= 0.005 * pcts[button['clone_id']]
+            elsif button && button['semantic_id'] && pcts[button['semantic_id']]
+              board_effort -= 0.0025 * pcts[button['semantic_id']]
+            end
+          end
+        end
+
         prior_buttons = 0
 
         board[:board]['grid']['rows'].times do |row_idx|
           board[:board]['grid']['columns'].times do |col_idx|
             button_id = (board[:board]['grid']['order'][row_idx] || [])[col_idx]
             button = board[:board]['buttons'].detect{|b| b['id'] == button_id }
-            prior_buttons += 0.1 if !button
+            # prior_buttons += 0.1 if !button
             next unless button
             x = (btn_width / 2) + (btn_width * col_idx)
             y = (btn_height / 2) + (btn_height * row_idx)
             # prior_buttons = (row_idx * board[:board]['grid']['columns']) + col_idx
             effort = 0
+            # TODO: additional discount on board search effort
+            # if this button's semantic_id or clone_id
+            # was also present on the prior board
+            # board_effort * 0.5 for semantic_id
+            # board_effort * 0.33 for clone_id
             effort += board_effort
             # add effort for percent distance from entry point
-            distance =
+            distance = distance_effort(x, y, board[:entry_x], board[:entry_y])
+            # TODO: decrease effective distance if the semantic_id or clone_id:
+            # - are used on other boards in the set (semi)
+            #   distance * 0.5 (* pct of matching boards) for semantic_id
+            #   distance * 0.33 (* pct of matching boards) for clone_id
+            # - was also present on the prior board (total)
+            #   distance * 0.5 for semantic_id
+            #   distance * 0.33 for clone_id
             effort += distance
-            if distance >
+            if distance > DISTANCE_THRESHOLD_TO_SKIP_VISUAL_SCAN || (board[:entry_x] == 1.0 && board[:entry_y] == 1.0)
              # add small effort for every prior (visible) button when visually scanning
-
+              visual_scan = visual_scan_effort(prior_buttons)
+              effort += visual_scan
            else
              # ..unless it's right by the previous button, then
              # add tiny effort for local scan
-              effort += distance *
+              effort += distance * SKIPPED_VISUAL_SCAN_DISTANCE_MULTIPLIER
            end
            # add cumulative effort from previous sequence
            effort += board[:prior_effort] || 0
@@ -81,11 +149,12 @@ module AACMetrics::Metrics
             end
             if try_visit
               next_board = obfset.detect{|brd| brd['id'] == button['load_board']['id'] }
+              puts "LIKE[] #{effort}" if button['label'] == 'like'
               if next_board
                 to_visit.push({
                   board: next_board,
                   level: board[:level] + 1,
-                  prior_effort: effort +
+                  prior_effort: effort + BOARD_CHANGE_PROCESSING_EFFORT,
                   entry_x: x,
                   entry_y: y
                 })
@@ -94,7 +163,8 @@ module AACMetrics::Metrics
             else
               word = button['label']
               existing = known_buttons[word]
-              if !existing || board[:level] < existing[:level]
+              if !existing || existing[:effort] < effort #board[:level] < existing[:level]
+                puts "LIKE #{effort}" if button['label'] == 'like'
                 known_buttons[word] = {
                   id: "#{button['id']}::#{board[:board]['id']}",
                   label: word,
@@ -120,11 +190,41 @@ module AACMetrics::Metrics
      locale: locale,
      total_boards: total_boards,
      total_buttons: buttons.length,
+      reference_counts: refs,
      buttons: buttons,
      levels: clusters
    }
  end
 
+  SQRT2 = Math.sqrt(2)
+  BUTTON_SIZE_MULTIPLIER = 0.09
+  FIELD_SIZE_MULTIPLIER = 0.017
+  VISUAL_SCAN_MULTIPLIER = 0.02
+  BOARD_CHANGE_PROCESSING_EFFORT = 1.0
+  DISTANCE_MULTIPLIER = 0.5
+  DISTANCE_THRESHOLD_TO_SKIP_VISUAL_SCAN = 0.1
+  SKIPPED_VISUAL_SCAN_DISTANCE_MULTIPLIER = 0.5
+
+  def self.button_size_effort(rows, cols)
+    BUTTON_SIZE_MULTIPLIER * (rows + cols) / 2
+  end
+
+  def self.field_size_effort(button_count)
+    FIELD_SIZE_MULTIPLIER * button_count
+  end
+
+  def self.visual_scan_effort(prior_buttons)
+    prior_buttons * VISUAL_SCAN_MULTIPLIER
+  end
+
+  def self.distance_effort(x, y, entry_x, entry_y)
+    Math.sqrt((x - entry_x) ** 2 + (y - entry_y) ** 2) / SQRT2 * DISTANCE_MULTIPLIER
+  end
+
+  def self.spelling_effort(word)
+    10 + (word.length * 2.5)
+  end
+
  def self.analyze_and_compare(obfset, compset)
    target = AACMetrics::Metrics.analyze(obfset, false)
    res = {}.merge(target)
@@ -142,34 +242,42 @@ module AACMetrics::Metrics
      comp_efforts[btn[:label]] = btn[:effort]
    end
 
-
+    sortable_efforts = {}
    target_efforts = {}
    target_words = []
+    # Track effort scores for each button in the set,
+    # used to sort and for assessing priority
+    # TODO: keep a list of expected effort scores for
+    # very frequent core words and use that when available
    res[:buttons].each{|b|
      target_words << b[:label]
      target_efforts[b[:label]] = b[:effort]
-
+      sortable_efforts[b[:label]] = b[:effort]
      comp = compare_buttons[b[:label]]
      if comp
        b[:comp_level] = comp[:level]
        b[:comp_effort] = comp[:effort]
      end
    }
+    # Effort scores are the mean of thw scores from the
+    # two sets, or just a singular value if in only one set
    compare[:buttons].each{|b|
-      if
-
-
+      if sortable_efforts[b[:label]]
+        sortable_efforts[b[:label]] += b[:effort]
+        sortable_efforts[b[:label]] /= 2
      else
-
+        sortable_efforts[b[:label]] ||= b[:effort]
      end
    }
 
    core_lists = AACMetrics::Loader.core_lists(target[:locale])
    common_words_obj = AACMetrics::Loader.common_words(target[:locale])
    synonyms = AACMetrics::Loader.synonyms(target[:locale])
-
+    sentences = AACMetrics::Loader.sentences(target[:locale])
+    common_words_obj['efforts'].each{|w, e| sortable_efforts[w] ||= e }
    common_words = common_words_obj['words']
 
+    # Track which words are significantly harder or easier than expected
    too_easy = []
    too_hard = []
    target[:buttons].each do |btn|
@@ -183,11 +291,11 @@ module AACMetrics::Metrics
    end
 
 
-    missing = (compare_words - target_words).sort_by{|w|
+    missing = (compare_words - target_words).sort_by{|w| sortable_efforts[w] }
    missing = missing.select do |word|
      !synonyms[word] || (synonyms[word] & target_words).length == 0
    end
-    extras = (target_words - compare_words).sort_by{|w|
+    extras = (target_words - compare_words).sort_by{|w| sortable_efforts[w] }
    extras = extras.select do |word|
      !synonyms[word] || (synonyms[word] & compare_words).length == 0
    end
@@ -235,28 +343,42 @@ module AACMetrics::Metrics
    res[:cores] = {
      :common => {name: "Common Word List", list: common_words, average_effort: common_effort, comp_effort: comp_effort}
    }
+    target_effort_tally = 0.0
+    comp_effort_tally = 0.0
+    # For each core list, find any missing words, and compute
+    # the average level of effort for all words in the set,
+    # using a fallback effort metric if the word isn't in the
+    # board set
    # puts missing.join(' ')
    core_lists.each do |list|
-      puts list['id']
      missing = []
+      comp_missing = []
      list_effort = 0
      comp_effort = 0
      list['words'].each do |word|
        words = [word] + (synonyms[word] || [])
+        # Check if any words from the core list are missing in the set
        if (target_words & words).length == 0
          missing << word
        end
+        if (compare_words & words).length == 0
+          comp_missing << word
+        end
+
+        # Calculate the effort for the target and comp sets
        effort = target_efforts[word]
        if !effort
          words.each{|w| effort ||= target_efforts[w] }
        end
-
+        # Fallback penalty for missing word
+        effort ||= spelling_effort(word)
        list_effort += effort
+
        effort = comp_efforts[word]
        if !effort
          words.each{|w| effort ||= comp_efforts[w] }
        end
-        effort ||=
+        effort ||= spelling_effort(word)
        comp_effort += effort
      end
      if missing.length > 0
@@ -266,8 +388,56 @@ module AACMetrics::Metrics
      end
      list_effort = list_effort.to_f / list['words'].length.to_f
      comp_effort = comp_effort.to_f / list['words'].length.to_f
+      target_effort_tally += list_effort
+      comp_effort_tally += comp_effort
      res[:cores][list['id']] = {name: list['name'], list: list['words'], average_effort: list_effort, comp_effort: comp_effort}
    end
+    target_effort_tally = (target_effort_tally / core_lists.to_a.length) * 5.0
+
+    comp_effort_tally = (comp_effort_tally / core_lists.to_a.length) * 5.0
+
+    # TODO: Assemble or allow a battery of word combinations,
+    # and calculate the level of effort for each sequence,
+    # as well as an average level of effort across combinations.
+    res[:sentences] = []
+    sentences.each do |words|
+      puts " #{words.join(' ')}"
+      BOARD_CHANGE_PROCESSING_EFFORT
+      target_effort_score = 0.0
+      comp_effort_score = 0.0
+      words.each_with_index do |word, idx|
+        synonym_words = [word] + (synonyms[word] || [])
+        effort = target_efforts[word] || target_efforts[word.downcase]
+        if !effort
+          synonym_words.each{|w| effort ||= target_efforts[w] }
+        end
+        effort ||= spelling_effort(word)
+        effort += (idx == 0) ? 0.0 : BOARD_CHANGE_PROCESSING_EFFORT
+        ee = effort
+        target_effort_score += effort
+
+        effort = comp_efforts[word] || comp_efforts[word.downcase]
+        if !effort
+          synonym_words.each{|w| effort ||= comp_efforts[w] }
+        end
+        effort ||= spelling_effort(word)
+        effort += (idx == 0) ? 0.0 : BOARD_CHANGE_PROCESSING_EFFORT
+        comp_effort_score += effort
+        puts " #{word} #{ee} #{effort}"
+      end
+      target_effort_score = target_effort_score / words.length
+      comp_effort_score = comp_effort_score / words.length
+      res[:sentences] << {sentence: words.join(' '), words: words, effort: target_effort_score, comp_effort: comp_effort_score}
+    end
+    target_effort_tally += res[:sentences].map{|s| s[:effort] }.sum.to_f / res[:sentences].length.to_f * 3.0
+    comp_effort_tally += res[:sentences].map{|s| s[:comp_effort] }.sum.to_f / res[:sentences].length.to_f * 3.0
+    target_effort_tally += 100 # placeholder value for future added calculations
+    comp_effort_tally += 100
+
+
+
+    res[:target_effort_score] = target_effort_tally
+    res[:comp_effort_score] = comp_effort_tally
    # puts "CONSIDER MAKING EASIER"
    res[:high_effort_words] = too_hard
    # puts too_hard.join(' ')
data/lib/aac-metrics.rb
CHANGED
@@ -8,7 +8,7 @@
 # Are grammar inflections available?
 # Does this support auto-home, or other motor planning supports?
 # Are other language options available?
-# How is
+# How is the vocabulary organized?
 # What platforms are supported?
 # Access to keyboard w/ prediction? Numbers?
 # How easy is it to say these personalized sentences: _____
|