chat_correct 0.0.2 → 0.0.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: acde234f5e17dabeb5812da984bcf25e4e9cfc59
-   data.tar.gz: 7f1522162906c9d89b576bdd5ce73e94cb8b2a77
+   metadata.gz: dfcb30b6d537455888810e34146a4207e5c768ac
+   data.tar.gz: 1a647ede3674ab79cd9eba2edaf84b3c09f1dd6b
  SHA512:
-   metadata.gz: f381bf523dcc848cd1513214194518e4bd53309bbb1afaf3cf68ca058891c1b8373a377722c98b01d4dad82dd35b150a6deff1f861748753418e32e0b4465f5f
-   data.tar.gz: b106388df4b51e0ef9b2c450126678c1735d08104247cd9922bf20d2b4e5159a4cb47d3762f8dc7bfa5256797090aae01a8576677a49546f723a3dea2d2d38e5
+   metadata.gz: 113770db887825354e1770fe5833f3b6b1ef63e82d7fcc188c40f4f8bace0460253aa524fa41979b152a477f1a7fda8d5a0949c7f4ac736a60bc0225249a3c09
+   data.tar.gz: b5b10196bb691bab5bbf3785ad5ffd9863c936b4dccfe3bea032348678332fd5de272ddfbf62041eef06407324eb51453920c04837b60a505f95f7b46571b95d
@@ -1,16 +1,14 @@
- require 'engtagger'
-
  module ChatCorrect
  class CombineMultiWordVerbs
  TOKEN_ARRAY = ['are', 'am', 'was', 'were', 'have', 'has', 'had', 'will', 'would', 'could', 'did', 'arenƪt', 'wasnƪt', 'werenƪt', 'havenƪt', 'hasnƪt', 'hadnƪt', 'wouldnƪt', 'couldnƪt', 'didnƪt']
  TOKEN_ARRAY_2 = ['are', 'am', 'was', 'were', 'have', 'has', 'had', 'will', 'would', 'did', 'could']
- attr_reader :text
- def initialize(text:)
+ attr_reader :text, :tgr
+ def initialize(text:, tgr:)
  @text = text
+ @tgr = tgr
  end

  def combine
- tgr = EngTagger.new
  tokens = ChatCorrect::Tokenize.new(text: text).tokenize
  sentence_tagged = tgr.add_tags(text).split
  tokens_to_delete = []
@@ -59,4 +59,4 @@ module ChatCorrect
  COMMON_VERB_MISTAKES[token_b].eql?(token_a)
  end
  end
- end
+ end
@@ -100,4 +100,4 @@ module ChatCorrect
  token_b.eql?('them') && contraction.partition(" 'em")[0].eql?(token_a)
  end
  end
- end
+ end
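
Note on the change above: CombineMultiWordVerbs no longer builds its own EngTagger inside #combine; the tagger is injected through the new tgr: keyword, so one comparatively expensive tagger instance can be shared across many calls. A minimal usage sketch of the new signature, mirroring the updated specs and assuming the gem is loaded with require 'chat_correct':

    require 'chat_correct'

    # One shared tagger instead of a fresh EngTagger per #combine call.
    tgr = EngTagger.new

    ['I would have gone to the store.', 'I will go to the store.'].each do |text|
      tokens = ChatCorrect::CombineMultiWordVerbs.new(text: text, tgr: tgr).combine
      p tokens # e.g. ["I", "would have gone", "to", "the", "store", "."]
    end
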
@@ -1,51 +1,21 @@
- require 'engtagger'
- require 'text'
-
  module ChatCorrect
  class Correct
  TYPES_OF_MISTAKES = ['missing_word', 'unnecessary_word', 'spelling', 'verb', 'punctuation', 'word_order', 'capitalization', 'duplicate_word', 'word_choice', 'pluralization', 'possessive', 'stylistic_choice']
- attr_reader :original_sentence, :corrected_sentence
+ attr_reader :original_sentence, :corrected_sentence, :tgr
  def initialize(original_sentence:, corrected_sentence:)
  @original_sentence = original_sentence
  @corrected_sentence = corrected_sentence
+ @tgr = EngTagger.new
+ Linguistics.use(:en)
  end

  def correct
- # puts "OS: #{original_sentence}"
- # puts "CS: #{corrected_sentence}"
- # puts "OST: #{original_sentence_tokenized}"
- # puts "CST: #{corrected_sentence_tokenized}"
- # puts "OSTag: #{original_sentence_tagged}"
- # puts "CSTag: #{corrected_sentence_tagged}"
- # puts "OSTD: #{original_sentence_tokenized_downcased}"
- # puts "CSTD: #{corrected_sentence_tokenized_downcased}"
- stage_1
- debug
- stage_2
- debug
- iterate_sentences('stage_3')
- debug
- iterate_sentences('stage_4')
- debug
- iterate_sentences('stage_5')
- debug
- iterate_sentences('stage_6')
- debug
- iterate_sentences('stage_7')
- debug
- stage_8
- debug
- prev_next_match_check
- debug
- stage_9
- debug
- correction_hash = ChatCorrect::CorrectionsHash.new(original_sentence_info_hash: original_sentence_info_hash, corrected_sentence_info_hash: corrected_sentence_info_hash).create
- build_corrections_hash(correction_hash)
+ analyze
  end

  def mistakes
  mistakes_hash = {}
- correct.each do |key, value|
+ analyze.each do |key, value|
  next if !value['type'].split('_')[-1].eql?('mistake') || value['type'].split('_')[0].eql?('no')
  interim_hash = {}
  interim_hash['position'] = key
@@ -83,6 +53,35 @@ module ChatCorrect

  private

+ def analyze
+ @analyze ||= iterate_stages
+ end
+
+ def iterate_stages
+ stage_1
+ debug
+ stage_2
+ debug
+ iterate_sentences('stage_3')
+ debug
+ iterate_sentences('stage_4')
+ debug
+ iterate_sentences('stage_5')
+ debug
+ iterate_sentences('stage_6')
+ debug
+ iterate_sentences('stage_7')
+ debug
+ stage_8
+ debug
+ prev_next_match_check
+ debug
+ stage_9
+ debug
+ correction_hash = ChatCorrect::CorrectionsHash.new(original_sentence_info_hash: original_sentence_info_hash, corrected_sentence_info_hash: corrected_sentence_info_hash).create
+ build_corrections_hash(correction_hash)
+ end
+
  def build_corrections_hash(correction_hash)
  final_hash = {}
  correction_hash.each do |k, v|
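
Note on the new private analyze method above: it wraps the whole stage pipeline in a ||= memoization, so calling correct and then mistakes on the same instance runs the nine stages once instead of twice. A small sketch of the pattern in isolation (the class and method names here are illustrative, not part of the gem):

    class CachedAnalysis
      def result
        # `||=` runs the pipeline on the first call and caches the hash for later calls.
        @result ||= run_stages
      end

      private

      def run_stages
        puts 'running stages...' # printed only on the first call
        { 0 => { 'token' => 'There', 'type' => 'no_mistake' } }
      end
    end

    analysis = CachedAnalysis.new
    analysis.result # runs the stages
    analysis.result # returns the cached hash

One caveat of ||= is that a nil or false result would be recomputed; here the pipeline always returns a hash, so the cache holds.
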
@@ -106,20 +105,18 @@ module ChatCorrect
  end

  def original_sentence_tokenized
- @original_sentence_tokenized ||= ChatCorrect::CombineMultiWordVerbs.new(text: original_sentence).combine
+ @original_sentence_tokenized ||= ChatCorrect::CombineMultiWordVerbs.new(text: original_sentence, tgr: tgr).combine
  end

  def corrected_sentence_tokenized
- @corrected_sentence_tokenized ||= ChatCorrect::CombineMultiWordVerbs.new(text: corrected_sentence).combine
+ @corrected_sentence_tokenized ||= ChatCorrect::CombineMultiWordVerbs.new(text: corrected_sentence, tgr: tgr).combine
  end

  def original_sentence_tagged
- tgr = EngTagger.new
  @original_sentence_tagged ||= tgr.add_tags(original_sentence).split
  end

  def corrected_sentence_tagged
- tgr = EngTagger.new
  @corrected_sentence_tagged ||= tgr.add_tags(corrected_sentence).split
  end

@@ -240,7 +237,6 @@ module ChatCorrect
  !matched_id_array.include?(vc['match_id'].to_s) &&
  !vo['duplicates'] &&
  !vc['duplicates']
-
  original_sentence_info_hash[ko]['match_id'] = vc['match_id']
  corrected_sentence_info_hash[kc]['matched'] = true
  matched_id_array << vc['match_id'].to_s
@@ -290,42 +286,42 @@ module ChatCorrect
  end

  def stage_3(kc, vc, ks, vs)
- return unless vc['token'].eql?(vs['token']) &&
- (vc['prev_word1'].eql?(vs['prev_word1']) || vc['next_word1'].eql?(vs['next_word1'])) &&
- !vc['matched'] && vs['prev_word1'] != 'ȸ'
+ return if vc['token'] != vs['token'] ||
+ (vc['prev_word1'] != vs['prev_word1'] && vc['next_word1'] != vs['next_word1']) ||
+ vc['matched'] || vs['prev_word1'].eql?('ȸ')
  write_match_to_info_hash(ks, kc, vc)
  end

  def stage_4(kc, vc, ks, vs)
- return unless vc['token'].length > 3 && vs['token'].length > 3 &&
- Text::Levenshtein.distance(vc['token'], vs['token']) < 3 && !vc['matched']
+ return if vc['token'].length < 4 || vs['token'].length < 4 ||
+ Text::Levenshtein.distance(vc['token'], vs['token']) > 2 || vc['matched']
  write_match_to_info_hash(ks, kc, vc)
  end

  def stage_5(kc, vc, ks, vs)
- return unless ChatCorrect::Pluralization.new(token_a: vc['token'], token_b: vs['token']).pluralization_error? &&
- !vc['matched']
+ return if !ChatCorrect::Pluralization.new(token_a: vc['token'], token_b: vs['token']).pluralization_error? ||
+ vc['matched']
  write_match_to_info_hash(ks, kc, vc)
  end

  def stage_6(kc, vc, ks, vs)
- return unless ChatCorrect::Verb.new(word: vs['token'], pos: vc['pos_tag'], text: vc['token']).verb_error? &&
- (vc['prev_word1'].eql?(vs['prev_word1']) || vc['next_word1'].eql?(vs['next_word1'])) &&
- !vc['matched'] && !vs['next_word1'].include?(' ')
+ return if !ChatCorrect::Verb.new(word: vs['token'], pos: vc['pos_tag'], text: vc['token']).verb_error? ||
+ (vc['prev_word1'] != vs['prev_word1'] && vc['next_word1'] != vs['next_word1']) ||
+ vc['matched'] || vs['next_word1'].include?(' ')
  write_match_to_info_hash(ks, kc, vc)
  end

  def stage_7(kc, vc, ks, vs)
  # Distance between position of words is currently hardcoded to 5,
  # but this is a SWAG and can be adjusted based on testing.
- # The idea is to stop the algoroithm from matching words like 'to'
+ # The idea is to stop the algorithm from matching words like 'to'
  # and 'the' that appear very far apart in the sentence and should not be matched.
- return unless vc['token'].length > 1 &&
- vs['token'].length > 1 &&
- Text::Levenshtein.distance(vc['token'], vs['token']) < 3 &&
- vs['token'].to_s[0].eql?(vc['token'].to_s[0]) &&
- (vs['position'].to_i - vc['position'].to_i).abs < 5 &&
- !vc['matched']
+ return if vc['token'].length < 2 ||
+ vs['token'].length < 2 ||
+ Text::Levenshtein.distance(vc['token'], vs['token']) > 2 ||
+ vs['token'].to_s[0] != vc['token'].to_s[0] ||
+ (vs['position'].to_i - vc['position'].to_i).abs > 4 ||
+ vc['matched']
  write_match_to_info_hash(ks, kc, vc)
  end

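
Note on the stage_3 through stage_7 rewrites above: each multi-clause return unless becomes a return if by negating every clause and swapping &&/|| (De Morgan's laws), so each negative condition reads as its own early exit; the matching behaviour is intended to stay the same. A small sketch of the equivalence using the stage_4 conditions and hypothetical tokens:

    require 'text'

    # Hypothetical values standing in for the stage_4 comparison.
    token_a = 'speling'
    token_b = 'spelling'
    matched = false
    distance = Text::Levenshtein.distance(token_a, token_b) # => 1

    # 0.0.2 guard: `return unless` skips the match when this conjunction is false ...
    keep = token_a.length > 3 && token_b.length > 3 && distance < 3 && !matched

    # ... 0.0.3 guard: `return if` skips the match when this disjunction is true.
    skip = token_a.length < 4 || token_b.length < 4 || distance > 2 || matched

    keep == !skip # => true; the guards agree because lengths and distances are integers
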
@@ -1,5 +1,3 @@
- require 'linguistics'
-
  module ChatCorrect
  class Pluralization
  attr_reader :token_a, :token_b
@@ -10,7 +8,6 @@ module ChatCorrect

  def pluralization_error?
  begin
- Linguistics.use(:en)
  token_a_plural = token_a.en.plural
  token_b_plural = token_b.en.plural
  rescue
@@ -19,4 +16,4 @@ module ChatCorrect
  token_a_plural.eql?(token_b) || token_b_plural.eql?(token_a)
  end
  end
- end
+ end
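
Note on the change above: Linguistics.use(:en) now runs once in Correct#initialize instead of on every pluralization_error? call, while the check itself is unchanged: pluralize both tokens and flag an error if either plural equals the other token. A minimal standalone sketch of that check, assuming the linguistics gem is available (the method name is written out here only for illustration):

    require 'linguistics'

    # Mix in the English inflection helpers once, as Correct#initialize now does.
    Linguistics.use(:en)

    def pluralization_error?(token_a, token_b)
      token_a.en.plural.eql?(token_b) || token_b.en.plural.eql?(token_a)
    rescue StandardError
      false # the gem also rescues here; returning false is this sketch's choice
    end

    pluralization_error?('child', 'children') # => true
    pluralization_error?('bag', 'tie')        # => false
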
@@ -22,4 +22,4 @@ module ChatCorrect
  (word_1.partition(mark)[2].eql?('s') || word_1.partition(mark)[2].length < 3))
  end
  end
- end
+ end
@@ -11,4 +11,4 @@ module ChatCorrect
  token_a.delete("ƪ").eql?(token_b.delete("ƪ"))
  end
  end
- end
+ end
@@ -1,5 +1,3 @@
- require 'text'
-
  module ChatCorrect
  class Spelling
  WORD_CHOICE = ["the", "that", "this", "on", "at", "in", "an", "it", "if", "of", "to"]
@@ -17,4 +15,4 @@ module ChatCorrect
  Text::Levenshtein.distance(token_a.downcase, token_b.downcase) < 3 && token_a.downcase != token_b.downcase
  end
  end
- end
+ end
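
Note on the change above: with require 'text' moved into chat_correct.rb, the spelling rule itself is untouched: two tokens count as a likely misspelling pair when their case-insensitive Levenshtein distance is below 3 and they are not the same word. A standalone sketch of that rule (the method name here is illustrative):

    require 'text'

    def spelling_pair?(token_a, token_b)
      Text::Levenshtein.distance(token_a.downcase, token_b.downcase) < 3 &&
        token_a.downcase != token_b.downcase
    end

    spelling_pair?('puncttuation', 'punctuation') # => true (distance 1)
    spelling_pair?('The', 'the')                  # => false (same word once downcased)
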
@@ -1,5 +1,3 @@
- require 'verbs'
-
  module ChatCorrect
  class Verb
  attr_reader :word, :pos, :text
@@ -1,3 +1,3 @@
  module ChatCorrect
- VERSION = "0.0.2"
+ VERSION = "0.0.3"
  end
data/lib/chat_correct.rb CHANGED
@@ -14,3 +14,7 @@ require "chat_correct/time"
  require "chat_correct/punctuation"
  require "chat_correct/corrections_hash"
  require "chat_correct/mistake_analyzer"
+ require "engtagger"
+ require "text"
+ require "verbs"
+ require "linguistics"
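
Note on the change above: moving the engtagger, text, verbs and linguistics requires into chat_correct.rb means a caller only needs the single top-level require. A short usage sketch; the sentences are reconstructed from the expected tokens in the specs below and are only illustrative:

    require 'chat_correct'

    cc = ChatCorrect::Correct.new(
      original_sentence: 'There are no mistakes here.',
      corrected_sentence: 'There are no mistakes here.'
    )

    cc.correct   # => every token annotated with 'no_mistake'
    cc.mistakes  # => {} here, since mistakes only collects *_mistake tokens
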
@@ -1,39 +1,43 @@
  require 'spec_helper'

  RSpec.describe ChatCorrect::CombineMultiWordVerbs do
+ before do
+ @tgr = EngTagger.new
+ end
+
  it 'returns an array' do
  text = 'I would have gone to the store.'
- cc = ChatCorrect::CombineMultiWordVerbs.new(text: text)
+ cc = ChatCorrect::CombineMultiWordVerbs.new(text: text, tgr: @tgr)
  expect(cc.combine).to eq(["I", "would have gone", "to", "the", "store", "."])
  end

  it 'returns an array' do
  text = 'I will go to the store.'
- cc = ChatCorrect::CombineMultiWordVerbs.new(text: text)
+ cc = ChatCorrect::CombineMultiWordVerbs.new(text: text, tgr: @tgr)
  expect(cc.combine).to eq(["I", "will go", "to", "the", "store", "."])
  end

  it 'returns an array' do
  text = "He didn't realize that he should had changed the locks."
- cc = ChatCorrect::CombineMultiWordVerbs.new(text: text)
+ cc = ChatCorrect::CombineMultiWordVerbs.new(text: text, tgr: @tgr)
  expect(cc.combine).to eq(["He", "didnƪt realize", "that", "he", "should", "had changed", "the", "locks", "."])
  end

  it 'returns an array' do
  text = "He hadn't realized that he should have changed the locks."
- cc = ChatCorrect::CombineMultiWordVerbs.new(text: text)
+ cc = ChatCorrect::CombineMultiWordVerbs.new(text: text, tgr: @tgr)
  expect(cc.combine).to eq(["He", "hadnƪt realized", "that", "he", "should", "have changed", "the", "locks", "."])
  end

  it 'returns an array' do
  text = "I was not going to the party ."
- cc = ChatCorrect::CombineMultiWordVerbs.new(text: text)
+ cc = ChatCorrect::CombineMultiWordVerbs.new(text: text, tgr: @tgr)
  expect(cc.combine).to eq(["I", "was not going", "to", "the", "party", "."])
  end

  it 'returns an array' do
  text = "I did not go to the party ."
- cc = ChatCorrect::CombineMultiWordVerbs.new(text: text)
+ cc = ChatCorrect::CombineMultiWordVerbs.new(text: text, tgr: @tgr)
  expect(cc.combine).to eq(["I", "did not go", "to", "the", "party", "."])
  end
  end
@@ -10,7 +10,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0 => {'token' => 'There', 'type' => 'no_mistake'}, 1 => {'token' => 'are', 'type' => 'no_mistake'}, 2 => {'token' => 'no', 'type' => 'no_mistake'}, 3 => {'token' => 'mistakes', 'type' => 'no_mistake'}, 4 => {'token' => 'here', 'type' => 'no_mistake'}, 5 => {'token' => '.', 'type' => 'no_mistake'}})
- #expect(@cc.correct).to eq("{\"0\":{\"There\":\"no_mistake\"},\"1\":{\"are\":\"no_mistake\"},\"2\":{\"no\":\"no_mistake\"},\"3\":{\"mistakes\":\"no_mistake\"},\"4\":{\"here\":\"no_mistake\"},\"5\":{\".\":\"no_mistake\"}}")
  end

  it 'Reports the mistakes' do
@@ -35,7 +34,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0 => {'token' => 'is', 'type' => 'capitalization_mistake'}, 1 => {'token' => 'Is', 'type' => 'capitalization_correction'}, 2 => {'token' => 'the', 'type' => 'no_mistake'}, 3 => {'token' => ',', 'type' => 'punctuation_mistake'}, 4 => {'token' => 'puncttuation', 'type' => 'spelling_mistake'}, 5 => {'token' => 'punctuation', 'type' => 'spelling_correction'}, 6 => {'token' => 'are', 'type' => 'unnecessary_word_mistake'}, 7 => {'token' => 'wrong', 'type' => 'no_mistake'}, 8 => {'token' => '.', 'type' => 'punctuation_mistake'}, 9 => {'token' => '?', 'type' => 'punctuation_correction'}})
- #expect(@cc.correct).to eq("{\"0\":{\"is\":\"capitalization_mistake\"},\"1\":{\"Is\":\"capitalization_mistake_correction\"},\"2\":{\"the\":\"no_mistake\"},\"3\":{\",\":\"punctuation_mistake\"},\"4\":{\"puncttuation\":\"spelling_mistake\"},\"5\":{\"punctuation\":\"spelling_mistake_correction\"},\"6\":{\"are\":\"unnecessary_word_mistake\"},\"7\":{\"wrong\":\"no_mistake\"},\"8\":{\".\":\"punctuation_mistake\"},\"9\":{\"?\":\"punctuation_mistake_correction\"}}")
  end

  it 'Reports the mistakes' do
@@ -60,7 +58,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"need", "type"=>"no_mistake"}, 2=>{"token"=>"to", "type"=>"missing_word_mistake"}, 3=>{"token"=>"go", "type"=>"no_mistake"}, 4=>{"token"=>"shopping", "type"=>"no_mistake"}, 5=>{"token"=>"at", "type"=>"unnecessary_word_mistake"}, 6=>{"token"=>"this", "type"=>"no_mistake"}, 7=>{"token"=>"weekend", "type"=>"no_mistake"}, 8=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"need\":\"no_mistake\"},\"2\":{\"to\":\"missing_word_mistake\"},\"3\":{\"go\":\"no_mistake\"},\"4\":{\"shopping\":\"no_mistake\"},\"5\":{\"at\":\"unnecessary_word_mistake\"},\"6\":{\"this\":\"no_mistake\"},\"7\":{\"weekend\":\"no_mistake\"},\"8\":{\".\":\"no_mistake\"}}")
  end

  it 'Counts the number of mistakes' do
@@ -81,7 +78,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"go", "type"=>"verb_mistake"}, 2=>{"token"=>"went", "type"=>"verb_correction"}, 3=>{"token"=>"on", "type"=>"missing_word_mistake"}, 4=>{"token"=>"a", "type"=>"missing_word_mistake"}, 5=>{"token"=>"trip", "type"=>"no_mistake"}, 6=>{"token"=>"last", "type"=>"no_mistake"}, 7=>{"token"=>"month", "type"=>"no_mistake"}, 8=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"go\":\"verb_mistake\"},\"2\":{\"went\":\"verb_mistake_correction\"},\"3\":{\"on\":\"missing_word_mistake\"},\"4\":{\"a\":\"missing_word_mistake\"},\"5\":{\"trip\":\"no_mistake\"},\"6\":{\"last\":\"no_mistake\"},\"7\":{\"month\":\"no_mistake\"},\"8\":{\".\":\"no_mistake\"}}")
  end

  it 'Counts the number of mistakes' do
@@ -102,7 +98,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"an", "type"=>"no_mistake"}, 3=>{"token"=>"exclamation", "type"=>"no_mistake"}, 4=>{"token"=>".", "type"=>"punctuation_mistake"}, 5=>{"token"=>"!", "type"=>"punctuation_correction"}})
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"an\":\"no_mistake\"},\"3\":{\"exclamation\":\"no_mistake\"},\"4\":{\".\":\"punctuation_mistake\"},\"5\":{\"!\":\"punctuation_mistake_correction\"}}")
  end

  it 'Counts the number of mistakes' do
@@ -123,7 +118,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"what", "type"=>"capitalization_mistake"}, 1=>{"token"=>"What", "type"=>"capitalization_correction"}, 2=>{"token"=>"am", "type"=>"verb_mistake"}, 3=>{"token"=>"was", "type"=>"verb_correction"}, 4=>{"token"=>"i", "type"=>"capitalization_mistake"}, 5=>{"token"=>"I", "type"=>"capitalization_correction"}, 6=>{"token"=>"thinking", "type"=>"no_mistake"}, 7=>{"token"=>"!", "type"=>"punctuation_mistake"}, 8=>{"token"=>"?", "type"=>"punctuation_correction"}})
- #expect(@cc.correct).to eq("{\"0\":{\"what\":\"capitalization_mistake\"},\"1\":{\"What\":\"capitalization_mistake_correction\"},\"2\":{\"am\":\"verb_mistake\"},\"3\":{\"was\":\"verb_mistake_correction\"},\"4\":{\"i\":\"capitalization_mistake\"},\"5\":{\"I\":\"capitalization_mistake_correction\"},\"6\":{\"thinking\":\"no_mistake\"},\"7\":{\"!\":\"punctuation_mistake\"},\"8\":{\"?\":\"punctuation_mistake_correction\"}}")
  end

  it 'Counts the number of mistakes' do
@@ -144,16 +138,15 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"There", "type"=>"no_mistake"}, 1=>{"token"=>"arre", "type"=>"spelling_mistake"}, 2=>{"token"=>"are", "type"=>"spelling_correction"}, 3=>{"token"=>"a", "type"=>"missing_word_mistake"}, 4=>{"token"=>"lotts", "type"=>"spelling_mistake"}, 5=>{"token"=>"lot", "type"=>"spelling_correction"}, 6=>{"token"=>"of", "type"=>"no_mistake"}, 7=>{"token"=>"misspeellings", "type"=>"spelling_mistake"}, 8=>{"token"=>"misspellings", "type"=>"spelling_correction"}, 9=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"There\":\"no_mistake\"},\"1\":{\"arre\":\"spelling_mistake\"},\"2\":{\"are\":\"spelling_mistake_correction\"},\"3\":{\"a\":\"missing_word_mistake\"},\"4\":{\"lotts\":\"spelling_mistake\"},\"5\":{\"lot\":\"spelling_mistake_correction\"},\"6\":{\"of\":\"no_mistake\"},\"7\":{\"misspeellings\":\"spelling_mistake\"},\"8\":{\"misspellings\":\"spelling_mistake_correction\"},\"9\":{\".\":\"no_mistake\"}}")
  end

- # it 'Counts the number of mistakes' do
- # expect(@cc.number_of_mistakes).to eq([])
- # end
+ it 'Counts the number of mistakes' do
+ expect(@cc.number_of_mistakes).to eq(4)
+ end

- # it 'Reports the mistakes by mistake type' do
- # expect(@cc.mistake_report).to eq([])
- # end
+ it 'Reports the mistakes by mistake type' do
+ expect(@cc.mistake_report).to eq({"missing_word"=>1, "unnecessary_word"=>0, "spelling"=>3, "verb"=>0, "punctuation"=>0, "word_order"=>0, "capitalization"=>0, "duplicate_word"=>0, "word_choice"=>0, "pluralization"=>0, "possessive"=>0, "stylistic_choice"=>0})
+ end
  end

  context "example correction #008" do
@@ -165,16 +158,15 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"There", "type"=>"no_mistake"}, 1=>{"token"=>"arre", "type"=>"spelling_mistake"}, 2=>{"token"=>"are", "type"=>"spelling_correction"}, 3=>{"token"=>"a", "type"=>"missing_word_mistake"}, 4=>{"token"=>"lotts", "type"=>"spelling_mistake"}, 5=>{"token"=>"lot", "type"=>"spelling_correction"}, 6=>{"token"=>",", "type"=>"punctuation_mistake"}, 7=>{"token"=>"off", "type"=>"spelling_mistake"}, 8=>{"token"=>"of", "type"=>"spelling_correction"}, 9=>{"token"=>"consecutiveee", "type"=>"spelling_mistake"}, 10=>{"token"=>"consecutive", "type"=>"spelling_correction"}, 11=>{"token"=>"misspeellings", "type"=>"spelling_mistake"}, 12=>{"token"=>"misspellings", "type"=>"spelling_correction"}, 13=>{"token"=>"!", "type"=>"punctuation_mistake"}, 14=>{"token"=>".", "type"=>"punctuation_correction"}})
- #expect(@cc.correct).to eq("{\"0\":{\"There\":\"no_mistake\"},\"1\":{\"arre\":\"spelling_mistake\"},\"2\":{\"are\":\"spelling_mistake_correction\"},\"3\":{\"a\":\"missing_word_mistake\"},\"4\":{\"lotts\":\"spelling_mistake\"},\"5\":{\"lot\":\"spelling_mistake_correction\"},\"6\":{\",\":\"punctuation_mistake\"},\"7\":{\"off\":\"spelling_mistake\"},\"8\":{\"of\":\"spelling_mistake_correction\"},\"9\":{\"consecutiveee\":\"spelling_mistake\"},\"10\":{\"consecutive\":\"spelling_mistake_correction\"},\"11\":{\"misspeellings\":\"spelling_mistake\"},\"12\":{\"misspellings\":\"spelling_mistake_correction\"},\"13\":{\"!\":\"punctuation_mistake\"},\"14\":{\".\":\"punctuation_mistake_correction\"}}")
  end

- # it 'Counts the number of mistakes' do
- # expect(@cc.number_of_mistakes).to eq([])
- # end
+ it 'Counts the number of mistakes' do
+ expect(@cc.number_of_mistakes).to eq(8)
+ end

- # it 'Reports the mistakes by mistake type' do
- # expect(@cc.mistake_report).to eq([])
- # end
+ it 'Reports the mistakes by mistake type' do
+ expect(@cc.mistake_report).to eq({"missing_word"=>1, "unnecessary_word"=>0, "spelling"=>5, "verb"=>0, "punctuation"=>2, "word_order"=>0, "capitalization"=>0, "duplicate_word"=>0, "word_choice"=>0, "pluralization"=>0, "possessive"=>0, "stylistic_choice"=>0})
+ end
  end
179
171
 
180
172
  context "example correction #009" do
@@ -186,7 +178,6 @@ RSpec.describe ChatCorrect::Correct do
186
178
 
187
179
  it 'Annotates the corrections' do
188
180
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"a", "type"=>"no_mistake"}, 3=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 4=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 5=>{"token"=>"double", "type"=>"no_mistake"}, 6=>{"token"=>"word", "type"=>"no_mistake"}, 7=>{"token"=>"check", "type"=>"no_mistake"}, 8=>{"token"=>".", "type"=>"no_mistake"}})
189
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"a\":\"no_mistake\"},\"3\":{\"double\":\"duplicate_word_mistake\"},\"4\":{\"double\":\"duplicate_word_mistake\"},\"5\":{\"double\":\"no_mistake\"},\"6\":{\"word\":\"no_mistake\"},\"7\":{\"check\":\"no_mistake\"},\"8\":{\".\":\"no_mistake\"}}")
190
181
  end
191
182
 
192
183
  it 'Counts the number of mistakes' do
@@ -207,7 +198,6 @@ RSpec.describe ChatCorrect::Correct do
207
198
 
208
199
  it 'Annotates the corrections' do
209
200
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"and", "type"=>"no_mistake"}, 3=>{"token"=>"this", "type"=>"no_mistake"}, 4=>{"token"=>"and", "type"=>"no_mistake"}, 5=>{"token"=>"this", "type"=>"no_mistake"}, 6=>{"token"=>"is", "type"=>"no_mistake"}, 7=>{"token"=>"a", "type"=>"no_mistake"}, 8=>{"token"=>"correct", "type"=>"no_mistake"}, 9=>{"token"=>"double", "type"=>"no_mistake"}, 10=>{"token"=>"word", "type"=>"no_mistake"}, 11=>{"token"=>"check", "type"=>"no_mistake"}, 12=>{"token"=>"!", "type"=>"punctuation_mistake"}, 13=>{"token"=>".", "type"=>"punctuation_correction"}})
210
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"and\":\"no_mistake\"},\"3\":{\"this\":\"no_mistake\"},\"4\":{\"and\":\"no_mistake\"},\"5\":{\"this\":\"no_mistake\"},\"6\":{\"is\":\"no_mistake\"},\"7\":{\"a\":\"no_mistake\"},\"8\":{\"correct\":\"no_mistake\"},\"9\":{\"double\":\"no_mistake\"},\"10\":{\"word\":\"no_mistake\"},\"11\":{\"check\":\"no_mistake\"},\"12\":{\"!\":\"punctuation_mistake\"},\"13\":{\".\":\"punctuation_mistake_correction\"}}")
211
201
  end
212
202
 
213
203
  # it 'Counts the number of mistakes' do
@@ -228,7 +218,6 @@ RSpec.describe ChatCorrect::Correct do
228
218
 
229
219
  it 'Annotates the corrections' do
230
220
  expect(@cc.correct).to eq({0=>{"token"=>"He", "type"=>"no_mistake"}, 1=>{"token"=>"said", "type"=>"no_mistake"}, 2=>{"token"=>",", "type"=>"no_mistake"}, 3=>{"token"=>"\"", "type"=>"no_mistake"}, 4=>{"token"=>"Shhe", "type"=>"spelling_mistake"}, 5=>{"token"=>"She", "type"=>"spelling_correction"}, 6=>{"token"=>"is", "type"=>"no_mistake"}, 7=>{"token"=>"a", "type"=>"no_mistake"}, 8=>{"token"=>"crazy", "type"=>"no_mistake"}, 9=>{"token"=>"girl", "type"=>"no_mistake"}, 10=>{"token"=>".", "type"=>"word_order_mistake"}, 11=>{"token"=>"\"", "type"=>"word_order_mistake"}})
231
- #expect(@cc.correct).to eq("{\"0\":{\"He\":\"no_mistake\"},\"1\":{\"said\":\"no_mistake\"},\"2\":{\",\":\"no_mistake\"},\"3\":{\"\"\":\"no_mistake\"},\"4\":{\"Shhe\":\"spelling_mistake\"},\"5\":{\"She\":\"spelling_mistake_correction\"},\"6\":{\"is\":\"no_mistake\"},\"7\":{\"a\":\"no_mistake\"},\"8\":{\"crazy\":\"no_mistake\"},\"9\":{\"girl\":\"no_mistake\"},\"10\":{\".\":\"word_order_mistake\"},\"11\":{\"\"\":\"word_order_mistake\"}}")
232
221
  end
233
222
 
234
223
  # it 'Counts the number of mistakes' do
@@ -249,7 +238,6 @@ RSpec.describe ChatCorrect::Correct do
249
238
 
250
239
  it 'Annotates the corrections' do
251
240
  expect(@cc.correct).to eq({0=>{"token"=>"Test", "type"=>"no_mistake"}, 1=>{"token"=>"the", "type"=>"no_mistake"}, 2=>{"token"=>"word", "type"=>"word_order_mistake"}, 3=>{"token"=>"order", "type"=>"word_order_mistake"}, 4=>{"token"=>".", "type"=>"no_mistake"}})
252
- #expect(@cc.correct).to eq("{\"0\":{\"Test\":\"no_mistake\"},\"1\":{\"the\":\"no_mistake\"},\"2\":{\"word\":\"word_order_mistake\"},\"3\":{\"order\":\"word_order_mistake\"},\"4\":{\".\":\"no_mistake\"}}")
253
241
  end
254
242
 
255
243
  # it 'Counts the number of mistakes' do
@@ -270,7 +258,6 @@ RSpec.describe ChatCorrect::Correct do
270
258
 
271
259
  it 'Annotates the corrections' do
272
260
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"a", "type"=>"no_mistake"}, 3=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 4=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 5=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 6=>{"token"=>"double", "type"=>"no_mistake"}, 7=>{"token"=>"word", "type"=>"no_mistake"}, 8=>{"token"=>"check", "type"=>"no_mistake"}, 9=>{"token"=>".", "type"=>"no_mistake"}})
273
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"a\":\"no_mistake\"},\"3\":{\"double\":\"duplicate_word_mistake\"},\"4\":{\"double\":\"duplicate_word_mistake\"},\"5\":{\"double\":\"duplicate_word_mistake\"},\"6\":{\"double\":\"no_mistake\"},\"7\":{\"word\":\"no_mistake\"},\"8\":{\"check\":\"no_mistake\"},\"9\":{\".\":\"no_mistake\"}}")
274
261
  end
275
262
 
276
263
  # it 'Counts the number of mistakes' do
@@ -291,7 +278,6 @@ RSpec.describe ChatCorrect::Correct do
291
278
 
292
279
  it 'Annotates the corrections' do
293
280
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"call", "type"=>"verb_mistake"}, 2=>{"token"=>"will call", "type"=>"verb_correction"}, 3=>{"token"=>"my", "type"=>"no_mistake"}, 4=>{"token"=>"mom", "type"=>"no_mistake"}, 5=>{"token"=>"tomorrow", "type"=>"no_mistake"}, 6=>{"token"=>".", "type"=>"no_mistake"}})
294
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"call\":\"verb_mistake\"},\"2\":{\"will call\":\"verb_mistake_correction\"},\"3\":{\"my\":\"no_mistake\"},\"4\":{\"mom\":\"no_mistake\"},\"5\":{\"tomorrow\":\"no_mistake\"},\"6\":{\".\":\"no_mistake\"}}")
295
281
  end
296
282
 
297
283
  # it 'Counts the number of mistakes' do
@@ -312,7 +298,6 @@ RSpec.describe ChatCorrect::Correct do
312
298
 
313
299
  it 'Annotates the corrections' do
314
300
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"flied", "type"=>"verb_mistake"}, 2=>{"token"=>"flew", "type"=>"verb_correction"}, 3=>{"token"=>"home", "type"=>"no_mistake"}, 4=>{"token"=>"yesterday", "type"=>"no_mistake"}, 5=>{"token"=>".", "type"=>"no_mistake"}})
315
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"flied\":\"verb_mistake\"},\"2\":{\"flew\":\"verb_mistake_correction\"},\"3\":{\"home\":\"no_mistake\"},\"4\":{\"yesterday\":\"no_mistake\"},\"5\":{\".\":\"no_mistake\"}}")
316
301
  end
317
302
 
318
303
  # it 'Counts the number of mistakes' do
@@ -333,7 +318,6 @@ RSpec.describe ChatCorrect::Correct do
333
318
 
334
319
  it 'Annotates the corrections' do
335
320
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"shouldn't", "type"=>"no_mistake"}, 2=>{"token"=>"be", "type"=>"no_mistake"}, 3=>{"token"=>"use", "type"=>"verb_mistake"}, 4=>{"token"=>"used", "type"=>"verb_correction"}, 5=>{"token"=>"to", "type"=>"no_mistake"}, 6=>{"token"=>"test", "type"=>"no_mistake"}, 7=>{"token"=>"contractions", "type"=>"no_mistake"}, 8=>{"token"=>",", "type"=>"no_mistake"}, 9=>{"token"=>"but", "type"=>"no_mistake"}, 10=>{"token"=>"couln't", "type"=>"spelling_mistake"}, 11=>{"token"=>"couldn't", "type"=>"spelling_correction"}, 12=>{"token"=>"it", "type"=>"no_mistake"}, 13=>{"token"=>"?", "type"=>"no_mistake"}})
336
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"shouldn't\":\"no_mistake\"},\"2\":{\"be\":\"no_mistake\"},\"3\":{\"use\":\"verb_mistake\"},\"4\":{\"used\":\"verb_mistake_correction\"},\"5\":{\"to\":\"no_mistake\"},\"6\":{\"test\":\"no_mistake\"},\"7\":{\"contractions\":\"no_mistake\"},\"8\":{\",\":\"no_mistake\"},\"9\":{\"but\":\"no_mistake\"},\"10\":{\"couln't\":\"spelling_mistake\"},\"11\":{\"couldn't\":\"spelling_mistake_correction\"},\"12\":{\"it\":\"no_mistake\"},\"13\":{\"?\":\"no_mistake\"}}")
337
321
  end
338
322
 
339
323
  # it 'Counts the number of mistakes' do
@@ -354,7 +338,6 @@ RSpec.describe ChatCorrect::Correct do
354
338
 
355
339
  it 'Annotates the corrections' do
356
340
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"to", "type"=>"no_mistake"}, 3=>{"token"=>"test", "type"=>"no_mistake"}, 4=>{"token"=>",", "type"=>"no_mistake"}, 5=>{"token"=>"'", "type"=>"no_mistake"}, 6=>{"token"=>"single", "type"=>"capitalization_mistake"}, 7=>{"token"=>"Single", "type"=>"capitalization_correction"}, 8=>{"token"=>"quotes", "type"=>"no_mistake"}, 9=>{"token"=>".", "type"=>"word_order_mistake"}, 10=>{"token"=>"'", "type"=>"word_order_mistake"}})
357
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"to\":\"no_mistake\"},\"3\":{\"test\":\"no_mistake\"},\"4\":{\",\":\"no_mistake\"},\"5\":{\"'\":\"no_mistake\"},\"6\":{\"single\":\"capitalization_mistake\"},\"7\":{\"Single\":\"capitalization_mistake_correction\"},\"8\":{\"quotes\":\"no_mistake\"},\"9\":{\".\":\"word_order_mistake\"},\"10\":{\"'\":\"word_order_mistake\"}}")
358
341
  end
359
342
 
360
343
  # it 'Counts the number of mistakes' do
@@ -375,7 +358,6 @@ RSpec.describe ChatCorrect::Correct do
375
358
 
376
359
  it 'Annotates the corrections' do
377
360
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"to", "type"=>"no_mistake"}, 3=>{"token"=>"test", "type"=>"no_mistake"}, 4=>{"token"=>"quotations", "type"=>"no_mistake"}, 5=>{"token"=>"\"", "type"=>"punctuation_mistake"}, 6=>{"token"=>"again", "type"=>"no_mistake"}, 7=>{"token"=>"\"", "type"=>"punctuation_mistake"}})
378
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"to\":\"no_mistake\"},\"3\":{\"test\":\"no_mistake\"},\"4\":{\"quotations\":\"no_mistake\"},\"5\":{\"\"\":\"punctuation_mistake\"},\"6\":{\"again\":\"no_mistake\"},\"7\":{\"\"\":\"punctuation_mistake\"}}")
379
361
  end
380
362
 
381
363
  # it 'Counts the number of mistakes' do
@@ -396,7 +378,6 @@ RSpec.describe ChatCorrect::Correct do
396
378
 
397
379
  it 'Annotates the corrections' do
398
380
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"will call", "type"=>"verb_mistake"}, 2=>{"token"=>"called", "type"=>"verb_correction"}, 3=>{"token"=>"my", "type"=>"no_mistake"}, 4=>{"token"=>"mom", "type"=>"no_mistake"}, 5=>{"token"=>"yesterday", "type"=>"no_mistake"}, 6=>{"token"=>".", "type"=>"no_mistake"}})
399
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"will call\":\"verb_mistake\"},\"2\":{\"called\":\"verb_mistake_correction\"},\"3\":{\"my\":\"no_mistake\"},\"4\":{\"mom\":\"no_mistake\"},\"5\":{\"yesterday\":\"no_mistake\"},\"6\":{\".\":\"no_mistake\"}}")
400
381
  end
401
382
 
402
383
  # it 'Counts the number of mistakes' do
@@ -417,7 +398,6 @@ RSpec.describe ChatCorrect::Correct do
417
398
 
418
399
  it 'Annotates the corrections' do
419
400
  expect(@cc.correct).to eq({0=>{"token"=>"Test", "type"=>"no_mistake"}, 1=>{"token"=>"run", "type"=>"unnecessary_word_mistake"}, 2=>{"token"=>"the", "type"=>"no_mistake"}, 3=>{"token"=>"word", "type"=>"word_order_mistake"}, 4=>{"token"=>"order", "type"=>"word_order_mistake"}, 5=>{"token"=>"with", "type"=>"no_mistake"}, 6=>{"token"=>"anotther", "type"=>"spelling_mistake"}, 7=>{"token"=>"another", "type"=>"spelling_correction"}, 8=>{"token"=>"simple", "type"=>"missing_word_mistake"}, 9=>{"token"=>"mistake", "type"=>"no_mistake"}, 10=>{"token"=>".", "type"=>"no_mistake"}})
420
- #expect(@cc.correct).to eq("{\"0\":{\"Test\":\"no_mistake\"},\"1\":{\"run\":\"unnecessary_word_mistake\"},\"2\":{\"the\":\"no_mistake\"},\"3\":{\"word\":\"word_order_mistake\"},\"4\":{\"order\":\"word_order_mistake\"},\"5\":{\"with\":\"no_mistake\"},\"6\":{\"anotther\":\"spelling_mistake\"},\"7\":{\"another\":\"spelling_mistake_correction\"},\"8\":{\"simple\":\"missing_word_mistake\"},\"9\":{\"mistake\":\"no_mistake\"},\"10\":{\".\":\"no_mistake\"}}")
421
401
  end
422
402
 
423
403
  # it 'Counts the number of mistakes' do
@@ -438,7 +418,6 @@ RSpec.describe ChatCorrect::Correct do
438
418
 
439
419
  it 'Annotates the corrections' do
440
420
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"to", "type"=>"no_mistake"}, 3=>{"token"=>"test", "type"=>"no_mistake"}, 4=>{"token"=>"quotations", "type"=>"no_mistake"}, 5=>{"token"=>"\"", "type"=>"punctuation_mistake"}, 6=>{"token"=>"again", "type"=>"no_mistake"}, 7=>{"token"=>"\"", "type"=>"punctuation_mistake"}, 8=>{"token"=>".", "type"=>"no_mistake"}})
441
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"to\":\"no_mistake\"},\"3\":{\"test\":\"no_mistake\"},\"4\":{\"quotations\":\"no_mistake\"},\"5\":{\"\"\":\"punctuation_mistake\"},\"6\":{\"again\":\"no_mistake\"},\"7\":{\"\"\":\"punctuation_mistake\"},\"8\":{\".\":\"no_mistake\"}}")
442
421
  end
443
422
 
444
423
  # it 'Counts the number of mistakes' do
@@ -459,7 +438,6 @@ RSpec.describe ChatCorrect::Correct do
459
438
 
460
439
  it 'Annotates the corrections' do
461
440
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"to", "type"=>"no_mistake"}, 3=>{"token"=>"test", "type"=>"no_mistake"}, 4=>{"token"=>"quotations", "type"=>"no_mistake"}, 5=>{"token"=>"\"", "type"=>"punctuation_mistake"}, 6=>{"token"=>"again", "type"=>"no_mistake"}, 7=>{"token"=>"\"", "type"=>"punctuation_mistake"}, 8=>{"token"=>".", "type"=>"punctuation_correction"}})
462
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"to\":\"no_mistake\"},\"3\":{\"test\":\"no_mistake\"},\"4\":{\"quotations\":\"no_mistake\"},\"5\":{\"\"\":\"punctuation_mistake\"},\"6\":{\"again\":\"no_mistake\"},\"7\":{\"\"\":\"punctuation_mistake\"},\"8\":{\".\":\"punctuation_mistake_correction\"}}")
463
441
  end
464
442
 
465
443
  # it 'Counts the number of mistakes' do
@@ -480,7 +458,6 @@ RSpec.describe ChatCorrect::Correct do
480
458
 
481
459
  it 'Annotates the corrections' do
482
460
  expect(@cc.correct).to eq({0=>{"token"=>"He", "type"=>"no_mistake"}, 1=>{"token"=>"didn't realize", "type"=>"verb_mistake"}, 2=>{"token"=>"hadn't realized", "type"=>"verb_correction"}, 3=>{"token"=>"that", "type"=>"no_mistake"}, 4=>{"token"=>"he", "type"=>"no_mistake"}, 5=>{"token"=>"should", "type"=>"no_mistake"}, 6=>{"token"=>"had changed", "type"=>"verb_mistake"}, 7=>{"token"=>"have changed", "type"=>"verb_correction"}, 8=>{"token"=>"the", "type"=>"no_mistake"}, 9=>{"token"=>"locks", "type"=>"no_mistake"}, 10=>{"token"=>".", "type"=>"no_mistake"}})
483
- #expect(@cc.correct).to eq("{\"0\":{\"He\":\"no_mistake\"},\"1\":{\"didn't realize\":\"verb_mistake\"},\"2\":{\"hadn't realized\":\"verb_mistake_correction\"},\"3\":{\"that\":\"no_mistake\"},\"4\":{\"he\":\"no_mistake\"},\"5\":{\"should\":\"no_mistake\"},\"6\":{\"had changed\":\"verb_mistake\"},\"7\":{\"have changed\":\"verb_mistake_correction\"},\"8\":{\"the\":\"no_mistake\"},\"9\":{\"locks\":\"no_mistake\"},\"10\":{\".\":\"no_mistake\"}}")
484
461
  end
485
462
 
486
463
  # it 'Counts the number of mistakes' do
@@ -501,7 +478,6 @@ RSpec.describe ChatCorrect::Correct do
501
478
 
502
479
  it 'Annotates the corrections' do
503
480
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"will call", "type"=>"verb_mistake"}, 2=>{"token"=>"would have called", "type"=>"verb_correction"}, 3=>{"token"=>"my", "type"=>"no_mistake"}, 4=>{"token"=>"mom", "type"=>"no_mistake"}, 5=>{"token"=>"yesterday", "type"=>"no_mistake"}, 6=>{"token"=>"if", "type"=>"no_mistake"}, 7=>{"token"=>"I", "type"=>"no_mistake"}, 8=>{"token"=>"had", "type"=>"no_mistake"}, 9=>{"token"=>"had", "type"=>"missing_word_mistake"}, 10=>{"token"=>"time", "type"=>"no_mistake"}, 11=>{"token"=>".", "type"=>"no_mistake"}})
504
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"will call\":\"verb_mistake\"},\"2\":{\"would have called\":\"verb_mistake_correction\"},\"3\":{\"my\":\"no_mistake\"},\"4\":{\"mom\":\"no_mistake\"},\"5\":{\"yesterday\":\"no_mistake\"},\"6\":{\"if\":\"no_mistake\"},\"7\":{\"I\":\"no_mistake\"},\"8\":{\"had\":\"no_mistake\"},\"9\":{\"had\":\"missing_word_mistake\"},\"10\":{\"time\":\"no_mistake\"},\"11\":{\".\":\"no_mistake\"}}")
505
481
  end
506
482
 
507
483
  # it 'Counts the number of mistakes' do
@@ -522,7 +498,6 @@ RSpec.describe ChatCorrect::Correct do
522
498
 
523
499
  it 'Annotates the corrections' do
524
500
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"call", "type"=>"verb_mistake"}, 2=>{"token"=>"would have called", "type"=>"verb_correction"}, 3=>{"token"=>"my", "type"=>"no_mistake"}, 4=>{"token"=>"mom", "type"=>"no_mistake"}, 5=>{"token"=>"yesterday", "type"=>"no_mistake"}, 6=>{"token"=>".", "type"=>"no_mistake"}})
525
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"call\":\"verb_mistake\"},\"2\":{\"would have called\":\"verb_mistake_correction\"},\"3\":{\"my\":\"no_mistake\"},\"4\":{\"mom\":\"no_mistake\"},\"5\":{\"yesterday\":\"no_mistake\"},\"6\":{\".\":\"no_mistake\"}}")
526
501
  end
527
502
 
528
503
  # it 'Counts the number of mistakes' do
@@ -543,7 +518,6 @@ RSpec.describe ChatCorrect::Correct do
543
518
 
544
519
  it 'Annotates the corrections' do
545
520
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"singed", "type"=>"verb_mistake"}, 2=>{"token"=>"sang", "type"=>"verb_correction"}, 3=>{"token"=>"at", "type"=>"no_mistake"}, 4=>{"token"=>"the", "type"=>"no_mistake"}, 5=>{"token"=>"karaoke", "type"=>"no_mistake"}, 6=>{"token"=>"bar", "type"=>"no_mistake"}, 7=>{"token"=>".", "type"=>"no_mistake"}})
546
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"singed\":\"verb_mistake\"},\"2\":{\"sang\":\"verb_mistake_correction\"},\"3\":{\"at\":\"no_mistake\"},\"4\":{\"the\":\"no_mistake\"},\"5\":{\"karaoke\":\"no_mistake\"},\"6\":{\"bar\":\"no_mistake\"},\"7\":{\".\":\"no_mistake\"}}")
547
521
  end
548
522
 
549
523
  # it 'Counts the number of mistakes' do
@@ -564,7 +538,6 @@ RSpec.describe ChatCorrect::Correct do
564
538
 
565
539
  it 'Annotates the corrections' do
566
540
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"flied", "type"=>"verb_mistake"}, 2=>{"token"=>"flew", "type"=>"verb_correction"}, 3=>{"token"=>"to", "type"=>"no_mistake"}, 4=>{"token"=>"California", "type"=>"no_mistake"}, 5=>{"token"=>"and", "type"=>"no_mistake"}, 6=>{"token"=>"go", "type"=>"verb_mistake"}, 7=>{"token"=>"went", "type"=>"verb_correction"}, 8=>{"token"=>"to", "type"=>"no_mistake"}, 9=>{"token"=>"the", "type"=>"missing_word_mistake"}, 10=>{"token"=>"zoo", "type"=>"no_mistake"}, 11=>{"token"=>".", "type"=>"no_mistake"}})
567
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"flied\":\"verb_mistake\"},\"2\":{\"flew\":\"verb_mistake_correction\"},\"3\":{\"to\":\"no_mistake\"},\"4\":{\"California\":\"no_mistake\"},\"5\":{\"and\":\"no_mistake\"},\"6\":{\"go\":\"verb_mistake\"},\"7\":{\"went\":\"verb_mistake_correction\"},\"8\":{\"to\":\"no_mistake\"},\"9\":{\"the\":\"missing_word_mistake\"},\"10\":{\"zoo\":\"no_mistake\"},\"11\":{\".\":\"no_mistake\"}}")
568
541
  end
569
542
 
570
543
  # it 'Counts the number of mistakes' do
@@ -585,7 +558,6 @@ RSpec.describe ChatCorrect::Correct do
585
558
 
586
559
  it 'Annotates the corrections' do
587
560
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"a", "type"=>"no_mistake"}, 3=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 4=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 5=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 6=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 7=>{"token"=>"double", "type"=>"no_mistake"}, 8=>{"token"=>"word", "type"=>"no_mistake"}, 9=>{"token"=>"check", "type"=>"no_mistake"}, 10=>{"token"=>".", "type"=>"no_mistake"}})
588
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"a\":\"no_mistake\"},\"3\":{\"double\":\"duplicate_word_mistake\"},\"4\":{\"double\":\"duplicate_word_mistake\"},\"5\":{\"double\":\"duplicate_word_mistake\"},\"6\":{\"double\":\"duplicate_word_mistake\"},\"7\":{\"double\":\"no_mistake\"},\"8\":{\"word\":\"no_mistake\"},\"9\":{\"check\":\"no_mistake\"},\"10\":{\".\":\"no_mistake\"}}")
589
561
  end
590
562
 
591
563
  # it 'Counts the number of mistakes' do
@@ -606,7 +578,6 @@ RSpec.describe ChatCorrect::Correct do
606
578
 
607
579
  it 'Annotates the corrections' do
608
580
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"a", "type"=>"no_mistake"}, 3=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 4=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 5=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 6=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 7=>{"token"=>"double", "type"=>"duplicate_word_mistake"}, 8=>{"token"=>"double", "type"=>"no_mistake"}, 9=>{"token"=>"word", "type"=>"no_mistake"}, 10=>{"token"=>"check", "type"=>"no_mistake"}, 11=>{"token"=>".", "type"=>"no_mistake"}})
609
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"a\":\"no_mistake\"},\"3\":{\"double\":\"duplicate_word_mistake\"},\"4\":{\"double\":\"duplicate_word_mistake\"},\"5\":{\"double\":\"duplicate_word_mistake\"},\"6\":{\"double\":\"duplicate_word_mistake\"},\"7\":{\"double\":\"duplicate_word_mistake\"},\"8\":{\"double\":\"no_mistake\"},\"9\":{\"word\":\"no_mistake\"},\"10\":{\"check\":\"no_mistake\"},\"11\":{\".\":\"no_mistake\"}}")
610
581
  end
611
582
 
612
583
  # it 'Counts the number of mistakes' do
@@ -627,7 +598,6 @@ RSpec.describe ChatCorrect::Correct do
627
598
 
628
599
  it 'Annotates the corrections' do
629
600
  expect(@cc.correct).to eq({0=>{"token"=>"If", "type"=>"no_mistake"}, 1=>{"token"=>"my", "type"=>"no_mistake"}, 2=>{"token"=>"school", "type"=>"no_mistake"}, 3=>{"token"=>"were located", "type"=>"no_mistake"}, 4=>{"token"=>"in", "type"=>"no_mistake"}, 5=>{"token"=>"Tokyo", "type"=>"no_mistake"}, 6=>{"token"=>",", "type"=>"no_mistake"}, 7=>{"token"=>"the", "type"=>"missing_word_mistake"}, 8=>{"token"=>"situation", "type"=>"no_mistake"}, 9=>{"token"=>"would have", "type"=>"verb_mistake"}, 10=>{"token"=>"would have been", "type"=>"verb_correction"}, 11=>{"token"=>"quite", "type"=>"no_mistake"}, 12=>{"token"=>"changed", "type"=>"word_choice_mistake"}, 13=>{"token"=>"different", "type"=>"word_choice_correction"}, 14=>{"token"=>".", "type"=>"no_mistake"}})
630
- #expect(@cc.correct).to eq("{\"0\":{\"If\":\"no_mistake\"},\"1\":{\"my\":\"no_mistake\"},\"2\":{\"school\":\"no_mistake\"},\"3\":{\"were located\":\"no_mistake\"},\"4\":{\"in\":\"no_mistake\"},\"5\":{\"Tokyo\":\"no_mistake\"},\"6\":{\",\":\"no_mistake\"},\"7\":{\"the\":\"missing_word_mistake\"},\"8\":{\"situation\":\"no_mistake\"},\"9\":{\"would have\":\"verb_mistake\"},\"10\":{\"would have been\":\"verb_mistake_correction\"},\"11\":{\"quite\":\"no_mistake\"},\"12\":{\"changed\":\"word_choice_mistake\"},\"13\":{\"different\":\"word_choice_mistake_correction\"},\"14\":{\".\":\"no_mistake\"}}")
631
601
  end
632
602
 
633
603
  # it 'Counts the number of mistakes' do
@@ -648,7 +618,6 @@ RSpec.describe ChatCorrect::Correct do
648
618
 
649
619
  it 'Annotates the corrections' do
650
620
  expect(@cc.correct).to eq({0=>{"token"=>"However", "type"=>"no_mistake"}, 1=>{"token"=>",", "type"=>"no_mistake"}, 2=>{"token"=>"under", "type"=>"missing_word_mistake"}, 3=>{"token"=>"normal", "type"=>"missing_word_mistake"}, 4=>{"token"=>"circumstances", "type"=>"missing_word_mistake"}, 5=>{"token"=>",", "type"=>"punctuation_mistake"}, 6=>{"token"=>"I", "type"=>"no_mistake"}, 7=>{"token"=>"think", "type"=>"no_mistake"}, 8=>{"token"=>"a", "type"=>"unnecessary_word_mistake"}, 9=>{"token"=>"success", "type"=>"no_mistake"}, 10=>{"token"=>"in", "type"=>"no_mistake"}, 11=>{"token"=>"life", "type"=>"no_mistake"}, 12=>{"token"=>"comes", "type"=>"no_mistake"}, 13=>{"token"=>"from", "type"=>"no_mistake"}, 14=>{"token"=>"careful", "type"=>"no_mistake"}, 15=>{"token"=>"planning", "type"=>"no_mistake"}, 16=>{"token"=>"when", "type"=>"unnecessary_word_mistake"}, 17=>{"token"=>"it", "type"=>"unnecessary_word_mistake"}, 18=>{"token"=>"is", "type"=>"unnecessary_word_mistake"}, 19=>{"token"=>"a", "type"=>"unnecessary_word_mistake"}, 20=>{"token"=>"usual", "type"=>"unnecessary_word_mistake"}, 21=>{"token"=>"situation", "type"=>"unnecessary_word_mistake"}, 22=>{"token"=>".", "type"=>"no_mistake"}})
651
- #expect(@cc.correct).to eq("{\"0\":{\"However\":\"no_mistake\"},\"1\":{\",\":\"no_mistake\"},\"2\":{\"under\":\"missing_word_mistake\"},\"3\":{\"normal\":\"missing_word_mistake\"},\"4\":{\"circumstances\":\"missing_word_mistake\"},\"5\":{\",\":\"punctuation_mistake\"},\"6\":{\"I\":\"no_mistake\"},\"7\":{\"think\":\"no_mistake\"},\"8\":{\"a\":\"unnecessary_word_mistake\"},\"9\":{\"success\":\"no_mistake\"},\"10\":{\"in\":\"no_mistake\"},\"11\":{\"life\":\"no_mistake\"},\"12\":{\"comes\":\"no_mistake\"},\"13\":{\"from\":\"no_mistake\"},\"14\":{\"careful\":\"no_mistake\"},\"15\":{\"planning\":\"no_mistake\"},\"16\":{\"when\":\"unnecessary_word_mistake\"},\"17\":{\"it\":\"unnecessary_word_mistake\"},\"18\":{\"is\":\"unnecessary_word_mistake\"},\"19\":{\"a\":\"unnecessary_word_mistake\"},\"20\":{\"usual\":\"unnecessary_word_mistake\"},\"21\":{\"situation\":\"unnecessary_word_mistake\"},\"22\":{\".\":\"no_mistake\"}}")
652
621
  end
653
622
 
654
623
  # it 'Counts the number of mistakes' do
@@ -669,7 +638,6 @@ RSpec.describe ChatCorrect::Correct do
669
638
 
670
639
  it 'Annotates the corrections' do
671
640
  expect(@cc.correct).to eq({0=>{"token"=>"He", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"super", "type"=>"no_mistake"}, 3=>{"token"=>"rad", "type"=>"word_choice_mistake"}, 4=>{"token"=>"cool", "type"=>"word_choice_correction"}, 5=>{"token"=>".", "type"=>"punctuation_mistake"}, 6=>{"token"=>"!", "type"=>"punctuation_correction"}})
672
- #expect(@cc.correct).to eq("{\"0\":{\"He\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"super\":\"no_mistake\"},\"3\":{\"rad\":\"word_choice_mistake\"},\"4\":{\"cool\":\"word_choice_mistake_correction\"},\"5\":{\".\":\"punctuation_mistake\"},\"6\":{\"!\":\"punctuation_mistake_correction\"}}")
673
641
  end
674
642
 
675
643
  # it 'Counts the number of mistakes' do
@@ -690,7 +658,6 @@ RSpec.describe ChatCorrect::Correct do
690
658
 
691
659
  it 'Annotates the corrections' do
692
660
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"was not going", "type"=>"verb_mistake"}, 2=>{"token"=>"did not go", "type"=>"verb_correction"}, 3=>{"token"=>"to", "type"=>"no_mistake"}, 4=>{"token"=>"the", "type"=>"no_mistake"}, 5=>{"token"=>"party", "type"=>"no_mistake"}, 6=>{"token"=>".", "type"=>"no_mistake"}})
693
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"was not going\":\"verb_mistake\"},\"2\":{\"did not go\":\"verb_mistake_correction\"},\"3\":{\"to\":\"no_mistake\"},\"4\":{\"the\":\"no_mistake\"},\"5\":{\"party\":\"no_mistake\"},\"6\":{\".\":\"no_mistake\"}}")
694
661
  end
695
662
 
696
663
  # it 'Counts the number of mistakes' do
@@ -711,7 +678,6 @@ RSpec.describe ChatCorrect::Correct do
711
678
 
712
679
  it 'Annotates the corrections' do
713
680
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"had", "type"=>"no_mistake"}, 2=>{"token"=>"experiences", "type"=>"no_mistake"}, 3=>{"token"=>"to", "type"=>"word_choice_mistake"}, 4=>{"token"=>"which", "type"=>"word_choice_correction"}, 5=>{"token"=>"support", "type"=>"no_mistake"}, 6=>{"token"=>"my", "type"=>"no_mistake"}, 7=>{"token"=>"opinion", "type"=>"no_mistake"}, 8=>{"token"=>"which", "type"=>"unnecessary_word_mistake"}, 9=>{"token"=>"a", "type"=>"unnecessary_word_mistake"}, 10=>{"token"=>"that", "type"=>"missing_word_mistake"}, 11=>{"token"=>"success", "type"=>"no_mistake"}, 12=>{"token"=>"in", "type"=>"no_mistake"}, 13=>{"token"=>"life", "type"=>"no_mistake"}, 14=>{"token"=>"comes", "type"=>"no_mistake"}, 15=>{"token"=>"from", "type"=>"no_mistake"}, 16=>{"token"=>"careful", "type"=>"no_mistake"}, 17=>{"token"=>"planning", "type"=>"no_mistake"}, 18=>{"token"=>".", "type"=>"no_mistake"}})
714
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"had\":\"no_mistake\"},\"2\":{\"experiences\":\"no_mistake\"},\"3\":{\"to\":\"word_choice_mistake\"},\"4\":{\"which\":\"word_choice_mistake_correction\"},\"5\":{\"support\":\"no_mistake\"},\"6\":{\"my\":\"no_mistake\"},\"7\":{\"opinion\":\"no_mistake\"},\"8\":{\"which\":\"unnecessary_word_mistake\"},\"9\":{\"a\":\"unnecessary_word_mistake\"},\"10\":{\"that\":\"missing_word_mistake\"},\"11\":{\"success\":\"no_mistake\"},\"12\":{\"in\":\"no_mistake\"},\"13\":{\"life\":\"no_mistake\"},\"14\":{\"comes\":\"no_mistake\"},\"15\":{\"from\":\"no_mistake\"},\"16\":{\"careful\":\"no_mistake\"},\"17\":{\"planning\":\"no_mistake\"},\"18\":{\".\":\"no_mistake\"}}")
715
681
  end
716
682
 
717
683
  # it 'Counts the number of mistakes' do
@@ -732,7 +698,6 @@ RSpec.describe ChatCorrect::Correct do
732
698
 
733
699
  it 'Annotates the corrections' do
734
700
  expect(@cc.correct).to eq({0=>{"token"=>"She", "type"=>"unnecessary_word_mistake"}, 1=>{"token"=>"have", "type"=>"unnecessary_word_mistake"}, 2=>{"token"=>"a", "type"=>"unnecessary_word_mistake"}, 3=>{"token"=>"good", "type"=>"unnecessary_word_mistake"}, 4=>{"token"=>"day", "type"=>"unnecessary_word_mistake"}, 5=>{"token"=>"yesterday", "type"=>"unnecessary_word_mistake"}, 6=>{"token"=>".", "type"=>"punctuation_mistake"}, 7=>{"token"=>"hello", "type"=>"missing_word_mistake"}, 8=>{"token"=>"world", "type"=>"missing_word_mistake"}})
735
- #expect(@cc.correct).to eq("{\"0\":{\"She\":\"unnecessary_word_mistake\"},\"1\":{\"have\":\"unnecessary_word_mistake\"},\"2\":{\"a\":\"unnecessary_word_mistake\"},\"3\":{\"good\":\"unnecessary_word_mistake\"},\"4\":{\"day\":\"unnecessary_word_mistake\"},\"5\":{\"yesterday\":\"unnecessary_word_mistake\"},\"6\":{\".\":\"punctuation_mistake\"},\"7\":{\"hello\":\"missing_word_mistake\"},\"8\":{\"world\":\"missing_word_mistake\"}}")
736
701
  end
737
702
 
738
703
  # it 'Counts the number of mistakes' do
@@ -753,7 +718,6 @@ RSpec.describe ChatCorrect::Correct do
753
718
 
754
719
  it 'Annotates the corrections' do
755
720
  expect(@cc.correct).to eq({0=>{"token"=>"If", "type"=>"no_mistake"}, 1=>{"token"=>"my", "type"=>"no_mistake"}, 2=>{"token"=>"school", "type"=>"no_mistake"}, 3=>{"token"=>"were located", "type"=>"verb_mistake"}, 4=>{"token"=>"had been located", "type"=>"verb_correction"}, 5=>{"token"=>"in", "type"=>"no_mistake"}, 6=>{"token"=>"Tokyo", "type"=>"no_mistake"}, 7=>{"token"=>",", "type"=>"no_mistake"}, 8=>{"token"=>"the", "type"=>"missing_word_mistake"}, 9=>{"token"=>"situation", "type"=>"no_mistake"}, 10=>{"token"=>"would have", "type"=>"verb_mistake"}, 11=>{"token"=>"would have been", "type"=>"verb_correction"}, 12=>{"token"=>"quite", "type"=>"no_mistake"}, 13=>{"token"=>"changed", "type"=>"word_choice_mistake"}, 14=>{"token"=>"different", "type"=>"word_choice_correction"}, 15=>{"token"=>".", "type"=>"no_mistake"}})
756
- #expect(@cc.correct).to eq("{\"0\":{\"If\":\"no_mistake\"},\"1\":{\"my\":\"no_mistake\"},\"2\":{\"school\":\"no_mistake\"},\"3\":{\"were located\":\"verb_mistake\"},\"4\":{\"had been located\":\"verb_mistake_correction\"},\"5\":{\"in\":\"no_mistake\"},\"6\":{\"Tokyo\":\"no_mistake\"},\"7\":{\",\":\"no_mistake\"},\"8\":{\"the\":\"missing_word_mistake\"},\"9\":{\"situation\":\"no_mistake\"},\"10\":{\"would have\":\"verb_mistake\"},\"11\":{\"would have been\":\"verb_mistake_correction\"},\"12\":{\"quite\":\"no_mistake\"},\"13\":{\"changed\":\"word_choice_mistake\"},\"14\":{\"different\":\"word_choice_mistake_correction\"},\"15\":{\".\":\"no_mistake\"}}")
757
721
  end
758
722
 
759
723
  # it 'Counts the number of mistakes' do
@@ -774,7 +738,6 @@ RSpec.describe ChatCorrect::Correct do
774
738
 
775
739
  it 'Annotates the corrections' do
776
740
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"have", "type"=>"no_mistake"}, 2=>{"token"=>"three", "type"=>"no_mistake"}, 3=>{"token"=>"child", "type"=>"pluralization_mistake"}, 4=>{"token"=>"children", "type"=>"pluralization_correction"}, 5=>{"token"=>".", "type"=>"missing_punctuation_mistake"}})
777
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"have\":\"no_mistake\"},\"2\":{\"three\":\"no_mistake\"},\"3\":{\"child\":\"pluralization_mistake\"},\"4\":{\"children\":\"pluralization_mistake_correction\"},\"5\":{\".\":\"missing_punctuation_mistake\"}}")
778
741
  end
779
742
 
780
743
  # it 'Counts the number of mistakes' do
@@ -795,7 +758,6 @@ RSpec.describe ChatCorrect::Correct do
795
758
 
796
759
  it 'Annotates the corrections' do
797
760
  expect(@cc.correct).to eq({0=>{"token"=>"is", "type"=>"capitalization_mistake"}, 1=>{"token"=>"Is", "type"=>"capitalization_correction"}, 2=>{"token"=>"the", "type"=>"no_mistake"}, 3=>{"token"=>"bag", "type"=>"no_mistake"}, 4=>{"token"=>"your", "type"=>"pluralization_mistake"}, 5=>{"token"=>"yours", "type"=>"pluralization_correction"}, 6=>{"token"=>"?", "type"=>"missing_punctuation_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"is\":\"capitalization_mistake\"},\"1\":{\"Is\":\"capitalization_mistake_correction\"},\"2\":{\"the\":\"no_mistake\"},\"3\":{\"bag\":\"no_mistake\"},\"4\":{\"your\":\"pluralization_mistake\"},\"5\":{\"yours\":\"pluralization_mistake_correction\"},\"6\":{\"?\":\"missing_punctuation_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -816,7 +778,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"Is", "type"=>"no_mistake"}, 1=>{"token"=>"the", "type"=>"word_choice_mistake"}, 2=>{"token"=>"this", "type"=>"word_choice_correction"}, 3=>{"token"=>"your", "type"=>"word_order_mistake"}, 4=>{"token"=>"bag", "type"=>"word_order_mistake"}, 5=>{"token"=>"?", "type"=>"missing_punctuation_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"Is\":\"no_mistake\"},\"1\":{\"the\":\"word_choice_mistake\"},\"2\":{\"this\":\"word_choice_mistake_correction\"},\"3\":{\"your\":\"word_order_mistake\"},\"4\":{\"bag\":\"word_order_mistake\"},\"5\":{\"?\":\"missing_punctuation_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -837,7 +798,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"He", "type"=>"no_mistake"}, 1=>{"token"=>"doesnt", "type"=>"punctuation_mistake"}, 2=>{"token"=>"doesn't", "type"=>"punctuation_correction"}, 3=>{"token"=>"wear", "type"=>"no_mistake"}, 4=>{"token"=>"a", "type"=>"no_mistake"}, 5=>{"token"=>"tie", "type"=>"no_mistake"}, 6=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"He\":\"no_mistake\"},\"1\":{\"doesnt\":\"punctuation_mistake\"},\"2\":{\"doesn't\":\"punctuation_mistake_correction\"},\"3\":{\"wear\":\"no_mistake\"},\"4\":{\"a\":\"no_mistake\"},\"5\":{\"tie\":\"no_mistake\"},\"6\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -858,7 +818,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"Rather", "type"=>"word_choice_mistake"}, 1=>{"token"=>"More", "type"=>"word_choice_correction"}, 2=>{"token"=>"than", "type"=>"no_mistake"}, 3=>{"token"=>"the", "type"=>"unnecessary_word_mistake"}, 4=>{"token"=>"TV", "type"=>"no_mistake"}, 5=>{"token"=>",", "type"=>"no_mistake"}, 6=>{"token"=>"I", "type"=>"no_mistake"}, 7=>{"token"=>"feel", "type"=>"no_mistake"}, 8=>{"token"=>"the", "type"=>"no_mistake"}, 9=>{"token"=>"appearance", "type"=>"no_mistake"}, 10=>{"token"=>"of", "type"=>"no_mistake"}, 11=>{"token"=>"internet", "type"=>"no_mistake"}, 12=>{"token"=>"to", "type"=>"no_mistake"}, 13=>{"token"=>"be", "type"=>"no_mistake"}, 14=>{"token"=>"more", "type"=>"no_mistake"}, 15=>{"token"=>"likely", "type"=>"missing_word_mistake"}, 16=>{"token"=>"to", "type"=>"missing_word_mistake"}, 17=>{"token"=>"interrupt", "type"=>"no_mistake"}, 18=>{"token"=>"our", "type"=>"no_mistake"}, 19=>{"token"=>"communication", "type"=>"no_mistake"}, 20=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"Rather\":\"word_choice_mistake\"},\"1\":{\"More\":\"word_choice_mistake_correction\"},\"2\":{\"than\":\"no_mistake\"},\"3\":{\"the\":\"unnecessary_word_mistake\"},\"4\":{\"TV\":\"no_mistake\"},\"5\":{\",\":\"no_mistake\"},\"6\":{\"I\":\"no_mistake\"},\"7\":{\"feel\":\"no_mistake\"},\"8\":{\"the\":\"no_mistake\"},\"9\":{\"appearance\":\"no_mistake\"},\"10\":{\"of\":\"no_mistake\"},\"11\":{\"internet\":\"no_mistake\"},\"12\":{\"to\":\"no_mistake\"},\"13\":{\"be\":\"no_mistake\"},\"14\":{\"more\":\"no_mistake\"},\"15\":{\"likely\":\"missing_word_mistake\"},\"16\":{\"to\":\"missing_word_mistake\"},\"17\":{\"interrupt\":\"no_mistake\"},\"18\":{\"our\":\"no_mistake\"},\"19\":{\"communication\":\"no_mistake\"},\"20\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -901,7 +860,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"That", "type"=>"no_mistake"}, 1=>{"token"=>"is", "type"=>"no_mistake"}, 2=>{"token"=>"the", "type"=>"no_mistake"}, 3=>{"token"=>"reason", "type"=>"missing_word_mistake"}, 4=>{"token"=>"why", "type"=>"no_mistake"}, 5=>{"token"=>"not", "type"=>"no_mistake"}, 6=>{"token"=>"only", "type"=>"no_mistake"}, 7=>{"token"=>"we", "type"=>"no_mistake"}, 8=>{"token"=>"can", "type"=>"no_mistake"}, 9=>{"token"=>"get", "type"=>"no_mistake"}, 10=>{"token"=>"information", "type"=>"no_mistake"}, 11=>{"token"=>"from", "type"=>"no_mistake"}, 12=>{"token"=>"TV", "type"=>"no_mistake"}, 13=>{"token"=>",", "type"=>"no_mistake"}, 14=>{"token"=>"but", "type"=>"no_mistake"}, 15=>{"token"=>"also", "type"=>"no_mistake"}, 16=>{"token"=>"we", "type"=>"no_mistake"}, 17=>{"token"=>"can", "type"=>"no_mistake"}, 18=>{"token"=>"get", "type"=>"no_mistake"}, 19=>{"token"=>"information", "type"=>"no_mistake"}, 20=>{"token"=>"from", "type"=>"no_mistake"}, 21=>{"token"=>"the", "type"=>"no_mistake"}, 22=>{"token"=>"internet", "type"=>"no_mistake"}, 23=>{"token"=>"nowadays", "type"=>"no_mistake"}, 24=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"That\":\"no_mistake\"},\"1\":{\"is\":\"no_mistake\"},\"2\":{\"the\":\"no_mistake\"},\"3\":{\"reason\":\"missing_word_mistake\"},\"4\":{\"why\":\"no_mistake\"},\"5\":{\"not\":\"no_mistake\"},\"6\":{\"only\":\"no_mistake\"},\"7\":{\"we\":\"no_mistake\"},\"8\":{\"can\":\"no_mistake\"},\"9\":{\"get\":\"no_mistake\"},\"10\":{\"information\":\"no_mistake\"},\"11\":{\"from\":\"no_mistake\"},\"12\":{\"TV\":\"no_mistake\"},\"13\":{\",\":\"no_mistake\"},\"14\":{\"but\":\"no_mistake\"},\"15\":{\"also\":\"no_mistake\"},\"16\":{\"we\":\"no_mistake\"},\"17\":{\"can\":\"no_mistake\"},\"18\":{\"get\":\"no_mistake\"},\"19\":{\"information\":\"no_mistake\"},\"20\":{\"from\":\"no_mistake\"},\"21\":{\"the\":\"no_mistake\"},\"22\":{\"internet\":\"no_mistake\"},\"23\":{\"nowadays\":\"no_mistake\"},\"24\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -922,7 +880,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"In", "type"=>"word_choice_mistake"}, 1=>{"token"=>"On", "type"=>"word_choice_correction"}, 2=>{"token"=>"the", "type"=>"no_mistake"}, 3=>{"token"=>"other", "type"=>"no_mistake"}, 4=>{"token"=>"hand", "type"=>"no_mistake"}, 5=>{"token"=>",", "type"=>"no_mistake"}, 6=>{"token"=>"TV", "type"=>"no_mistake"}, 7=>{"token"=>"topic", "type"=>"pluralization_mistake"}, 8=>{"token"=>"topics", "type"=>"pluralization_correction"}, 9=>{"token"=>"develop", "type"=>"no_mistake"}, 10=>{"token"=>"our", "type"=>"no_mistake"}, 11=>{"token"=>"relationship", "type"=>"no_mistake"}, 12=>{"token"=>"to", "type"=>"no_mistake"}, 13=>{"token"=>"provide", "type"=>"no_mistake"}, 14=>{"token"=>"some", "type"=>"no_mistake"}, 15=>{"token"=>"topics", "type"=>"no_mistake"}, 16=>{"token"=>"such", "type"=>"no_mistake"}, 17=>{"token"=>"as", "type"=>"no_mistake"}, 18=>{"token"=>"a", "type"=>"no_mistake"}, 19=>{"token"=>"news", "type"=>"no_mistake"}, 20=>{"token"=>",", "type"=>"no_mistake"}, 21=>{"token"=>"drama", "type"=>"no_mistake"}, 22=>{"token"=>",", "type"=>"no_mistake"}, 23=>{"token"=>"comedy", "type"=>"no_mistake"}, 24=>{"token"=>",", "type"=>"no_mistake"}, 25=>{"token"=>"animation", "type"=>"no_mistake"}, 26=>{"token"=>",", "type"=>"no_mistake"}, 27=>{"token"=>"and", "type"=>"no_mistake"}, 28=>{"token"=>"so", "type"=>"no_mistake"}, 29=>{"token"=>"on", "type"=>"no_mistake"}, 30=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"In\":\"word_choice_mistake\"},\"1\":{\"On\":\"word_choice_mistake_correction\"},\"2\":{\"the\":\"no_mistake\"},\"3\":{\"other\":\"no_mistake\"},\"4\":{\"hand\":\"no_mistake\"},\"5\":{\",\":\"no_mistake\"},\"6\":{\"TV\":\"no_mistake\"},\"7\":{\"topic\":\"pluralization_mistake\"},\"8\":{\"topics\":\"pluralization_mistake_correction\"},\"9\":{\"develop\":\"no_mistake\"},\"10\":{\"our\":\"no_mistake\"},\"11\":{\"relationship\":\"no_mistake\"},\"12\":{\"to\":\"no_mistake\"},\"13\":{\"provide\":\"no_mistake\"},\"14\":{\"some\":\"no_mistake\"},\"15\":{\"topics\":\"no_mistake\"},\"16\":{\"such\":\"no_mistake\"},\"17\":{\"as\":\"no_mistake\"},\"18\":{\"a\":\"no_mistake\"},\"19\":{\"news\":\"no_mistake\"},\"20\":{\",\":\"no_mistake\"},\"21\":{\"drama\":\"no_mistake\"},\"22\":{\",\":\"no_mistake\"},\"23\":{\"comedy\":\"no_mistake\"},\"24\":{\",\":\"no_mistake\"},\"25\":{\"animation\":\"no_mistake\"},\"26\":{\",\":\"no_mistake\"},\"27\":{\"and\":\"no_mistake\"},\"28\":{\"so\":\"no_mistake\"},\"29\":{\"on\":\"no_mistake\"},\"30\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -943,7 +900,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"carried", "type"=>"verb_mistake"}, 2=>{"token"=>"carry", "type"=>"verb_correction"}, 3=>{"token"=>".", "type"=>"punctuation_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"carried\":\"verb_mistake\"},\"2\":{\"carry\":\"verb_mistake_correction\"},\"3\":{\".\":\"punctuation_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -964,7 +920,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"went", "type"=>"no_mistake"}, 2=>{"token"=>"to", "type"=>"no_mistake"}, 3=>{"token"=>"the", "type"=>"no_mistake"}, 4=>{"token"=>"store", "type"=>"no_mistake"}, 5=>{"token"=>".", "type"=>"no_mistake"}, 6=>{"token"=>"I", "type"=>"unnecessary_word_mistake"}, 7=>{"token"=>"bought", "type"=>"unnecessary_word_mistake"}, 8=>{"token"=>"some", "type"=>"unnecessary_word_mistake"}, 9=>{"token"=>"eggs", "type"=>"unnecessary_word_mistake"}, 10=>{"token"=>".", "type"=>"punctuation_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"went\":\"no_mistake\"},\"2\":{\"to\":\"no_mistake\"},\"3\":{\"the\":\"no_mistake\"},\"4\":{\"store\":\"no_mistake\"},\"5\":{\".\":\"no_mistake\"},\"6\":{\"I\":\"unnecessary_word_mistake\"},\"7\":{\"bought\":\"unnecessary_word_mistake\"},\"8\":{\"some\":\"unnecessary_word_mistake\"},\"9\":{\"eggs\":\"unnecessary_word_mistake\"},\"10\":{\".\":\"punctuation_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -985,7 +940,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"missing_word_mistake"}, 1=>{"token"=>"can", "type"=>"word_order_mistake"}, 2=>{"token"=>"do", "type"=>"missing_word_mistake"}, 3=>{"token"=>"make", "type"=>"unnecessary_word_mistake"}, 4=>{"token"=>"a", "type"=>"word_order_mistake"}, 5=>{"token"=>"lot", "type"=>"word_order_mistake"}, 6=>{"token"=>"of", "type"=>"word_order_mistake"}, 7=>{"token"=>"things", "type"=>"word_order_mistake"}, 8=>{"token"=>"with", "type"=>"missing_word_mistake"}, 9=>{"token"=>"the", "type"=>"word_order_mistake"}, 10=>{"token"=>"laptop", "type"=>"word_order_mistake"}, 11=>{"token"=>"such", "type"=>"no_mistake"}, 12=>{"token"=>"as", "type"=>"no_mistake"}, 13=>{"token"=>"a", "type"=>"unnecessary_word_mistake"}, 14=>{"token"=>"searching", "type"=>"no_mistake"}, 15=>{"token"=>"knowledge", "type"=>"no_mistake"}, 16=>{"token"=>",", "type"=>"no_mistake"}, 17=>{"token"=>"writing", "type"=>"no_mistake"}, 18=>{"token"=>"down", "type"=>"no_mistake"}, 19=>{"token"=>"some", "type"=>"no_mistake"}, 20=>{"token"=>"memo", "type"=>"pluralization_mistake"}, 21=>{"token"=>"memos", "type"=>"pluralization_correction"}, 22=>{"token"=>",", "type"=>"no_mistake"}, 23=>{"token"=>"watching", "type"=>"no_mistake"}, 24=>{"token"=>"a", "type"=>"no_mistake"}, 25=>{"token"=>"movie", "type"=>"no_mistake"}, 26=>{"token"=>"and", "type"=>"no_mistake"}, 27=>{"token"=>"so", "type"=>"no_mistake"}, 28=>{"token"=>"on", "type"=>"no_mistake"}, 29=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"missing_word_mistake\"},\"1\":{\"can\":\"word_order_mistake\"},\"2\":{\"do\":\"missing_word_mistake\"},\"3\":{\"make\":\"unnecessary_word_mistake\"},\"4\":{\"a\":\"word_order_mistake\"},\"5\":{\"lot\":\"word_order_mistake\"},\"6\":{\"of\":\"word_order_mistake\"},\"7\":{\"things\":\"word_order_mistake\"},\"8\":{\"with\":\"missing_word_mistake\"},\"9\":{\"the\":\"word_order_mistake\"},\"10\":{\"laptop\":\"word_order_mistake\"},\"11\":{\"such\":\"no_mistake\"},\"12\":{\"as\":\"no_mistake\"},\"13\":{\"a\":\"unnecessary_word_mistake\"},\"14\":{\"searching\":\"no_mistake\"},\"15\":{\"knowledge\":\"no_mistake\"},\"16\":{\",\":\"no_mistake\"},\"17\":{\"writing\":\"no_mistake\"},\"18\":{\"down\":\"no_mistake\"},\"19\":{\"some\":\"no_mistake\"},\"20\":{\"memo\":\"pluralization_mistake\"},\"21\":{\"memos\":\"pluralization_mistake_correction\"},\"22\":{\",\":\"no_mistake\"},\"23\":{\"watching\":\"no_mistake\"},\"24\":{\"a\":\"no_mistake\"},\"25\":{\"movie\":\"no_mistake\"},\"26\":{\"and\":\"no_mistake\"},\"27\":{\"so\":\"no_mistake\"},\"28\":{\"on\":\"no_mistake\"},\"29\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -1006,7 +960,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"Actually", "type"=>"no_mistake"}, 1=>{"token"=>"when", "type"=>"unnecessary_word_mistake"}, 2=>{"token"=>",", "type"=>"missing_punctuation_mistake"}, 3=>{"token"=>"I", "type"=>"no_mistake"}, 4=>{"token"=>"attended", "type"=>"no_mistake"}, 5=>{"token"=>"the", "type"=>"no_mistake"}, 6=>{"token"=>"international", "type"=>"no_mistake"}, 7=>{"token"=>"meeting", "type"=>"no_mistake"}, 8=>{"token"=>",", "type"=>"punctuation_mistake"}, 9=>{"token"=>"or", "type"=>"unnecessary_word_mistake"}, 10=>{"token"=>"for", "type"=>"missing_word_mistake"}, 11=>{"token"=>"the", "type"=>"missing_word_mistake"}, 12=>{"token"=>"American", "type"=>"no_mistake"}, 13=>{"token"=>"society", "type"=>"capitalization_mistake"}, 14=>{"token"=>"Society", "type"=>"capitalization_correction"}, 15=>{"token"=>"for", "type"=>"no_mistake"}, 16=>{"token"=>"surgery", "type"=>"no_mistake"}, 17=>{"token"=>"of", "type"=>"no_mistake"}, 18=>{"token"=>"hand", "type"=>"no_mistake"}, 19=>{"token"=>",", "type"=>"punctuation_mistake"}, 20=>{"token"=>"at", "type"=>"unnecessary_word_mistake"}, 21=>{"token"=>"while", "type"=>"missing_word_mistake"}, 22=>{"token"=>"I", "type"=>"missing_word_mistake"}, 23=>{"token"=>"was", "type"=>"missing_word_mistake"}, 24=>{"token"=>"on", "type"=>"missing_word_mistake"}, 25=>{"token"=>"the", "type"=>"no_mistake"}, 26=>{"token"=>"trip", "type"=>"no_mistake"}, 27=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"Actually\":\"no_mistake\"},\"1\":{\"when\":\"unnecessary_word_mistake\"},\"2\":{\",\":\"missing_punctuation_mistake\"},\"3\":{\"I\":\"no_mistake\"},\"4\":{\"attended\":\"no_mistake\"},\"5\":{\"the\":\"no_mistake\"},\"6\":{\"international\":\"no_mistake\"},\"7\":{\"meeting\":\"no_mistake\"},\"8\":{\",\":\"punctuation_mistake\"},\"9\":{\"or\":\"unnecessary_word_mistake\"},\"10\":{\"for\":\"missing_word_mistake\"},\"11\":{\"the\":\"missing_word_mistake\"},\"12\":{\"American\":\"no_mistake\"},\"13\":{\"society\":\"capitalization_mistake\"},\"14\":{\"Society\":\"capitalization_mistake_correction\"},\"15\":{\"for\":\"no_mistake\"},\"16\":{\"surgery\":\"no_mistake\"},\"17\":{\"of\":\"no_mistake\"},\"18\":{\"hand\":\"no_mistake\"},\"19\":{\",\":\"punctuation_mistake\"},\"20\":{\"at\":\"unnecessary_word_mistake\"},\"21\":{\"while\":\"missing_word_mistake\"},\"22\":{\"I\":\"missing_word_mistake\"},\"23\":{\"was\":\"missing_word_mistake\"},\"24\":{\"on\":\"missing_word_mistake\"},\"25\":{\"the\":\"no_mistake\"},\"26\":{\"trip\":\"no_mistake\"},\"27\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -1048,7 +1001,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"This", "type"=>"no_mistake"}, 1=>{"token"=>"year", "type"=>"no_mistake"}, 2=>{"token"=>",", "type"=>"no_mistake"}, 3=>{"token"=>"going", "type"=>"no_mistake"}, 4=>{"token"=>"back", "type"=>"no_mistake"}, 5=>{"token"=>"to", "type"=>"no_mistake"}, 6=>{"token"=>"my", "type"=>"unnecessary_word_mistake"}, 7=>{"token"=>"work", "type"=>"no_mistake"}, 8=>{"token"=>"at", "type"=>"no_mistake"}, 9=>{"token"=>"my", "type"=>"no_mistake"}, 10=>{"token"=>"university", "type"=>"no_mistake"}, 11=>{"token"=>",", "type"=>"no_mistake"}, 12=>{"token"=>"I", "type"=>"no_mistake"}, 13=>{"token"=>"have", "type"=>"no_mistake"}, 14=>{"token"=>"less", "type"=>"no_mistake"}, 15=>{"token"=>"time", "type"=>"unnecessary_word_mistake"}, 16=>{"token"=>"to", "type"=>"unnecessary_word_mistake"}, 17=>{"token"=>"spend", "type"=>"unnecessary_word_mistake"}, 18=>{"token"=>"my", "type"=>"unnecessary_word_mistake"}, 19=>{"token"=>"private", "type"=>"no_mistake"}, 20=>{"token"=>"time", "type"=>"no_mistake"}, 21=>{"token"=>",", "type"=>"no_mistake"}, 22=>{"token"=>"for", "type"=>"no_mistake"}, 23=>{"token"=>"example", "type"=>"no_mistake"}, 24=>{"token"=>",", "type"=>"no_mistake"}, 25=>{"token"=>"morning", "type"=>"no_mistake"}, 26=>{"token"=>"conference", "type"=>"no_mistake"}, 27=>{"token"=>"at", "type"=>"no_mistake"}, 28=>{"token"=>"7:10", "type"=>"no_mistake"}, 29=>{"token"=>"on", "type"=>"no_mistake"}, 30=>{"token"=>"Tuesday", "type"=>"no_mistake"}, 31=>{"token"=>",", "type"=>"no_mistake"}, 32=>{"token"=>"at", "type"=>"no_mistake"}, 33=>{"token"=>"7:30", "type"=>"no_mistake"}, 34=>{"token"=>"on", "type"=>"no_mistake"}, 35=>{"token"=>"Thursday", "type"=>"no_mistake"}, 36=>{"token"=>",", "type"=>"no_mistake"}, 37=>{"token"=>"preparation", "type"=>"no_mistake"}, 38=>{"token"=>"for", "type"=>"no_mistake"}, 39=>{"token"=>"operation", "type"=>"pluralization_mistake"}, 40=>{"token"=>"operations", "type"=>"pluralization_correction"}, 41=>{"token"=>",", "type"=>"no_mistake"}, 42=>{"token"=>"meeting", "type"=>"pluralization_mistake"}, 43=>{"token"=>"meetings", "type"=>"pluralization_correction"}, 44=>{"token"=>",", "type"=>"no_mistake"}, 45=>{"token"=>"work", "type"=>"no_mistake"}, 46=>{"token"=>"for", "type"=>"no_mistake"}, 47=>{"token"=>"outpatients", "type"=>"no_mistake"}, 48=>{"token"=>"and", "type"=>"no_mistake"}, 49=>{"token"=>"so", "type"=>"no_mistake"}, 50=>{"token"=>"on", "type"=>"no_mistake"}, 51=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"This\":\"no_mistake\"},\"1\":{\"year\":\"no_mistake\"},\"2\":{\",\":\"no_mistake\"},\"3\":{\"going\":\"no_mistake\"},\"4\":{\"back\":\"no_mistake\"},\"5\":{\"to\":\"no_mistake\"},\"6\":{\"my\":\"unnecessary_word_mistake\"},\"7\":{\"work\":\"no_mistake\"},\"8\":{\"at\":\"no_mistake\"},\"9\":{\"my\":\"no_mistake\"},\"10\":{\"university\":\"no_mistake\"},\"11\":{\",\":\"no_mistake\"},\"12\":{\"I\":\"no_mistake\"},\"13\":{\"have\":\"no_mistake\"},\"14\":{\"less\":\"no_mistake\"},\"15\":{\"time\":\"unnecessary_word_mistake\"},\"16\":{\"to\":\"unnecessary_word_mistake\"},\"17\":{\"spend\":\"unnecessary_word_mistake\"},\"18\":{\"my\":\"unnecessary_word_mistake\"},\"19\":{\"private\":\"no_mistake\"},\"20\":{\"time\":\"no_mistake\"},\"21\":{\",\":\"no_mistake\"},\"22\":{\"for\":\"no_mistake\"},\"23\":{\"example\":\"no_mistake\"},\"24\":{\",\":\"no_mistake\"},\"25\":{\"morning\":\"no_mistake\"},\"26\":{\"conference\":\"no_mistake\"},\"27\":{\"at\":\"no_mistake\"},\"28\":{\"7:10\":\"no_mistake\"},\"29\":{\"on\":\"no_mistake\"},\"30\":{\"Tuesday\":\"no_mistake\"},\"31\":{\",\":\"no_mistake\"},\"32\":{\"at\":\"no_mistake\"},\"33\":{\"7:30\":\"no_mistake\"},\"34\":{\"on\":\"no_mistake\"},\"35\":{\"Thursday\":\"no_mistake\"},\"36\":{\",\":\"no_mistake\"},\"37\":{\"preparation\":\"no_mistake\"},\"38\":{\"for\":\"no_mistake\"},\"39\":{\"operation\":\"pluralization_mistake\"},\"40\":{\"operations\":\"pluralization_mistake_correction\"},\"41\":{\",\":\"no_mistake\"},\"42\":{\"meeting\":\"pluralization_mistake\"},\"43\":{\"meetings\":\"pluralization_mistake_correction\"},\"44\":{\",\":\"no_mistake\"},\"45\":{\"work\":\"no_mistake\"},\"46\":{\"for\":\"no_mistake\"},\"47\":{\"outpatients\":\"no_mistake\"},\"48\":{\"and\":\"no_mistake\"},\"49\":{\"so\":\"no_mistake\"},\"50\":{\"on\":\"no_mistake\"},\"51\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -1090,7 +1042,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"Her", "type"=>"no_mistake"}, 1=>{"token"=>"name", "type"=>"no_mistake"}, 2=>{"token"=>"was", "type"=>"verb_mistake"}, 3=>{"token"=>"is", "type"=>"verb_correction"}, 4=>{"token"=>"Dr.", "type"=>"no_mistake"}, 5=>{"token"=>"Cole", "type"=>"no_mistake"}, 6=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"Her\":\"no_mistake\"},\"1\":{\"name\":\"no_mistake\"},\"2\":{\"was\":\"verb_mistake\"},\"3\":{\"is\":\"verb_mistake_correction\"},\"4\":{\"Dr.\":\"no_mistake\"},\"5\":{\"Cole\":\"no_mistake\"},\"6\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -1111,7 +1062,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"Because", "type"=>"no_mistake"}, 1=>{"token"=>"of", "type"=>"missing_word_mistake"}, 2=>{"token"=>"gravity", "type"=>"no_mistake"}, 3=>{"token"=>".", "type"=>"punctuation_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"Because\":\"no_mistake\"},\"1\":{\"of\":\"missing_word_mistake\"},\"2\":{\"gravity\":\"no_mistake\"},\"3\":{\".\":\"punctuation_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
@@ -1132,7 +1082,6 @@ RSpec.describe ChatCorrect::Correct do

  it 'Annotates the corrections' do
  expect(@cc.correct).to eq({0=>{"token"=>"I", "type"=>"no_mistake"}, 1=>{"token"=>"gots", "type"=>"verb_mistake"}, 2=>{"token"=>"have", "type"=>"verb_correction"}, 3=>{"token"=>"a", "type"=>"no_mistake"}, 4=>{"token"=>"dog", "type"=>"no_mistake"}, 5=>{"token"=>".", "type"=>"no_mistake"}})
- #expect(@cc.correct).to eq("{\"0\":{\"I\":\"no_mistake\"},\"1\":{\"gots\":\"verb_mistake\"},\"2\":{\"have\":\"verb_mistake_correction\"},\"3\":{\"a\":\"no_mistake\"},\"4\":{\"dog\":\"no_mistake\"},\"5\":{\".\":\"no_mistake\"}}")
  end

  # it 'Counts the number of mistakes' do
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: chat_correct
  version: !ruby/object:Gem::Version
- version: 0.0.2
+ version: 0.0.3
  platform: ruby
  authors:
  - Kevin S. Dias
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2015-02-05 00:00:00.000000000 Z
+ date: 2015-02-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler