srl_ruby 0.4.13 → 0.4.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,6 +4,6 @@ require_relative '../spec_helper'
4
4
 
5
5
  RSpec.describe SrlRuby do
6
6
  it 'has a version number' do
7
- expect(SrlRuby::VERSION).not_to be nil
7
+ expect(SrlRuby::VERSION).not_to be_nil
8
8
  end
9
9
  end
@@ -13,81 +13,81 @@ module SrlRuby
13
13
  end
14
14
  end
15
15
 
16
- subject { Tokenizer.new('') }
16
+ subject(:tokenizer) { described_class.new('') }
17
17
 
18
18
  context 'Initialization:' do
19
- it 'should be initialized with a text to tokenize' do
20
- expect { Tokenizer.new('anything') }.not_to raise_error
19
+ it 'is initialized with a text to tokenize' do
20
+ expect { described_class.new('anything') }.not_to raise_error
21
21
  end
22
22
 
23
- it 'should have its scanner initialized' do
24
- expect(subject.scanner).to be_kind_of(StringScanner)
23
+ it 'has its scanner initialized' do
24
+ expect(tokenizer.scanner).to be_a(StringScanner)
25
25
  end
26
26
  end # context
27
27
 
28
28
  context 'Single token recognition:' do
29
- it 'should tokenize delimiters and separators' do
30
- subject.scanner.string = ','
31
- token = subject.tokens.first
32
- expect(token).to be_kind_of(Rley::Lexical::Token)
29
+ it 'tokenizes delimiters and separators' do
30
+ tokenizer.scanner.string = ','
31
+ token = tokenizer.tokens.first
32
+ expect(token).to be_a(Rley::Lexical::Token)
33
33
  expect(token.terminal).to eq('COMMA')
34
34
  expect(token.lexeme).to eq(',')
35
35
  end
36
36
 
37
- it 'should tokenize keywords' do
37
+ it 'tokenizes keywords' do
38
38
  sample = 'between Exactly oncE optional TWICE'
39
- subject.scanner.string = sample
40
- subject.tokens.each do |tok|
41
- expect(tok).to be_kind_of(Rley::Lexical::Token)
39
+ tokenizer.scanner.string = sample
40
+ tokenizer.tokens.each do |tok|
41
+ expect(tok).to be_a(Rley::Lexical::Token)
42
42
  expect(tok.terminal).to eq(tok.lexeme.upcase)
43
43
  end
44
44
  end
45
45
 
46
- it 'should tokenize integer values' do
47
- subject.scanner.string = ' 123 '
48
- token = subject.tokens.first
49
- expect(token).to be_kind_of(Rley::Lexical::Token)
46
+ it 'tokenizes integer values' do
47
+ tokenizer.scanner.string = ' 123 '
48
+ token = tokenizer.tokens.first
49
+ expect(token).to be_a(Rley::Lexical::Token)
50
50
  expect(token.terminal).to eq('INTEGER')
51
51
  expect(token.lexeme).to eq('123')
52
52
  end
53
53
 
54
- it 'should tokenize single digits' do
55
- subject.scanner.string = ' 1 '
56
- token = subject.tokens.first
57
- expect(token).to be_kind_of(Rley::Lexical::Token)
54
+ it 'tokenizes single digits' do
55
+ tokenizer.scanner.string = ' 1 '
56
+ token = tokenizer.tokens.first
57
+ expect(token).to be_a(Rley::Lexical::Token)
58
58
  expect(token.terminal).to eq('DIGIT_LIT')
59
59
  expect(token.lexeme).to eq('1')
60
60
  end
61
61
  end # context
62
62
 
63
63
  context 'String literal tokenization:' do
64
- it "should recognize 'literally ...'" do
64
+ it "recognizes 'literally ...'"
65
65
  input = 'literally "hello"'
66
- subject.scanner.string = input
66
+ tokenizer.scanner.string = input
67
67
  expectations = [
68
68
  %w[LITERALLY literally],
69
69
  %w[STRING_LIT hello]
70
70
  ]
71
- match_expectations(subject, expectations)
71
+ match_expectations(tokenizer, expectations)
72
72
  end
73
73
  end # context
74
74
 
75
75
  context 'Character range tokenization:' do
76
- it "should recognize 'letter from ... to ...'" do
76
+ it "recognizes 'letter from ... to ...'" do
77
77
  input = 'letter a to f'
78
- subject.scanner.string = input
78
+ tokenizer.scanner.string = input
79
79
  expectations = [
80
80
  %w[LETTER letter],
81
81
  %w[LETTER_LIT a],
82
82
  %w[TO to],
83
83
  %w[LETTER_LIT f]
84
84
  ]
85
- match_expectations(subject, expectations)
85
+ match_expectations(tokenizer, expectations)
86
86
  end
87
87
 
88
- it "should recognize 'letter from ... to ... followed by comma'" do
88
+ it "recognizes 'letter from ... to ... followed by comma'" do
89
89
  input = 'letter a to f,'
90
- subject.scanner.string = input
90
+ tokenizer.scanner.string = input
91
91
  expectations = [
92
92
  %w[LETTER letter],
93
93
  %w[LETTER_LIT a],
@@ -95,25 +95,25 @@ module SrlRuby
95
95
  %w[LETTER_LIT f],
96
96
  %w[COMMA ,]
97
97
  ]
98
- match_expectations(subject, expectations)
98
+ match_expectations(tokenizer, expectations)
99
99
  end
100
100
  end # context
101
101
 
102
102
  context 'Quantifier tokenization:' do
103
- it "should recognize 'exactly ... times'" do
103
+ it "recognizes 'exactly ... times'" do
104
104
  input = 'exactly 4 Times'
105
- subject.scanner.string = input
105
+ tokenizer.scanner.string = input
106
106
  expectations = [
107
107
  %w[EXACTLY exactly],
108
108
  %w[DIGIT_LIT 4],
109
109
  %w[TIMES Times]
110
110
  ]
111
- match_expectations(subject, expectations)
111
+ match_expectations(tokenizer, expectations)
112
112
  end
113
113
 
114
- it "should recognize 'between ... and ... times'" do
114
+ it "recognizes 'between ... and ... times'" do
115
115
  input = 'Between 2 AND 4 times'
116
- subject.scanner.string = input
116
+ tokenizer.scanner.string = input
117
117
  expectations = [
118
118
  %w[BETWEEN Between],
119
119
  %w[DIGIT_LIT 2],
@@ -121,41 +121,41 @@ module SrlRuby
121
121
  %w[DIGIT_LIT 4],
122
122
  %w[TIMES times]
123
123
  ]
124
- match_expectations(subject, expectations)
124
+ match_expectations(tokenizer, expectations)
125
125
  end
126
126
 
127
- it "should recognize 'once or more'" do
127
+ it "recognizes 'once or more'" do
128
128
  input = 'Once or MORE'
129
- subject.scanner.string = input
129
+ tokenizer.scanner.string = input
130
130
  expectations = [
131
131
  %w[ONCE Once],
132
132
  %w[OR or],
133
133
  %w[MORE MORE]
134
134
  ]
135
- match_expectations(subject, expectations)
135
+ match_expectations(tokenizer, expectations)
136
136
  end
137
137
 
138
- it "should recognize 'never or more'" do
138
+ it "recognizes 'never or more'" do
139
139
  input = 'never or more'
140
- subject.scanner.string = input
140
+ tokenizer.scanner.string = input
141
141
  expectations = [
142
142
  %w[NEVER never],
143
143
  %w[OR or],
144
144
  %w[MORE more]
145
145
  ]
146
- match_expectations(subject, expectations)
146
+ match_expectations(tokenizer, expectations)
147
147
  end
148
148
 
149
- it "should recognize 'at least ... times'" do
149
+ it "recognizes 'at least ... times'" do
150
150
  input = 'at least 10 times'
151
- subject.scanner.string = input
151
+ tokenizer.scanner.string = input
152
152
  expectations = [
153
153
  %w[AT at],
154
154
  %w[LEAST least],
155
155
  %w[INTEGER 10],
156
156
  %w[TIMES times]
157
157
  ]
158
- match_expectations(subject, expectations)
158
+ match_expectations(tokenizer, expectations)
159
159
  end
160
160
  end # context
161
161
  end # describe