srl_ruby 0.4.13 → 0.4.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,25 +9,26 @@ require_relative '../../lib/regex/match_option'
9
9
  module Regex # This module is used as a namespace
10
10
  describe MatchOption do
11
11
  let(:sample_child) { double('fake-child') }
12
- subject { MatchOption.new(sample_child, [Regexp::MULTILINE, Regexp::IGNORECASE]) }
12
+
13
+ subject(:option) { described_class.new(sample_child, [Regexp::MULTILINE, Regexp::IGNORECASE]) }
13
14
 
14
15
  context 'Creation & initialisation' do
15
- it 'should be created with a child and flags' do
16
- expect { MatchOption.new(sample_child, []) }.not_to raise_error
16
+ it 'is created with a child and flags' do
17
+ expect { described_class.new(sample_child, []) }.not_to raise_error
17
18
  end
18
19
 
19
- it 'should know its child' do
20
- expect(subject.child).to eq(sample_child)
20
+ it 'knows its child' do
21
+ expect(option.child).to eq(sample_child)
21
22
  end
22
23
 
23
- it 'should know its flags' do
24
- expect(subject.flags).to eq([Regexp::MULTILINE, Regexp::IGNORECASE])
24
+ it 'knows its flags' do
25
+ expect(option.flags).to eq([Regexp::MULTILINE, Regexp::IGNORECASE])
25
26
  end
26
27
  end # context
27
28
 
28
29
  context 'Provided services' do
29
- it 'should combine the flag bits' do
30
- expect(subject.combine_opts).to eq(Regexp::MULTILINE | Regexp::IGNORECASE)
30
+ it 'combines the flag bits' do
31
+ expect(option.combine_opts).to eq(Regexp::MULTILINE | Regexp::IGNORECASE)
31
32
  end
32
33
  end # context
33
34
  end # describe
@@ -8,31 +8,32 @@ require_relative '../../lib/regex/monadic_expression'
8
8
  module Regex # This module is used as a namespace
9
9
  describe MonadicExpression do
10
10
  let(:sample_child) { double('fake_regex') }
11
- subject { MonadicExpression.new(sample_child) }
11
+
12
+ subject(:monadic_expr) { described_class.new(sample_child) }
12
13
 
13
14
  context 'Creation & initialisation' do
14
- it 'should be created with a child expression' do
15
- expect { MonadicExpression.new(sample_child) }.not_to raise_error
15
+ it 'is created with a child expression' do
16
+ expect { described_class.new(sample_child) }.not_to raise_error
16
17
  end
17
18
 
18
- it 'should know its child' do
19
- expect(subject.child).to eq(sample_child)
19
+ it 'knows its child' do
20
+ expect(monadic_expr.child).to eq(sample_child)
20
21
  end
21
22
 
22
- it 'should know that it is not atomic' do
23
- expect(subject).not_to be_atomic
23
+ it 'knows that it is not atomic' do
24
+ expect(monadic_expr).not_to be_atomic
24
25
  end
25
26
  end # context
26
27
 
27
28
  context 'Provided services' do
28
- it 'should propagate done! notification' do
29
- expect(sample_child).to receive(:done!)
30
- expect { subject.done! }.not_to raise_error
29
+ it 'propagates done! notification' do
30
+ allow(sample_child).to receive(:done!)
31
+ expect { monadic_expr.done! }.not_to raise_error
31
32
  end
32
33
 
33
- it 'should be unchanged by a lazy! notification' do
34
- expect(sample_child).to receive(:lazy!)
35
- expect { subject.lazy! }.not_to raise_error
34
+ it 'is unchanged by a lazy! notification' do
35
+ allow(sample_child).to receive(:lazy!)
36
+ expect { monadic_expr.lazy! }.not_to raise_error
36
37
  end
37
38
  end # context
38
39
  end # describe
@@ -9,39 +9,39 @@ require_relative '../../lib/regex/multiplicity'
9
9
  module Regex # This module is used as a namespace
10
10
  describe Multiplicity do
11
11
  context 'Creation & initialisation' do
12
- it 'should be created with 3 arguments' do
12
+ it 'is created with 3 arguments' do
13
13
  # Valid cases: initialized with two integer values and a policy symbol
14
14
  %i[greedy lazy possessive].each do |policy|
15
- expect { Multiplicity.new(0, 1, policy) }.not_to raise_error
15
+ expect { described_class.new(0, 1, policy) }.not_to raise_error
16
16
  end
17
17
 
18
18
  # Invalid case: initialized with invalid policy value
19
19
  err = StandardError
20
20
  msg = "Invalid repetition policy 'KO'."
21
- expect { Multiplicity.new(0, :more, 'KO') }.to raise_error(err, msg)
21
+ expect { described_class.new(0, :more, 'KO') }.to raise_error(err, msg)
22
22
  end
23
23
  end
24
24
 
25
25
  context 'Provided services' do
26
26
  # rubocop: disable Style/CombinableLoops
27
- it 'should know its text representation' do
27
+ it 'knows its text representation' do
28
28
  policy2text = { greedy: '', lazy: '?', possessive: '+' }
29
29
 
30
30
  # Case: zero or one
31
31
  policy2text.each_key do |policy|
32
- multi = Multiplicity.new(0, 1, policy)
32
+ multi = described_class.new(0, 1, policy)
33
33
  expect(multi.to_str).to eq("?#{policy2text[policy]}")
34
34
  end
35
35
 
36
36
  # Case: zero or more
37
37
  policy2text.each_key do |policy|
38
- multi = Multiplicity.new(0, :more, policy)
38
+ multi = described_class.new(0, :more, policy)
39
39
  expect(multi.to_str).to eq("*#{policy2text[policy]}")
40
40
  end
41
41
 
42
42
  # Case: one or more
43
43
  policy2text.each_key do |policy|
44
- multi = Multiplicity.new(1, :more, policy)
44
+ multi = described_class.new(1, :more, policy)
45
45
  expect(multi.to_str).to eq("+#{policy2text[policy]}")
46
46
  end
47
47
 
@@ -49,7 +49,7 @@ module Regex # This module is used as a namespace
49
49
  policy2text.each_key do |policy|
50
50
  samples = [1, 2, 5, 100]
51
51
  samples.each do |count|
52
- multi = Multiplicity.new(count, count, policy)
52
+ multi = described_class.new(count, count, policy)
53
53
  expect(multi.to_str).to eq("{#{count}}#{policy2text[policy]}")
54
54
  end
55
55
  end
@@ -59,7 +59,7 @@ module Regex # This module is used as a namespace
59
59
  samples = [1, 2, 5, 100]
60
60
  samples.each do |count|
61
61
  upper = count + 1 + rand(20)
62
- multi = Multiplicity.new(count, upper, policy)
62
+ multi = described_class.new(count, upper, policy)
63
63
  expectation = "{#{count},#{upper}}#{policy2text[policy]}"
64
64
  expect(multi.to_str).to eq(expectation)
65
65
  end
@@ -69,7 +69,7 @@ module Regex # This module is used as a namespace
69
69
  policy2text.each_key do |policy|
70
70
  samples = [2, 3, 5, 100]
71
71
  samples.each do |count|
72
- multi = Multiplicity.new(count, :more, policy)
72
+ multi = described_class.new(count, :more, policy)
73
73
  expect(multi.to_str).to eq("{#{count},}#{policy2text[policy]}")
74
74
  end
75
75
  end
@@ -9,23 +9,24 @@ module Regex # This module is used as a namespace
9
9
  describe Repetition do
10
10
  let(:optional) { Multiplicity.new(0, 1, :possessive) }
11
11
  let(:sample_child) { double('fake_regex') }
12
- subject { Repetition.new(sample_child, optional) }
12
+
13
+ subject(:repetition) { described_class.new(sample_child, optional) }
13
14
 
14
15
  context 'Creation & initialisation' do
15
- it 'should be created with a child expression and a multiplicity' do
16
- expect { Repetition.new(sample_child, optional) }.not_to raise_error
16
+ it 'is created with a child expression and a multiplicity' do
17
+ expect { described_class.new(sample_child, optional) }.not_to raise_error
17
18
  end
18
19
 
19
- it 'should its multiplicity' do
20
- expect(subject.multiplicity).to eq(optional)
20
+ it 'knows its multiplicity' do
21
+ expect(repetition.multiplicity).to eq(optional)
21
22
  end
22
23
  end # context
23
24
 
24
25
  context 'Provided services' do
25
- it 'should change its policy with lazy! notification' do
26
- expect(sample_child).to receive(:lazy!)
27
- expect { subject.lazy! }.not_to raise_error
28
- expect(subject.multiplicity.policy).to eq(:lazy)
26
+ it 'changes its policy with lazy! notification' do
27
+ allow(sample_child).to receive(:lazy!)
28
+ expect { repetition.lazy! }.not_to raise_error
29
+ expect(repetition.multiplicity.policy).to eq(:lazy)
29
30
  end
30
31
  end # context
31
32
  end # describe
@@ -4,6 +4,6 @@ require_relative '../spec_helper'
4
4
 
5
5
  RSpec.describe SrlRuby do
6
6
  it 'has a version number' do
7
- expect(SrlRuby::VERSION).not_to be nil
7
+ expect(SrlRuby::VERSION).not_to be_nil
8
8
  end
9
9
  end
@@ -13,81 +13,81 @@ module SrlRuby
13
13
  end
14
14
  end
15
15
 
16
- subject { Tokenizer.new('') }
16
+ subject(:tokenizer) { described_class.new('') }
17
17
 
18
18
  context 'Initialization:' do
19
- it 'should be initialized with a text to tokenize' do
20
- expect { Tokenizer.new('anything') }.not_to raise_error
19
+ it 'is initialized with a text to tokenize' do
20
+ expect { described_class.new('anything') }.not_to raise_error
21
21
  end
22
22
 
23
- it 'should have its scanner initialized' do
24
- expect(subject.scanner).to be_kind_of(StringScanner)
23
+ it 'has its scanner initialized' do
24
+ expect(tokenizer.scanner).to be_a(StringScanner)
25
25
  end
26
26
  end # context
27
27
 
28
28
  context 'Single token recognition:' do
29
- it 'should tokenize delimiters and separators' do
30
- subject.scanner.string = ','
31
- token = subject.tokens.first
32
- expect(token).to be_kind_of(Rley::Lexical::Token)
29
+ it 'tokenizes delimiters and separators' do
30
+ tokenizer.scanner.string = ','
31
+ token = tokenizer.tokens.first
32
+ expect(token).to be_a(Rley::Lexical::Token)
33
33
  expect(token.terminal).to eq('COMMA')
34
34
  expect(token.lexeme).to eq(',')
35
35
  end
36
36
 
37
- it 'should tokenize keywords' do
37
+ it 'tokenizes keywords' do
38
38
  sample = 'between Exactly oncE optional TWICE'
39
- subject.scanner.string = sample
40
- subject.tokens.each do |tok|
41
- expect(tok).to be_kind_of(Rley::Lexical::Token)
39
+ tokenizer.scanner.string = sample
40
+ tokenizer.tokens.each do |tok|
41
+ expect(tok).to be_a(Rley::Lexical::Token)
42
42
  expect(tok.terminal).to eq(tok.lexeme.upcase)
43
43
  end
44
44
  end
45
45
 
46
- it 'should tokenize integer values' do
47
- subject.scanner.string = ' 123 '
48
- token = subject.tokens.first
49
- expect(token).to be_kind_of(Rley::Lexical::Token)
46
+ it 'tokenizes integer values' do
47
+ tokenizer.scanner.string = ' 123 '
48
+ token = tokenizer.tokens.first
49
+ expect(token).to be_a(Rley::Lexical::Token)
50
50
  expect(token.terminal).to eq('INTEGER')
51
51
  expect(token.lexeme).to eq('123')
52
52
  end
53
53
 
54
- it 'should tokenize single digits' do
55
- subject.scanner.string = ' 1 '
56
- token = subject.tokens.first
57
- expect(token).to be_kind_of(Rley::Lexical::Token)
54
+ it 'tokenizes single digits' do
55
+ tokenizer.scanner.string = ' 1 '
56
+ token = tokenizer.tokens.first
57
+ expect(token).to be_a(Rley::Lexical::Token)
58
58
  expect(token.terminal).to eq('DIGIT_LIT')
59
59
  expect(token.lexeme).to eq('1')
60
60
  end
61
61
  end # context
62
62
 
63
63
  context 'String literal tokenization:' do
64
- it "should recognize 'literally ...'" do
64
+ it "Recognizes 'literally ...'" do
65
65
  input = 'literally "hello"'
66
- subject.scanner.string = input
66
+ tokenizer.scanner.string = input
67
67
  expectations = [
68
68
  %w[LITERALLY literally],
69
69
  %w[STRING_LIT hello]
70
70
  ]
71
- match_expectations(subject, expectations)
71
+ match_expectations(tokenizer, expectations)
72
72
  end
73
73
  end # context
74
74
 
75
75
  context 'Character range tokenization:' do
76
- it "should recognize 'letter from ... to ...'" do
76
+ it "recognizes 'letter from ... to ...'" do
77
77
  input = 'letter a to f'
78
- subject.scanner.string = input
78
+ tokenizer.scanner.string = input
79
79
  expectations = [
80
80
  %w[LETTER letter],
81
81
  %w[LETTER_LIT a],
82
82
  %w[TO to],
83
83
  %w[LETTER_LIT f]
84
84
  ]
85
- match_expectations(subject, expectations)
85
+ match_expectations(tokenizer, expectations)
86
86
  end
87
87
 
88
- it "should recognize 'letter from ... to ... followed by comma'" do
88
+ it "recognizes 'letter from ... to ... followed by comma'" do
89
89
  input = 'letter a to f,'
90
- subject.scanner.string = input
90
+ tokenizer.scanner.string = input
91
91
  expectations = [
92
92
  %w[LETTER letter],
93
93
  %w[LETTER_LIT a],
@@ -95,25 +95,25 @@ module SrlRuby
95
95
  %w[LETTER_LIT f],
96
96
  %w[COMMA ,]
97
97
  ]
98
- match_expectations(subject, expectations)
98
+ match_expectations(tokenizer, expectations)
99
99
  end
100
100
  end # context
101
101
 
102
102
  context 'Quantifier tokenization:' do
103
- it "should recognize 'exactly ... times'" do
103
+ it "recognizes 'exactly ... times'" do
104
104
  input = 'exactly 4 Times'
105
- subject.scanner.string = input
105
+ tokenizer.scanner.string = input
106
106
  expectations = [
107
107
  %w[EXACTLY exactly],
108
108
  %w[DIGIT_LIT 4],
109
109
  %w[TIMES Times]
110
110
  ]
111
- match_expectations(subject, expectations)
111
+ match_expectations(tokenizer, expectations)
112
112
  end
113
113
 
114
- it "should recognize 'between ... and ... times'" do
114
+ it "recognizes 'between ... and ... times'" do
115
115
  input = 'Between 2 AND 4 times'
116
- subject.scanner.string = input
116
+ tokenizer.scanner.string = input
117
117
  expectations = [
118
118
  %w[BETWEEN Between],
119
119
  %w[DIGIT_LIT 2],
@@ -121,41 +121,41 @@ module SrlRuby
121
121
  %w[DIGIT_LIT 4],
122
122
  %w[TIMES times]
123
123
  ]
124
- match_expectations(subject, expectations)
124
+ match_expectations(tokenizer, expectations)
125
125
  end
126
126
 
127
- it "should recognize 'once or more'" do
127
+ it "recognizes 'once or more'" do
128
128
  input = 'Once or MORE'
129
- subject.scanner.string = input
129
+ tokenizer.scanner.string = input
130
130
  expectations = [
131
131
  %w[ONCE Once],
132
132
  %w[OR or],
133
133
  %w[MORE MORE]
134
134
  ]
135
- match_expectations(subject, expectations)
135
+ match_expectations(tokenizer, expectations)
136
136
  end
137
137
 
138
- it "should recognize 'never or more'" do
138
+ it "recognizes 'never or more'" do
139
139
  input = 'never or more'
140
- subject.scanner.string = input
140
+ tokenizer.scanner.string = input
141
141
  expectations = [
142
142
  %w[NEVER never],
143
143
  %w[OR or],
144
144
  %w[MORE more]
145
145
  ]
146
- match_expectations(subject, expectations)
146
+ match_expectations(tokenizer, expectations)
147
147
  end
148
148
 
149
- it "should recognize 'at least ... times'" do
149
+ it "recognizes 'at least ... times'" do
150
150
  input = 'at least 10 times'
151
- subject.scanner.string = input
151
+ tokenizer.scanner.string = input
152
152
  expectations = [
153
153
  %w[AT at],
154
154
  %w[LEAST least],
155
155
  %w[INTEGER 10],
156
156
  %w[TIMES times]
157
157
  ]
158
- match_expectations(subject, expectations)
158
+ match_expectations(tokenizer, expectations)
159
159
  end
160
160
  end # context
161
161
  end # describe