skeem 0.2.21 → 0.2.22

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
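Most of the spec changes below track a single API rename (`Tokenizer#reinitialize` is now `Tokenizer#reset`) together with a move to a named RSpec subject. As orientation, here is a minimal usage sketch of that API. The names `Skeem::Tokenizer.new`, `#reset`, `#tokens`, and the token accessors `#terminal`/`#lexeme` are taken from the diff itself; the surrounding script is illustrative only.

    require 'skeem'

    # Minimal sketch of the tokenizer API exercised by the updated spec.
    tokenizer = Skeem::Tokenizer.new('(+ 2 3)')
    tokenizer.tokens.each do |token|
      puts "#{token.terminal} #{token.lexeme.inspect}"
    end

    # 0.2.22 reuses an instance through #reset (formerly #reinitialize):
    tokenizer.reset('#t')
    puts tokenizer.tokens.first.lexeme # => true (per the boolean spec below)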
@@ -16,8 +16,8 @@ module Skeem
     # Assumption: subject is a Skeem::Tokenizer
     def check_tokens(tokenTests, tokType)
       tokenTests.each do |(input, prediction)|
-        subject.reinitialize(input)
-        token = subject.tokens.first
+        tokenizer.reset(input)
+        token = tokenizer.tokens.first
         expect(token.terminal).to eq(tokType)
         expect(token.lexeme).to eq(prediction)
       end
@@ -28,23 +28,23 @@ module Skeem
     end

     # Default instantiation
-    subject { Tokenizer.new('') }
+    subject(:tokenizer) { described_class.new('') }

     context 'Initialization:' do
-      it 'should be initialized with a text to tokenize' do
-        expect { Tokenizer.new('(+ 2 3)') }.not_to raise_error
+      it 'is initialized with a text to tokenize' do
+        expect { described_class.new('(+ 2 3)') }.not_to raise_error
       end

-      it 'should have its scanner initialized' do
-        expect(subject.scanner).to be_kind_of(StringScanner)
+      it 'has its scanner initialized' do
+        expect(tokenizer.scanner).to be_a(StringScanner)
       end
     end # context

     context 'Delimiter and separator token recognition:' do
-      it 'should tokenize single char delimiters' do
-        subject.reinitialize("( ) ' ` . , ,@")
-        tokens = subject.tokens
-        tokens.each { |token| expect(token).to be_kind_of(Rley::Lexical::Token) }
+      it 'tokenizes single char delimiters' do
+        tokenizer.reset("( ) ' ` . , ,@")
+        tokens = tokenizer.tokens
+        expect(tokens).to all(be_a(Rley::Lexical::Token))
         terminals = tokens.map(&:terminal)
         prediction = %w[LPAREN RPAREN APOSTROPHE
                         GRAVE_ACCENT PERIOD
@@ -54,7 +54,7 @@ module Skeem
     end # context

     context 'Boolean literals recognition:' do
-      it 'should tokenize boolean constants' do
+      it 'tokenizes boolean constants' do
         tests = [
           # couple [raw input, expected]
           ['#t', true],
@@ -68,7 +68,7 @@ module Skeem
     end # context

     context 'Integer literals recognition:' do
-      it 'should tokenize integers in default radix 10' do
+      it 'tokenizes integers in default radix 10' do
         tests = [
           # couple [raw input, expected]
           ['0', 0],
@@ -82,7 +82,7 @@ module Skeem
         check_tokens(tests, 'INTEGER')
       end

-      it 'should tokenize integers with explicit radix 10' do
+      it 'tokenizes integers with explicit radix 10' do
         tests = [
           # couple [raw input, expected]
           ['#d0', 0],
@@ -96,7 +96,7 @@ module Skeem
         check_tokens(tests, 'INTEGER')
       end

-      it 'should tokenize integers in hexadecimal notation' do
+      it 'tokenizes integers in hexadecimal notation' do
         tests = [
           # couple [raw input, expected]
           ['#x0', 0],
@@ -111,7 +111,7 @@ module Skeem
     end # context

     context 'Rational literals recognition:' do
-      it 'should tokenize rational in default radix 10' do
+      it 'tokenizes rational in default radix 10' do
         tests = [
           # couple [raw input, expected]
           ['1/2', Rational(1, 2)],
@@ -121,8 +121,8 @@ module Skeem
         check_tokens(tests, 'RATIONAL')

         # Special case: implicit promotion to integer
-        subject.reinitialize('8/4')
-        token = subject.tokens.first
+        tokenizer.reset('8/4')
+        token = tokenizer.tokens.first
         expect(token.terminal).to eq('INTEGER')
         expect(token.lexeme).to eq(2)
       end
@@ -130,7 +130,7 @@ module Skeem

     context 'Real number recognition:' do
       # rubocop: disable Style/ExponentialNotation
-      it 'should tokenize real numbers' do
+      it 'tokenizes real numbers' do
         tests = [
           # couple [raw input, expected]
           ["\t\t3.45e+6", 3.45e+6],
@@ -145,7 +145,7 @@ module Skeem
     end # context

     context 'Character literal recognition:' do
-      it 'should tokenize named characters' do
+      it 'tokenizes named characters' do
         tests = [
           # couple [raw input, expected]
           ['#\alarm', ?\a],
@@ -156,7 +156,7 @@ module Skeem
         check_tokens(tests, 'CHAR')
       end

-      it 'should tokenize escaped characters' do
+      it 'tokenizes escaped characters' do
         tests = [
           # couple [raw input, expected]
           ['#\a', ?a],
@@ -168,7 +168,7 @@ module Skeem
         check_tokens(tests, 'CHAR')
       end

-      it 'should tokenize hex-coded characters' do
+      it 'tokenizes hex-coded characters' do
         tests = [
           # couple [raw input, expected]
           ['#\x07', ?\a],
@@ -181,7 +181,7 @@ module Skeem
     end # context

     context 'String recognition:' do
-      it 'should tokenize strings' do
+      it 'tokenizes strings' do
         examples = [
           # Some examples taken from R7RS document
           '"Hello, world"',
@@ -190,8 +190,8 @@ module Skeem

         examples.each do |input|
           # puts input
-          subject.reinitialize(input)
-          token = subject.tokens.first
+          tokenizer.reset(input)
+          token = tokenizer.tokens.first
           expect(token.terminal).to eq('STRING_LIT')
           expect(token.lexeme).to eq(unquoted(input))
         end
@@ -204,7 +204,7 @@ module Skeem
     # "\x03B1; is named GREEK SMALL LETTER ALPHA."

     context 'Identifier recognition:' do
-      it 'should tokenize identifiers' do
+      it 'tokenizes identifiers' do
         examples = [
           # Examples taken from R7RS document
           '+', '+soup+', '<=?',
@@ -215,8 +215,8 @@ module Skeem
         ]

         examples.each do |input|
-          subject.reinitialize(input)
-          token = subject.tokens.first
+          tokenizer.reset(input)
+          token = tokenizer.tokens.first
           if token.lexeme == 'lambda'
             expect(token.terminal).to eq('LAMBDA')
           else
@@ -226,19 +226,19 @@ module Skeem
         end
       end

-      it 'should recognize ellipsis' do
+      it 'recognizes ellipsis' do
         input = '...'
-        subject.reinitialize(input)
-        token = subject.tokens.first
+        tokenizer.reset(input)
+        token = tokenizer.tokens.first
         expect(token.terminal).to eq('ELLIPSIS')
         expect(token.lexeme).to eq(input)
       end
     end # context

     context 'Vector recognition' do
-      it 'should tokenize vectors' do
+      it 'tokenizes vectors' do
         input = '#(0 -2 "Sue")'
-        subject.reinitialize(input)
+        tokenizer.reset(input)
         predictions = [
           ['VECTOR_BEGIN', '#(', 1],
           ['INTEGER', 0, 3],
@@ -246,7 +246,7 @@ module Skeem
           ['STRING_LIT', 'Sue', 8],
           ['RPAREN', ')', 13]
         ]
-        tokens = subject.tokens
+        tokens = tokenizer.tokens
         predictions.each_with_index do |(pr_terminal, pr_lexeme, pr_position), i|
           expect(tokens[i].terminal).to eq(pr_terminal)
           expect(tokens[i].lexeme).to eq(pr_lexeme)
@@ -256,27 +256,27 @@ module Skeem
     end

     context 'Comments:' do
-      it 'should skip heading comments' do
+      it 'skips heading comments' do
         input = "; Starting comment\n \"Some text\""
-        subject.reinitialize(input)
-        token = subject.tokens.first
+        tokenizer.reset(input)
+        token = tokenizer.tokens.first
         expect(token.terminal).to eq('STRING_LIT')
         expect(token.lexeme).to eq('Some text')
         expect(token.position.line).to eq(2)
       end

-      it 'should skip trailing comments' do
+      it 'skips trailing comments' do
         input = '"Some text"; Trailing comment'
-        subject.reinitialize(input)
-        token = subject.tokens.first
+        tokenizer.reset(input)
+        token = tokenizer.tokens.first
         expect(token.terminal).to eq('STRING_LIT')
         expect(token.lexeme).to eq('Some text')
       end

-      it 'should skip embedded comments' do
+      it 'skips embedded comments' do
         input = "\"First text\"; Middle comment\n\"Second text\""
-        subject.reinitialize(input)
-        tokens = subject.tokens
+        tokenizer.reset(input)
+        tokens = tokenizer.tokens
         expect(tokens.size).to eq(2)
         token = tokens[0]
         expect(token.terminal).to eq('STRING_LIT')
@@ -286,10 +286,10 @@ module Skeem
         expect(token.lexeme).to eq('Second text')
       end

-      it 'should skip block comments' do
+      it 'skips block comments' do
         input = '"First text" #| Middle comment |# "Second text"'
-        subject.reinitialize(input)
-        tokens = subject.tokens
+        tokenizer.reset(input)
+        tokens = tokenizer.tokens
         expect(tokens.size).to eq(2)
         token = tokens[0]
         expect(token.terminal).to eq('STRING_LIT')
@@ -299,10 +299,10 @@ module Skeem
         expect(token.lexeme).to eq('Second text')
       end

-      it 'should cope with nested block comments' do
+      it 'copes with nested block comments' do
         input = '"First text" #| One #| Two |# comment #| Three |# |# "Second text"'
-        subject.reinitialize(input)
-        tokens = subject.tokens
+        tokenizer.reset(input)
+        tokens = tokenizer.tokens
         expect(tokens.size).to eq(2)
         token = tokens[0]
         expect(token.terminal).to eq('STRING_LIT')
@@ -314,10 +314,10 @@ module Skeem
       end
     end

     context 'Scanning Scheme sample code' do
-      it 'should produce a sequence of token objects' do
+      it 'produces a sequence of token objects' do
         # Deeper tokenizer testing
         source = '(define circle-area (lambda (r) (* pi (* r r))))'
-        subject.reinitialize(source)
+        tokenizer.reset(source)
         predicted = [
           %w[LPAREN (],
           %w[DEFINE define],
@@ -339,7 +339,7 @@ module Skeem
           %w[RPAREN )],
           %w[RPAREN )]
         ]
-        match_expectations(subject, predicted)
+        match_expectations(tokenizer, predicted)
       end
     end # context
   end # describe
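Beyond the `reset` rename, the spec rewrite above adopts current RSpec idioms: present-tense example names in place of 'should ...', a named `subject(:tokenizer)` built from `described_class`, `be_a` instead of `be_kind_of`, and the composed `all(...)` matcher replacing a hand-written `each` loop. A self-contained sketch of those idioms, deliberately not tied to Skeem:

    require 'rspec/autorun'

    Point = Struct.new(:x, :y)

    RSpec.describe Point do
      # Named subject built from described_class, as in the tokenizer spec.
      subject(:point) { described_class.new(1, 2) }

      it 'exposes integer coordinates' do
        # all(...) applies the inner matcher to every element,
        # replacing an explicit each { ... expect ... } loop.
        expect([point.x, point.y]).to all(be_a(Integer))
      end

      it 'is not nil' do
        expect(point).not_to be_nil
      end
    end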
data/spec/skeem_spec.rb CHANGED
@@ -2,6 +2,6 @@

 RSpec.describe Skeem do
   it 'has a version number' do
-    expect(Skeem::VERSION).not_to be nil
+    expect(Skeem::VERSION).not_to be_nil
   end
 end
data/spec/spec_helper.rb CHANGED
@@ -17,7 +17,6 @@ RSpec.configure do |config|
   config.full_backtrace = true
 end

-
 module InterpreterSpec
   def expect_expr(aSkeemExpr)
     result = subject.run(aSkeemExpr)
@@ -35,7 +34,6 @@ module InterpreterSpec
       else
         expect(result).to eq(predicted)
       end
-
     rescue Exception => e
       $stderr.puts "Row #{index + 1} failed."
       throw e
metadata CHANGED
@@ -1,14 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: skeem
 version: !ruby/object:Gem::Version
-  version: 0.2.21
+  version: 0.2.22
 platform: ruby
 authors:
 - Dimitri Geshef
-autorequire:
 bindir: bin
 cert_chain: []
-date: 2022-04-24 00:00:00.000000000 Z
+date: 2025-03-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rley
@@ -16,56 +15,102 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.8.11
+        version: '0.9'
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.9.02
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.8.11
+        version: '0.9'
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.9.02
 - !ruby/object:Gem::Dependency
-  name: bundler
+  name: benchmark
   requirement: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
       - !ruby/object:Gem::Version
-        version: '2.0'
+        version: 0.4.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
       - !ruby/object:Gem::Version
-        version: '2.0'
+        version: 0.4.0
 - !ruby/object:Gem::Dependency
-  name: rake
+  name: bundler
   requirement: !ruby/object:Gem::Requirement
     requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.4'
     - - ">="
       - !ruby/object:Gem::Version
-        version: '12.0'
+        version: 2.4.1
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '2.4'
     - - ">="
       - !ruby/object:Gem::Version
-        version: '12.0'
+        version: 2.4.1
+- !ruby/object:Gem::Dependency
+  name: ostruct
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.6.1
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.6.1
+- !ruby/object:Gem::Dependency
+  name: rake
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '13'
 - !ruby/object:Gem::Dependency
   name: rspec
   requirement: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
       - !ruby/object:Gem::Version
-        version: '3.0'
+        version: '3'
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 3.10.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
    - - "~>"
       - !ruby/object:Gem::Version
-        version: '3.0'
+        version: '3'
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 3.10.0
 description: " Skeem is a Scheme language interpreter implemented in Ruby.\n"
 email:
 - famished.tiger@yahoo.com
@@ -139,8 +184,8 @@ files:
 homepage: https://github.com/famished-tiger/Skeem
 licenses:
 - MIT
-metadata: {}
-post_install_message:
+metadata:
+  rubygems_mfa_required: 'true'
 rdoc_options:
 - --charset=UTF-8 --exclude="examples|spec"
 require_paths:
@@ -149,15 +194,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.6.0
+      version: 3.1.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.3.7
-signing_key:
+rubygems_version: 3.6.6
 specification_version: 4
 summary: Skeem is an interpreter of a subset of the Scheme programming language. Scheme
   is a descendent of the Lisp language.
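The dependency changes above pair a pessimistic `~>` bound with an explicit `>=` floor for several gems. A sketch of how the same constraints would read in gemspec form (the actual skeem.gemspec is not part of this diff, so take the DSL block as illustrative; the versions themselves come from the metadata):

    Gem::Specification.new do |spec|
      spec.name    = 'skeem'
      spec.version = '0.2.22'
      # "~> 0.9" permits >= 0.9 and < 1.0; ">= 0.9.02" raises the floor.
      spec.add_dependency 'rley', '~> 0.9', '>= 0.9.02'
      spec.add_development_dependency 'benchmark', '~> 0.4.0'
      spec.add_development_dependency 'bundler', '~> 2.4', '>= 2.4.1'
      spec.add_development_dependency 'ostruct', '~> 0.6.1'
      spec.add_development_dependency 'rake', '~> 13'
      spec.add_development_dependency 'rspec', '~> 3', '>= 3.10.0'
      # Mirrors the raised required_ruby_version in the metadata above.
      spec.required_ruby_version = '>= 3.1.0'
    end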