sparkql 1.2.8 → 1.3.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
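In practice this is a style-only release: lexer.rb trades a subject-less case/when built on assignments-in-conditions for an if/elsif chain with explicit parentheses, adopts Ruby 1.9 hash syntax, adds spaces after commas, and strips trailing whitespace, while the generated parser is simply rebuilt with Racc 1.4.16, producing whitespace-only churn in the _reduce_N actions. No token types, grammar rules, or return values appear to change.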
data/lib/sparkql/lexer.rb CHANGED
@@ -2,9 +2,9 @@ require 'strscan'
 
 class Sparkql::Lexer < StringScanner
   include Sparkql::Token
-  
+
   attr_accessor :level, :block_group_identifier
-  
+
   attr_reader :last_field, :current_token_value, :token_index
 
   def initialize(str)
@@ -14,51 +14,50 @@ class Sparkql::Lexer < StringScanner
     @block_group_identifier = 0
     @expression_count = 0
   end
-  
+
   # Lookup the next matching token
   def shift
     @token_index = self.pos
 
-    token = case
-      when @current_token_value = scan(SPACE)
-        [:SPACE, @current_token_value]
-      when @current_token_value = scan(LPAREN)
-        levelup
-        [:LPAREN, @current_token_value]
-      when @current_token_value = scan(RPAREN)
-        # leveldown: do this after parsing group
-        [:RPAREN, @current_token_value]
-      when @current_token_value = scan(/\,/)
-        [:COMMA,@current_token_value]
-      when @current_token_value = scan(NULL)
-        literal :NULL, "NULL"
-      when @current_token_value = scan(STANDARD_FIELD)
-        check_standard_fields(@current_token_value)
-      when @current_token_value = scan(DATETIME)
-        literal :DATETIME, @current_token_value
-      when @current_token_value = scan(DATE)
-        literal :DATE, @current_token_value
-      when @current_token_value = scan(TIME)
-        literal :TIME, @current_token_value
-      when @current_token_value = scan(DECIMAL)
-        literal :DECIMAL, @current_token_value
-      when @current_token_value = scan(INTEGER)
-        literal :INTEGER, @current_token_value
-      when @current_token_value = scan(/\-/)
-        [:UMINUS, @current_token_value]
-      when @current_token_value = scan(CHARACTER)
-        literal :CHARACTER, @current_token_value
-      when @current_token_value = scan(BOOLEAN)
-        literal :BOOLEAN, @current_token_value
-      when @current_token_value = scan(KEYWORD)
-        check_keywords(@current_token_value)
-      when @current_token_value = scan(CUSTOM_FIELD)
-        [:CUSTOM_FIELD,@current_token_value]
-      when eos?
-        [false, false] # end of file, \Z don't work with StringScanner
-      else
-        [:UNKNOWN, "ERROR: '#{self.string}'"]
-    end
+    token = if (@current_token_value = scan(SPACE))
+              [:SPACE, @current_token_value]
+            elsif (@current_token_value = scan(LPAREN))
+              levelup
+              [:LPAREN, @current_token_value]
+            elsif (@current_token_value = scan(RPAREN))
+              # leveldown: do this after parsing group
+              [:RPAREN, @current_token_value]
+            elsif (@current_token_value = scan(/,/))
+              [:COMMA, @current_token_value]
+            elsif (@current_token_value = scan(NULL))
+              literal :NULL, "NULL"
+            elsif (@current_token_value = scan(STANDARD_FIELD))
+              check_standard_fields(@current_token_value)
+            elsif (@current_token_value = scan(DATETIME))
+              literal :DATETIME, @current_token_value
+            elsif (@current_token_value = scan(DATE))
+              literal :DATE, @current_token_value
+            elsif (@current_token_value = scan(TIME))
+              literal :TIME, @current_token_value
+            elsif (@current_token_value = scan(DECIMAL))
+              literal :DECIMAL, @current_token_value
+            elsif (@current_token_value = scan(INTEGER))
+              literal :INTEGER, @current_token_value
+            elsif (@current_token_value = scan(/-/))
+              [:UMINUS, @current_token_value]
+            elsif (@current_token_value = scan(CHARACTER))
+              literal :CHARACTER, @current_token_value
+            elsif (@current_token_value = scan(BOOLEAN))
+              literal :BOOLEAN, @current_token_value
+            elsif (@current_token_value = scan(KEYWORD))
+              check_keywords(@current_token_value)
+            elsif (@current_token_value = scan(CUSTOM_FIELD))
+              [:CUSTOM_FIELD, @current_token_value]
+            elsif eos?
+              [false, false] # end of file, \Z don't work with StringScanner
+            else
+              [:UNKNOWN, "ERROR: '#{self.string}'"]
+            end
 
     token.freeze
   end
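The heart of this hunk is the conditional rewrite: a subject-less case whose when clauses perform assignments is swapped for an if/elsif chain, with parentheses around each assignment so the `=` inside a condition reads as deliberate (RuboCop's Lint/AssignmentInCondition accepts the parenthesized form). A minimal standalone sketch of the before/after pattern, using a throwaway scanner rather than Sparkql's real token constants:

    require 'strscan'

    scanner = StringScanner.new('42 apples')

    # Before (1.2.8 style): subject-less case; each `when` tests the
    # truthiness of an assignment, which linters flag as ambiguous.
    token = case
            when value = scanner.scan(/\d+/) then [:INTEGER, value]
            when value = scanner.scan(/\w+/) then [:WORD, value]
            else [:UNKNOWN, scanner.rest]
            end

    scanner.reset

    # After (1.3.0 style): if/elsif with parenthesized assignments,
    # signalling that `=` (not `==`) is intended.
    token = if (value = scanner.scan(/\d+/))
              [:INTEGER, value]
            elsif (value = scanner.scan(/\w+/))
              [:WORD, value]
            else
              [:UNKNOWN, scanner.rest]
            end

    p token # => [:INTEGER, "42"]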
@@ -66,13 +65,13 @@ class Sparkql::Lexer < StringScanner
   def check_reserved_words(value)
     u_value = value.capitalize
     if OPERATORS.include?(u_value)
-      [:OPERATOR,u_value]
+      [:OPERATOR, u_value]
     elsif RANGE_OPERATOR == u_value
-      [:RANGE_OPERATOR,u_value]
+      [:RANGE_OPERATOR, u_value]
     elsif CONJUNCTIONS.include?(u_value)
-      [:CONJUNCTION,u_value]
+      [:CONJUNCTION, u_value]
     elsif UNARY_CONJUNCTIONS.include?(u_value)
-      [:UNARY_CONJUNCTION,u_value]
+      [:UNARY_CONJUNCTION, u_value]
     elsif ADD == u_value
       [:ADD, u_value]
     elsif SUB == u_value
@@ -87,12 +86,12 @@ class Sparkql::Lexer < StringScanner
       [:UNKNOWN, "ERROR: '#{self.string}'"]
     end
   end
-  
+
   def check_standard_fields(value)
     result = check_reserved_words(value)
     if result.first == :UNKNOWN
       @last_field = value
-      result = [:STANDARD_FIELD,value]
+      result = [:STANDARD_FIELD, value]
     end
     result
   end
@@ -100,26 +99,25 @@ class Sparkql::Lexer < StringScanner
   def check_keywords(value)
     result = check_reserved_words(value)
     if result.first == :UNKNOWN
-      result = [:KEYWORD,value]
+      result = [:KEYWORD, value]
     end
     result
   end
-  
+
   def levelup
     @level += 1
     @block_group_identifier += 1
   end
-  
+
   def leveldown
     @level -= 1
   end
-  
+
   def literal(symbol, value)
     node = {
-      :type => symbol.to_s.downcase.to_sym,
-      :value => value
+      type: symbol.to_s.downcase.to_sym,
+      value: value
     }
     [symbol, node]
   end
-  
 end
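The literal helper's node hash also moves from hash-rocket to Ruby 1.9 symbol-key syntax; the two literals are interchangeable whenever every key is a symbol:

    old_node = { :type => :integer, :value => '42' }  # 1.2.8 style
    new_node = { type: :integer, value: '42' }        # 1.3.0 style
    p old_node == new_node # => true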
data/lib/sparkql/parser.rb CHANGED
@@ -1,7 +1,7 @@
 #
 # DO NOT MODIFY!!!!
-# This file is automatically generated by Racc 1.4.15
-# from Racc grammer file "".
+# This file is automatically generated by Racc 1.4.16
+# from Racc grammar file "".
 #
 
 require 'racc/parser.rb'
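This file is machine-generated, so the remaining hunks are not hand edits: they fall out of rebuilding the parser with Racc 1.4.16, whose banner also fixes the old "grammer" typo. A sketch of the regeneration step, assuming the conventional layout where the grammar lives beside the generated file as parser.y (the grammar's actual name and the task name are assumptions, not shown in this diff):

    # Rakefile sketch (hypothetical task): rebuild parser.rb from the grammar.
    desc 'Regenerate the Racc parser'
    task :compile do
      sh 'racc -o lib/sparkql/parser.rb lib/sparkql/parser.y'
    end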
@@ -345,7 +345,7 @@ Racc_debug_parser = false
 # reduce 1 omitted
 
 def _reduce_2(val, _values, result)
-    result = 0
+  result = 0
   result
 end
 
@@ -356,71 +356,71 @@ end
 # reduce 5 omitted
 
 def _reduce_6(val, _values, result)
-    result = tokenize_expression(val[0], val[1],val[2])
+  result = tokenize_expression(val[0], val[1],val[2])
   result
 end
 
 def _reduce_7(val, _values, result)
-    result = tokenize_expression(val[0], val[1], val[2])
+  result = tokenize_expression(val[0], val[1], val[2])
   result
 end
 
 # reduce 8 omitted
 
 def _reduce_9(val, _values, result)
-    result = tokenize_unary_conjunction(val[0], val[1])
+  result = tokenize_unary_conjunction(val[0], val[1])
   result
 end
 
 def _reduce_10(val, _values, result)
-    result = tokenize_conjunction(val[0], val[1],val[2])
+  result = tokenize_conjunction(val[0], val[1],val[2])
   result
 end
 
 def _reduce_11(val, _values, result)
-    result = tokenize_conjunction(val[0], val[1],val[2])
+  result = tokenize_conjunction(val[0], val[1],val[2])
   result
 end
 
 def _reduce_12(val, _values, result)
-    result = tokenize_group(val[1])
+  result = tokenize_group(val[1])
   result
 end
 
 # reduce 13 omitted
 
 def _reduce_14(val, _values, result)
-    result = tokenize_arithmetic(val[0], val[1], val[2])
+  result = tokenize_arithmetic(val[0], val[1], val[2])
   result
 end
 
 def _reduce_15(val, _values, result)
-    result = tokenize_arithmetic(val[0], val[1], val[2])
+  result = tokenize_arithmetic(val[0], val[1], val[2])
   result
 end
 
 def _reduce_16(val, _values, result)
-    result = tokenize_arithmetic(val[0], val[1], val[2])
+  result = tokenize_arithmetic(val[0], val[1], val[2])
   result
 end
 
 def _reduce_17(val, _values, result)
-    result = tokenize_arithmetic(val[0], val[1], val[2])
+  result = tokenize_arithmetic(val[0], val[1], val[2])
   result
 end
 
 def _reduce_18(val, _values, result)
-    result = tokenize_arithmetic(val[0], val[1], val[2])
+  result = tokenize_arithmetic(val[0], val[1], val[2])
   result
 end
 
 def _reduce_19(val, _values, result)
-    result = tokenize_arithmetic_group(val[1])
+  result = tokenize_arithmetic_group(val[1])
   result
 end
 
 def _reduce_20(val, _values, result)
-    result = tokenize_arithmetic_negation(val[1])
+  result = tokenize_arithmetic_negation(val[1])
   result
 end
 
@@ -435,64 +435,64 @@ end
 # reduce 25 omitted
 
 def _reduce_26(val, _values, result)
-    result = tokenize_list(val[0])
+  result = tokenize_list(val[0])
   result
 end
 
 # reduce 27 omitted
 
 def _reduce_28(val, _values, result)
-    result = group_fold(val[1])
+  result = group_fold(val[1])
   result
 end
 
 def _reduce_29(val, _values, result)
-    result = tokenize_literal_negation(val[1])
+  result = tokenize_literal_negation(val[1])
   result
 end
 
 def _reduce_30(val, _values, result)
-    result = add_fold(val[0], val[2])
+  result = add_fold(val[0], val[2])
   result
 end
 
 def _reduce_31(val, _values, result)
-    result = sub_fold(val[0], val[2])
+  result = sub_fold(val[0], val[2])
   result
 end
 
 def _reduce_32(val, _values, result)
-    result = mul_fold(val[0], val[2])
+  result = mul_fold(val[0], val[2])
   result
 end
 
 def _reduce_33(val, _values, result)
-    result = div_fold(val[0], val[2])
+  result = div_fold(val[0], val[2])
   result
 end
 
 def _reduce_34(val, _values, result)
-    result = mod_fold(val[0], val[2])
+  result = mod_fold(val[0], val[2])
   result
 end
 
 def _reduce_35(val, _values, result)
-    result = tokenize_function(val[0], [])
+  result = tokenize_function(val[0], [])
   result
 end
 
 def _reduce_36(val, _values, result)
-    result = tokenize_function(val[0], val[2])
+  result = tokenize_function(val[0], val[2])
   result
 end
 
 def _reduce_37(val, _values, result)
-    result = tokenize_function(val[0], [])
+  result = tokenize_function(val[0], [])
   result
 end
 
 def _reduce_38(val, _values, result)
-    result = tokenize_function(val[0], val[2])
+  result = tokenize_function(val[0], val[2])
   result
 end
 
@@ -501,12 +501,12 @@ end
 # reduce 40 omitted
 
 def _reduce_41(val, _values, result)
-    result = tokenize_function_args(val[0], val[2])
+  result = tokenize_function_args(val[0], val[2])
   result
 end
 
 def _reduce_42(val, _values, result)
-    result = tokenize_field_arg(val[0])
+  result = tokenize_field_arg(val[0])
   result
 end
 
@@ -517,7 +517,7 @@ end
 # reduce 45 omitted
 
 def _reduce_46(val, _values, result)
-    result = tokenize_function_args(val[0], val[2])
+  result = tokenize_function_args(val[0], val[2])
   result
 end
 
@@ -530,17 +530,17 @@ end
 # reduce 50 omitted
 
 def _reduce_51(val, _values, result)
-    result = tokenize_multiple(val[0], val[2])
+  result = tokenize_multiple(val[0], val[2])
   result
 end
 
 def _reduce_52(val, _values, result)
-    result = tokenize_multiple(val[0], val[2])
+  result = tokenize_multiple(val[0], val[2])
   result
 end
 
 def _reduce_53(val, _values, result)
-    result = tokenize_multiple(val[0], val[2])
+  result = tokenize_multiple(val[0], val[2])
   result
 end
 
@@ -581,7 +581,7 @@ def _reduce_none(val, _values, result)
 end
 
   end # class Parser
-  end # module Sparkql
+end # module Sparkql
 
 
 # END PARSER