natalie_parser 1.1.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only. It reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +25 -1
- data/Rakefile +3 -1
- data/include/natalie_parser/node/array_pattern_node.hpp +20 -2
- data/include/natalie_parser/node/bignum_node.hpp +4 -0
- data/include/natalie_parser/node/case_in_node.hpp +5 -2
- data/include/natalie_parser/node/colon2_node.hpp +1 -0
- data/include/natalie_parser/node/fixnum_node.hpp +4 -0
- data/include/natalie_parser/node/float_node.hpp +4 -0
- data/include/natalie_parser/node/hash_node.hpp +8 -3
- data/include/natalie_parser/node/hash_pattern_node.hpp +2 -1
- data/include/natalie_parser/node/infix_op_node.hpp +1 -1
- data/include/natalie_parser/node/keyword_rest_pattern_node.hpp +43 -0
- data/include/natalie_parser/node/node.hpp +3 -0
- data/include/natalie_parser/node/unary_op_node.hpp +1 -1
- data/include/natalie_parser/node.hpp +1 -0
- data/include/natalie_parser/parser.hpp +4 -1
- data/include/natalie_parser/token.hpp +43 -13
- data/lib/natalie_parser/version.rb +1 -1
- data/src/lexer/interpolated_string_lexer.cpp +9 -9
- data/src/lexer/regexp_lexer.cpp +7 -7
- data/src/lexer/word_array_lexer.cpp +13 -13
- data/src/lexer.cpp +164 -169
- data/src/node/begin_rescue_node.cpp +1 -1
- data/src/node/node.cpp +7 -0
- data/src/parser.cpp +185 -70
- metadata +3 -2
@@ -11,7 +11,7 @@ Token WordArrayLexer::build_next_token() {
|
|
11
11
|
return consume_array();
|
12
12
|
case State::DynamicStringBegin:
|
13
13
|
m_state = State::EvaluateBegin;
|
14
|
-
return Token { Token::Type::String, m_buffer, m_file, m_token_line, m_token_column };
|
14
|
+
return Token { Token::Type::String, m_buffer, m_file, m_token_line, m_token_column, m_whitespace_precedes };
|
15
15
|
case State::DynamicStringEnd:
|
16
16
|
if (current_char() == m_stop_char) {
|
17
17
|
advance();
|
@@ -19,18 +19,18 @@ Token WordArrayLexer::build_next_token() {
|
|
19
19
|
} else {
|
20
20
|
m_state = State::InProgress;
|
21
21
|
}
|
22
|
-
return Token { Token::Type::InterpolatedStringEnd, m_file, m_token_line, m_token_column };
|
22
|
+
return Token { Token::Type::InterpolatedStringEnd, m_file, m_token_line, m_token_column, m_whitespace_precedes };
|
23
23
|
case State::EvaluateBegin:
|
24
24
|
return start_evaluation();
|
25
25
|
case State::EvaluateEnd:
|
26
26
|
advance(); // }
|
27
27
|
m_state = State::DynamicStringInProgress;
|
28
|
-
return Token { Token::Type::EvaluateToStringEnd, m_file, m_token_line, m_token_column };
|
28
|
+
return Token { Token::Type::EvaluateToStringEnd, m_file, m_token_line, m_token_column, m_whitespace_precedes };
|
29
29
|
case State::EndToken:
|
30
30
|
m_state = State::Done;
|
31
|
-
return Token { Token::Type::RBracket, m_file, m_cursor_line, m_cursor_column };
|
31
|
+
return Token { Token::Type::RBracket, m_file, m_cursor_line, m_cursor_column, m_whitespace_precedes };
|
32
32
|
case State::Done:
|
33
|
-
return Token { Token::Type::Eof, m_file, m_cursor_line, m_cursor_column };
|
33
|
+
return Token { Token::Type::Eof, m_file, m_cursor_line, m_cursor_column, m_whitespace_precedes };
|
34
34
|
}
|
35
35
|
TM_UNREACHABLE();
|
36
36
|
}
|
@@ -70,7 +70,7 @@ Token WordArrayLexer::consume_array() {
|
|
70
70
|
return dynamic_string_finish();
|
71
71
|
}
|
72
72
|
if (!m_buffer->is_empty()) {
|
73
|
-
auto token = Token { Token::Type::String, m_buffer, m_file, m_cursor_line, m_cursor_column };
|
73
|
+
auto token = Token { Token::Type::String, m_buffer, m_file, m_cursor_line, m_cursor_column, m_whitespace_precedes };
|
74
74
|
advance();
|
75
75
|
return token;
|
76
76
|
}
|
@@ -97,38 +97,38 @@ Token WordArrayLexer::consume_array() {
|
|
97
97
|
}
|
98
98
|
}
|
99
99
|
|
100
|
-
return Token { Token::Type::UnterminatedWordArray, m_buffer, m_file, m_token_line, m_token_column };
|
100
|
+
return Token { Token::Type::UnterminatedWordArray, m_buffer, m_file, m_token_line, m_token_column, m_whitespace_precedes };
|
101
101
|
}
|
102
102
|
|
103
103
|
Token WordArrayLexer::in_progress_start_dynamic_string() {
|
104
104
|
advance(2); // #{
|
105
105
|
m_state = State::DynamicStringBegin;
|
106
|
-
return Token { Token::Type::InterpolatedStringBegin, m_file, m_cursor_line, m_cursor_column };
|
106
|
+
return Token { Token::Type::InterpolatedStringBegin, m_file, m_cursor_line, m_cursor_column, m_whitespace_precedes };
|
107
107
|
}
|
108
108
|
|
109
109
|
Token WordArrayLexer::start_evaluation() {
|
110
110
|
m_nested_lexer = new Lexer { *this, '{', '}' };
|
111
111
|
m_state = State::EvaluateEnd;
|
112
|
-
return Token { Token::Type::EvaluateToStringBegin, m_file, m_token_line, m_token_column };
|
112
|
+
return Token { Token::Type::EvaluateToStringBegin, m_file, m_token_line, m_token_column, m_whitespace_precedes };
|
113
113
|
}
|
114
114
|
|
115
115
|
Token WordArrayLexer::dynamic_string_finish() {
|
116
116
|
if (!m_buffer->is_empty()) {
|
117
117
|
m_state = State::DynamicStringEnd;
|
118
|
-
return Token { Token::Type::String, m_buffer, m_file, m_cursor_line, m_cursor_column };
|
118
|
+
return Token { Token::Type::String, m_buffer, m_file, m_cursor_line, m_cursor_column, m_whitespace_precedes };
|
119
119
|
}
|
120
120
|
m_state = State::InProgress;
|
121
|
-
return Token { Token::Type::InterpolatedStringEnd, m_file, m_token_line, m_token_column };
|
121
|
+
return Token { Token::Type::InterpolatedStringEnd, m_file, m_token_line, m_token_column, m_whitespace_precedes };
|
122
122
|
}
|
123
123
|
|
124
124
|
Token WordArrayLexer::in_progress_finish() {
|
125
125
|
advance(); // ) or ] or } or whatever
|
126
126
|
if (!m_buffer->is_empty()) {
|
127
127
|
m_state = State::EndToken;
|
128
|
-
return Token { Token::Type::String, m_buffer, m_file, m_cursor_line, m_cursor_column };
|
128
|
+
return Token { Token::Type::String, m_buffer, m_file, m_cursor_line, m_cursor_column, m_whitespace_precedes };
|
129
129
|
}
|
130
130
|
m_state = State::Done;
|
131
|
-
return Token { Token::Type::RBracket, m_file, m_cursor_line, m_cursor_column };
|
131
|
+
return Token { Token::Type::RBracket, m_file, m_cursor_line, m_cursor_column, m_whitespace_precedes };
|
132
132
|
}
|
133
133
|
|
134
134
|
};
|