tailor 0.1.5 → 1.0.0.alpha
This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- data/.gitignore +9 -1
- data/.rspec +2 -1
- data/.tailor +6 -0
- data/Gemfile.lock +47 -78
- data/{ChangeLog.rdoc → History.rdoc} +0 -0
- data/README.rdoc +157 -24
- data/Rakefile +0 -9
- data/bin/tailor +16 -69
- data/features/configurable.feature +78 -0
- data/features/horizontal_spacing.feature +262 -0
- data/features/indentation.feature +17 -21
- data/features/indentation/bad_files_with_no_trailing_newline.feature +90 -0
- data/features/indentation/good_files_with_no_trailing_newline.feature +206 -0
- data/features/name_detection.feature +72 -0
- data/features/step_definitions/indentation_steps.rb +10 -133
- data/features/support/env.rb +7 -15
- data/features/support/file_cases/horizontal_spacing_cases.rb +265 -0
- data/features/support/file_cases/indentation_cases.rb +972 -0
- data/features/support/file_cases/naming_cases.rb +52 -0
- data/features/support/file_cases/vertical_spacing_cases.rb +70 -0
- data/features/support/hooks.rb +8 -0
- data/features/support/{1_file_with_bad_operator_spacing → legacy}/bad_op_spacing.rb +0 -0
- data/features/support/{1_file_with_bad_ternary_colon_spacing → legacy}/bad_ternary_colon_spacing.rb +0 -0
- data/features/support/{1_long_file_with_indentation/my_project.rb → legacy/long_file_with_indentation.rb} +1 -1
- data/features/support/world.rb +14 -0
- data/features/vertical_spacing.feature +114 -0
- data/lib/ext/string_ext.rb +5 -0
- data/lib/tailor.rb +6 -252
- data/lib/tailor/cli.rb +49 -0
- data/lib/tailor/cli/options.rb +251 -0
- data/lib/tailor/composite_observable.rb +56 -0
- data/lib/tailor/configuration.rb +263 -0
- data/lib/tailor/critic.rb +162 -0
- data/lib/tailor/formatters/text.rb +126 -0
- data/lib/tailor/lexed_line.rb +246 -0
- data/lib/tailor/lexer.rb +428 -0
- data/lib/tailor/lexer/token.rb +103 -0
- data/lib/tailor/lexer_constants.rb +75 -0
- data/lib/tailor/logger.rb +28 -0
- data/lib/tailor/problem.rb +100 -0
- data/lib/tailor/reporter.rb +48 -0
- data/lib/tailor/ruler.rb +39 -0
- data/lib/tailor/rulers.rb +7 -0
- data/lib/tailor/rulers/allow_camel_case_methods_ruler.rb +30 -0
- data/lib/tailor/rulers/allow_hard_tabs_ruler.rb +22 -0
- data/lib/tailor/rulers/allow_screaming_snake_case_classes_ruler.rb +32 -0
- data/lib/tailor/rulers/allow_trailing_line_spaces_ruler.rb +33 -0
- data/lib/tailor/rulers/indentation_spaces_ruler.rb +199 -0
- data/lib/tailor/rulers/indentation_spaces_ruler/indentation_manager.rb +362 -0
- data/lib/tailor/rulers/max_code_lines_in_class_ruler.rb +84 -0
- data/lib/tailor/rulers/max_code_lines_in_method_ruler.rb +84 -0
- data/lib/tailor/rulers/max_line_length_ruler.rb +31 -0
- data/lib/tailor/rulers/spaces_after_comma_ruler.rb +83 -0
- data/lib/tailor/rulers/spaces_after_lbrace_ruler.rb +114 -0
- data/lib/tailor/rulers/spaces_after_lbracket_ruler.rb +123 -0
- data/lib/tailor/rulers/spaces_after_lparen_ruler.rb +116 -0
- data/lib/tailor/rulers/spaces_before_comma_ruler.rb +67 -0
- data/lib/tailor/rulers/spaces_before_lbrace_ruler.rb +93 -0
- data/lib/tailor/rulers/spaces_before_rbrace_ruler.rb +98 -0
- data/lib/tailor/rulers/spaces_before_rbracket_ruler.rb +70 -0
- data/lib/tailor/rulers/spaces_before_rparen_ruler.rb +70 -0
- data/lib/tailor/rulers/spaces_in_empty_braces_ruler.rb +94 -0
- data/lib/tailor/rulers/trailing_newlines_ruler.rb +36 -0
- data/lib/tailor/runtime_error.rb +3 -0
- data/lib/tailor/tailorrc.erb +88 -0
- data/lib/tailor/version.rb +2 -2
- data/spec/spec_helper.rb +7 -5
- data/spec/tailor/cli_spec.rb +94 -0
- data/spec/tailor/configuration_spec.rb +147 -0
- data/spec/tailor/critic_spec.rb +63 -0
- data/spec/tailor/lexed_line_spec.rb +569 -0
- data/spec/tailor/lexer/token_spec.rb +46 -0
- data/spec/tailor/lexer_spec.rb +181 -0
- data/spec/tailor/options_spec.rb +6 -0
- data/spec/tailor/problem_spec.rb +74 -0
- data/spec/tailor/reporter_spec.rb +53 -0
- data/spec/tailor/ruler_spec.rb +56 -0
- data/spec/tailor/rulers/indentation_spaces_ruler/indentation_manager_spec.rb +454 -0
- data/spec/tailor/rulers/indentation_spaces_ruler_spec.rb +128 -0
- data/spec/tailor/rulers/spaces_after_comma_spec.rb +31 -0
- data/spec/tailor/rulers/spaces_after_lbrace_ruler_spec.rb +145 -0
- data/spec/tailor/rulers/spaces_before_lbrace_ruler_spec.rb +63 -0
- data/spec/tailor/rulers/spaces_before_rbrace_ruler_spec.rb +63 -0
- data/spec/tailor/rulers_spec.rb +9 -0
- data/spec/tailor/version_spec.rb +6 -0
- data/spec/tailor_spec.rb +9 -21
- data/tailor.gemspec +22 -35
- data/tasks/features.rake +7 -0
- data/tasks/roodi.rake +9 -0
- data/tasks/roodi_config.yaml +14 -0
- data/tasks/spec.rake +16 -0
- data/tasks/yard.rake +14 -0
- metadata +224 -77
- data/features/case_checking.feature +0 -38
- data/features/spacing.feature +0 -97
- data/features/spacing/commas.feature +0 -44
- data/features/step_definitions/case_checking_steps.rb +0 -42
- data/features/step_definitions/spacing_steps.rb +0 -156
- data/features/support/1_file_with_bad_comma_spacing/bad_comma_spacing.rb +0 -43
- data/features/support/1_file_with_bad_curly_brace_spacing/bad_curly_brace_spacing.rb +0 -60
- data/features/support/1_file_with_bad_parenthesis/bad_parenthesis.rb +0 -4
- data/features/support/1_file_with_bad_square_brackets/bad_square_brackets.rb +0 -62
- data/features/support/1_file_with_camel_case_class/camel_case_class.rb +0 -5
- data/features/support/1_file_with_camel_case_method/camel_case_method.rb +0 -3
- data/features/support/1_file_with_hard_tabs/hard_tab.rb +0 -3
- data/features/support/1_file_with_long_lines/long_lines.rb +0 -5
- data/features/support/1_file_with_snake_case_class/snake_case_class.rb +0 -5
- data/features/support/1_file_with_snake_case_method/snake_case_method.rb +0 -3
- data/features/support/1_file_with_trailing_whitespace/trailing_whitespace.rb +0 -5
- data/features/support/1_good_simple_file/simple_project.rb +0 -5
- data/features/support/common.rb +0 -102
- data/features/support/matchers.rb +0 -11
- data/lib/tailor/file_line.rb +0 -220
- data/lib/tailor/indentation.rb +0 -245
- data/lib/tailor/spacing.rb +0 -237
- data/spec/file_line_spec.rb +0 -70
- data/spec/indentation_spec.rb +0 -259
- data/spec/spacing/colon_spacing_spec.rb +0 -71
- data/spec/spacing/comma_spacing_spec.rb +0 -159
- data/spec/spacing/curly_brace_spacing_spec.rb +0 -257
- data/spec/spacing/parentheses_spacing_spec.rb +0 -28
- data/spec/spacing/square_bracket_spacing_spec.rb +0 -116
- data/spec/spacing_spec.rb +0 -167
- data/tasks/metrics.rake +0 -23
data/lib/tailor/lexer.rb
ADDED
@@ -0,0 +1,428 @@
+require 'ripper'
+require_relative 'composite_observable'
+require_relative 'lexed_line'
+require_relative 'lexer_constants'
+require_relative 'logger'
+require_relative 'lexer/token'
+
+
+class Tailor
+
+  # https://github.com/svenfuchs/ripper2ruby/blob/303d7ac4dfc2d8dbbdacaa6970fc41ff56b31d82/notes/scanner_events
+  # https://github.com/ruby/ruby/blob/trunk/test/ripper/test_scanner_events.rb
+  class Lexer < Ripper::Lexer
+    include CompositeObservable
+    include LexerConstants
+    include LogSwitch::Mixin
+
+    # @param [String] file The string to lex, or name of the file to read
+    #   and analyze.
+    def initialize(file)
+      @original_file_text = if File.exists? file
+        @file_name = file
+        File.open(@file_name, 'r').read
+      else
+        @file_name = "<notafile>"
+        file
+      end
+
+      @file_text = ensure_trailing_newline(@original_file_text)
+      super @file_text
+      @added_newline = @file_text != @original_file_text
+    end
+
+    def check_added_newline
+      file_changed
+      notify_file_observers(count_trailing_newlines(@original_file_text))
+    end
+
+    def on_backref(token)
+      log "BACKREF: '#{token}'"
+      super(token)
+    end
+
+    def on_backtick(token)
+      log "BACKTICK: '#{token}'"
+      super(token)
+    end
+
+    def on_comma(token)
+      log "COMMA: #{token}"
+      log "Line length: #{current_line_of_text.length}"
+
+      comma_changed
+      notify_comma_observers(current_line_of_text, lineno, column)
+
+      super(token)
+    end
+
+    def on_comment(token)
+      log "COMMENT: '#{token}'"
+
+      l_token = Tailor::Lexer::Token.new(token)
+      lexed_line = LexedLine.new(super, lineno)
+      comment_changed
+      notify_comment_observers(l_token, lexed_line, @file_text, lineno, column)
+
+      super(token)
+    end
+
+    def on_const(token)
+      log "CONST: '#{token}'"
+
+      l_token = Tailor::Lexer::Token.new(token)
+      lexed_line = LexedLine.new(super, lineno)
+      const_changed
+      notify_const_observers(l_token, lexed_line, lineno, column)
+
+      super(token)
+    end
+
+    def on_cvar(token)
+      log "CVAR: '#{token}'"
+      super(token)
+    end
+
+    def on_embdoc(token)
+      log "EMBDOC: '#{token}'"
+      super(token)
+    end
+
+    def on_embdoc_beg(token)
+      log "EMBDOC_BEG: '#{token}'"
+      super(token)
+    end
+
+    def on_embdoc_end(token)
+      log "EMBDOC_BEG: '#{token}'"
+      super(token)
+    end
+
+    # Matches the { in an expression embedded in a string.
+    def on_embexpr_beg(token)
+      log "EMBEXPR_BEG: '#{token}'"
+      embexpr_beg_changed
+      notify_embexpr_beg_observers
+      super(token)
+    end
+
+    def on_embexpr_end(token)
+      log "EMBEXPR_END: '#{token}'"
+      embexpr_end_changed
+      notify_embexpr_end_observers
+      super(token)
+    end
+
+    def on_embvar(token)
+      log "EMBVAR: '#{token}'"
+      super(token)
+    end
+
+    def on_float(token)
+      log "FLOAT: '#{token}'"
+      super(token)
+    end
+
+    # Global variable
+    def on_gvar(token)
+      log "GVAR: '#{token}'"
+      super(token)
+    end
+
+    def on_heredoc_beg(token)
+      log "HEREDOC_BEG: '#{token}'"
+      super(token)
+    end
+
+    def on_heredoc_end(token)
+      log "HEREDOC_END: '#{token}'"
+      super(token)
+    end
+
+    def on_ident(token)
+      log "IDENT: '#{token}'"
+      l_token = Tailor::Lexer::Token.new(token)
+      lexed_line = LexedLine.new(super, lineno)
+      ident_changed
+      notify_ident_observers(l_token, lexed_line, lineno, column)
+      super(token)
+    end
+
+    # Called when the lexer matches a Ruby ignored newline. Ignored newlines
+    # occur when a newline is encountered, but the statement that was expressed
+    # on that line was not completed on that line.
+    #
+    # @param [String] token The token that the lexer matched.
+    def on_ignored_nl(token)
+      log "IGNORED_NL"
+
+      current_line = LexedLine.new(super, lineno)
+      ignored_nl_changed
+      notify_ignored_nl_observers(current_line, lineno, column)
+
+      super(token)
+    end
+
+    def on_int(token)
+      log "INT: '#{token}'"
+      super(token)
+    end
+
+    # Instance variable
+    def on_ivar(token)
+      log "IVAR: '#{token}'"
+      super(token)
+    end
+
+    # Called when the lexer matches a Ruby keyword
+    #
+    # @param [String] token The token that the lexer matched.
+    def on_kw(token)
+      log "KW: #{token}"
+      current_line = LexedLine.new(super, lineno)
+
+      l_token = Tailor::Lexer::Token.new(token,
+        {
+          loop_with_do: current_line.loop_with_do?,
+          full_line_of_text: current_line_of_text
+        }
+      )
+
+      kw_changed
+      notify_kw_observers(l_token, current_line, lineno, column)
+
+      super(token)
+    end
+
+    def on_label(token)
+      log "LABEL: '#{token}'"
+      super(token)
+    end
+
+    # Called when the lexer matches a {. Note a #{ match calls
+    # {on_embexpr_beg}.
+    #
+    # @param [String] token The token that the lexer matched.
+    def on_lbrace(token)
+      log "LBRACE: '#{token}'"
+      current_line = LexedLine.new(super, lineno)
+      lbrace_changed
+      notify_lbrace_observers(current_line, lineno, column)
+      super(token)
+    end
+
+    # Called when the lexer matches a [.
+    #
+    # @param [String] token The token that the lexer matched.
+    def on_lbracket(token)
+      log "LBRACKET: '#{token}'"
+      current_line = LexedLine.new(super, lineno)
+      lbracket_changed
+      notify_lbracket_observers(current_line, lineno, column)
+      super(token)
+    end
+
+    def on_lparen(token)
+      log "LPAREN: '#{token}'"
+      lparen_changed
+      notify_lparen_observers(lineno, column)
+      super(token)
+    end
+
+    # This is the first thing that exists on a new line--NOT the last!
+    def on_nl(token)
+      log "NL"
+      current_line = LexedLine.new(super, lineno)
+
+      nl_changed
+      notify_nl_observers(current_line, lineno, column)
+
+      super(token)
+    end
+
+    # Operators
+    def on_op(token)
+      log "OP: '#{token}'"
+      super(token)
+    end
+
+    def on_period(token)
+      log "PERIOD: '#{token}'"
+
+      period_changed
+      notify_period_observers(current_line_of_text.length, lineno, column)
+
+      super(token)
+    end
+
+    def on_qwords_beg(token)
+      log "QWORDS_BEG: '#{token}'"
+      super(token)
+    end
+
+    # Called when the lexer matches a }.
+    #
+    # @param [String] token The token that the lexer matched.
+    def on_rbrace(token)
+      log "RBRACE: '#{token}'"
+
+      current_line = LexedLine.new(super, lineno)
+      rbrace_changed
+      notify_rbrace_observers(current_line, lineno, column)
+
+      super(token)
+    end
+
+    # Called when the lexer matches a ].
+    #
+    # @param [String] token The token that the lexer matched.
+    def on_rbracket(token)
+      log "RBRACKET: '#{token}'"
+
+      current_line = LexedLine.new(super, lineno)
+      rbracket_changed
+      notify_rbracket_observers(current_line, lineno, column)
+
+      super(token)
+    end
+
+    def on_regexp_beg(token)
+      log "REGEXP_BEG: '#{token}'"
+      super(token)
+    end
+
+    def on_regexp_end(token)
+      log "REGEXP_END: '#{token}'"
+      super(token)
+    end
+
+    def on_rparen(token)
+      log "RPAREN: '#{token}'"
+
+      current_line = LexedLine.new(super, lineno)
+      rparen_changed
+      notify_rparen_observers(current_line, lineno, column)
+
+      super(token)
+    end
+
+    def on_semicolon(token)
+      log "SEMICOLON: '#{token}'"
+      super(token)
+    end
+
+    def on_sp(token)
+      log "SP: '#{token}'; size: #{token.size}"
+      l_token = Tailor::Lexer::Token.new(token)
+      sp_changed
+      notify_sp_observers(l_token, lineno, column)
+      super(token)
+    end
+
+    def on_symbeg(token)
+      log "SYMBEG: '#{token}'"
+      super(token)
+    end
+
+    def on_tlambda(token)
+      log "TLAMBDA: '#{token}'"
+      super(token)
+    end
+
+    def on_tlambeg(token)
+      log "TLAMBEG: '#{token}'"
+      super(token)
+    end
+
+    def on_tstring_beg(token)
+      log "TSTRING_BEG: '#{token}'"
+      tstring_beg_changed
+      notify_tstring_beg_observers(lineno)
+      super(token)
+    end
+
+    def on_tstring_content(token)
+      log "TSTRING_CONTENT: '#{token}'"
+      super(token)
+    end
+
+    def on_tstring_end(token)
+      log "TSTRING_END: '#{token}'"
+      tstring_end_changed
+      notify_tstring_end_observers
+      super(token)
+    end
+
+    def on_words_beg(token)
+      log "WORDS_BEG: '#{token}'"
+      super(token)
+    end
+
+    def on_words_sep(token)
+      log "WORDS_SEP: '#{token}'"
+      super(token)
+    end
+
+    def on___end__(token)
+      log "__END__: '#{token}'"
+      super(token)
+    end
+
+    def on_CHAR(token)
+      log "CHAR: '#{token}'"
+      super(token)
+    end
+
+    # The current line of text being examined.
+    #
+    # @return [String] The current line of text.
+    def current_line_of_text
+      @file_text.split("\n").at(lineno - 1) || ''
+    end
+
+    # Counts the number of newlines at the end of the file.
+    #
+    # @param [String] text The file's text.
+    # @return [Fixnum] The number of \n at the end of the file.
+    def count_trailing_newlines(text)
+      if text.end_with? "\n"
+        count = 0
+
+        text.reverse.chars do |c|
+          if c == "\n"
+            count += 1
+          else
+            break
+          end
+        end
+
+        count
+      else
+        0
+      end
+    end
+
+    # Adds a newline to the end of the test if one doesn't exist. Without doing
+    # this, Ripper won't trigger a newline event for the last line of the file,
+    # which is required for some rulers to do their thing.
+    #
+    # @param [String] file_text The text to check.
+    # @return [String] The file text with a newline at the end.
+    def ensure_trailing_newline(file_text)
+      count_trailing_newlines(file_text) > 0 ? file_text : (file_text + "\n")
+    end
+
+    #---------------------------------------------------------------------------
+    # Privates!
+    #---------------------------------------------------------------------------
+    private
+
+    def log(*args)
+      l = begin; lineno; rescue; "<EOF>"; end
+      c = begin; column; rescue; "<EOF>"; end
+      subclass_name = self.class.to_s.sub(/^Tailor::/, '')
+      args.first.insert(0, "<#{subclass_name}> #{l}[#{c}]: ")
+      Tailor::Logger.log(*args)
+    end
+  end
+end
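
The new Lexer subclasses Ripper::Lexer and, for each scanner event it cares about, fires a *_changed / notify_*_observers pair so that rulers subscribed through CompositeObservable can react as the source is scanned; each on_* handler then defers to super so Ripper's own bookkeeping stays intact. Below is a minimal driver sketch, not taken from the diff: the source string and variable names are illustrative, it assumes the gem's lib/ directory is on the load path, and the real wiring of rulers to the lexer lives elsewhere in the gem.

  # Illustrative only: lex a raw Ruby string with no rulers attached.
  require 'tailor/lexer'

  src = "def add(a, b)\n  a + b\nend\n"   # raw text, not an existing file path
  lexer = Tailor::Lexer.new(src)          # non-file input takes the "<notafile>" branch

  lexer.lex                  # Ripper::Lexer#lex walks the source, triggering the on_* hooks above
  lexer.check_added_newline  # notifies file observers with the original trailing-newline count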
data/lib/tailor/lexer/token.rb
ADDED
@@ -0,0 +1,103 @@
+require 'ripper'
+require_relative '../lexer_constants'
+require_relative '../logger'
+
+class Tailor
+  class Lexer < ::Ripper::Lexer
+    class Token < String
+      include LexerConstants
+      include Tailor::Logger::Mixin
+
+      def initialize(the_token, options={})
+        super(the_token)
+        @options = options
+      end
+
+      # Checks if +self+ is in +{KEYWORDS_TO_INDENT}+.
+      #
+      # @return [Boolean]
+      def keyword_to_indent?
+        KEYWORDS_TO_INDENT.include? self
+      end
+
+      # Checks if +self+ is in +{CONTINUATION_KEYWORDS}+.
+      #
+      # @return [Boolean]
+      def continuation_keyword?
+        CONTINUATION_KEYWORDS.include? self
+      end
+
+      # @return [Boolean]
+      def ends_with_newline?
+        self =~ /\n$/
+      end
+
+      # Checks if +self+ is "do" and +@options[:loop_with_do] is true.
+      #
+      # @return [Boolean]
+      def do_is_for_a_loop?
+        self == "do" && @options[:loop_with_do]
+      end
+
+      # @return [Boolean]
+      def screaming_snake_case?
+        self =~ /[A-Z].*_/
+      end
+
+      # @return [Boolean]
+      def contains_capital_letter?
+        self =~ /[A-Z]/
+      end
+
+      # @return [Boolean]
+      def contains_hard_tab?
+        self =~ /\t/
+      end
+
+      # Checks the current line to see if +self+ is being used as a modifier.
+      #
+      # @return [Boolean] True if there's a modifier in the current line that
+      #   is the same type as +token+.
+      def modifier_keyword?
+        return false if not keyword_to_indent?
+
+        line_of_text = @options[:full_line_of_text]
+        log "Line of text: #{line_of_text}"
+
+        result = catch(:result) do
+          sexp_line = Ripper.sexp(line_of_text)
+
+          if sexp_line.nil?
+            msg = "sexp line was nil. "
+            msg << "Perhaps that line is part of a multi-line statement?"
+            log msg
+            log "Trying again with the last char removed from the line..."
+            line_of_text.chop!
+            sexp_line = Ripper.sexp(line_of_text)
+          end
+
+          if sexp_line.nil?
+            log "sexp line was nil again."
+            log "Trying one more time with the last char removed from the line..."
+            line_of_text.chop!
+            sexp_line = Ripper.sexp(line_of_text)
+          end
+
+          if sexp_line.is_a? Array
+            log "sexp_line.flatten: #{sexp_line.flatten}"
+            log "sexp_line.last.first: #{sexp_line.last.first}"
+
+            begin
+              throw(:result, sexp_line.flatten.compact.any? do |s|
+                s == MODIFIERS[self]
+              end)
+            rescue NoMethodError
+            end
+          end
+        end
+
+        result
+      end
+    end
+  end
+end
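
Token subclasses String, so a lexed token can be queried directly for the style properties the rulers check. A few illustrative calls follow; the values are hypothetical, the regex helpers return a match index or nil rather than a strict true/false, and modifier_keyword? depends on the KEYWORDS_TO_INDENT and MODIFIERS tables in lexer_constants.rb, which are not shown in this diff.

  # Illustrative only; assumes the gem's lib/ is on the load path.
  require 'tailor/lexer/token'

  Tailor::Lexer::Token.new('SOME_CONST').screaming_snake_case?   # => 0   (truthy match index)
  Tailor::Lexer::Token.new("\tfoo").contains_hard_tab?           # => 0   (truthy)
  Tailor::Lexer::Token.new('foo').contains_capital_letter?       # => nil (falsy)

  # 'if' used as a trailing modifier; expected truthy, assuming 'if' is in
  # KEYWORDS_TO_INDENT and MODIFIERS maps it to :if_mod.
  token = Tailor::Lexer::Token.new('if',
    loop_with_do: false,
    full_line_of_text: 'return if x.nil?')
  token.modifier_keyword?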