anbt-sql-formatter 0.0.1
Files added in this gem release (path, lines added, lines removed):
- data/.gitignore +4 -0
- data/Gemfile +4 -0
- data/Rakefile +1 -0
- data/anbt-sql-formatter.gemspec +24 -0
- data/bin/anbt-sql-formatter +50 -0
- data/lgpl-2.1.txt +504 -0
- data/lib/anbt-sql-formatter/coarse-tokenizer.rb +174 -0
- data/lib/anbt-sql-formatter/constants.rb +81 -0
- data/lib/anbt-sql-formatter/exception.rb +30 -0
- data/lib/anbt-sql-formatter/formatter.rb +409 -0
- data/lib/anbt-sql-formatter/helper.rb +73 -0
- data/lib/anbt-sql-formatter/parser.rb +327 -0
- data/lib/anbt-sql-formatter/rule.rb +121 -0
- data/lib/anbt-sql-formatter/token.rb +79 -0
- data/lib/anbt-sql-formatter/version.rb +7 -0
- data/misc/anbt-sql-formatter-customize-example +65 -0
- data/misc/anbt-sql-formatter-for-sakura-editor.js +165 -0
- data/readme.ja.txt +107 -0
- data/readme.txt +58 -0
- data/sample.sql +120 -0
- data/setup.rb +1585 -0
- data/test/helper.rb +17 -0
- data/test/test_coarse-tokenizer.rb +360 -0
- data/test/test_formatter.rb +489 -0
- data/test/test_helper.rb +23 -0
- data/test/test_parser.rb +370 -0
- data/test/test_rule.rb +30 -0
- data/uninstall.rb +20 -0
- metadata +84 -0
data/test/helper.rb
ADDED
@@ -0,0 +1,17 @@
|
|
1
|
+
require "test/unit"
|
2
|
+
require "pp"
|
3
|
+
|
4
|
+
$LOAD_PATH.unshift File.join(File.dirname(__FILE__), "..", "lib")
|
5
|
+
|
6
|
+
class Helper
|
7
|
+
def Helper.format_tokens(list)
|
8
|
+
list.map{|token|
|
9
|
+
"<#{token._type}>#{token.string}</>"
|
10
|
+
}.join("\n")
|
11
|
+
end
|
12
|
+
end
|
13
|
+
|
14
|
+
|
15
|
+
def assert_equals(a,b,c)
|
16
|
+
assert_equal(b,c,a)
|
17
|
+
end
|
@@ -0,0 +1,360 @@
|
|
1
|
+
# -*- coding: utf-8 -*-
|
2
|
+
|
3
|
+
require File.join(File.expand_path(File.dirname(__FILE__)), "helper")
|
4
|
+
|
5
|
+
require "anbt-sql-formatter/coarse-tokenizer"
|
6
|
+
|
7
|
+
class CoarseTokenizer
|
8
|
+
attr_accessor :buf, :str, :result
|
9
|
+
end
|
10
|
+
|
11
|
+
|
12
|
+
def format(tokens)
|
13
|
+
tokens.map{|t| t.to_s }.join("\n")
|
14
|
+
end
|
15
|
+
|
16
|
+
|
17
|
+
class TestCoarseTokenizer < Test::Unit::TestCase
|
18
|
+
def setup
|
19
|
+
@tok = CoarseTokenizer.new
|
20
|
+
end
|
21
|
+
|
22
|
+
|
23
|
+
def test_shift_to_buf
|
24
|
+
@tok.buf = ""
|
25
|
+
@tok.str = "abcdefg"
|
26
|
+
|
27
|
+
msg = "shift_to_buf - "
|
28
|
+
@tok.shift_to_buf(1)
|
29
|
+
assert_equals( msg, "a", @tok.buf )
|
30
|
+
assert_equals( msg, "bcdefg", @tok.str )
|
31
|
+
|
32
|
+
@tok.shift_to_buf(2)
|
33
|
+
assert_equals( msg, "abc", @tok.buf )
|
34
|
+
assert_equals( msg, "defg", @tok.str )
|
35
|
+
end
|
36
|
+
|
37
|
+
|
38
|
+
def test_shift_token
|
39
|
+
@tok.result = []
|
40
|
+
@tok.buf = "ABC"
|
41
|
+
@tok.str = "'def'"
|
42
|
+
|
43
|
+
msg = "shift_token - "
|
44
|
+
@tok.shift_token(1, :plain, :comment, :start)
|
45
|
+
assert_equals( msg, :plain, @tok.result.last._type)
|
46
|
+
assert_equals( msg, "ABC", @tok.result.last.string)
|
47
|
+
assert_equals( msg, "'", @tok.buf)
|
48
|
+
assert_equals( msg, "def'", @tok.str)
|
49
|
+
|
50
|
+
@tok.result = []
|
51
|
+
@tok.buf = "'ABC"
|
52
|
+
@tok.str = "'def"
|
53
|
+
|
54
|
+
@tok.shift_token(1, :comment, :plain, :end)
|
55
|
+
assert_equals( msg, :comment, @tok.result.last._type)
|
56
|
+
assert_equals( msg, "'ABC'", @tok.result.last.string)
|
57
|
+
assert_equals( msg, "", @tok.buf)
|
58
|
+
assert_equals( msg, "def", @tok.str)
|
59
|
+
end
|
60
|
+
|
61
|
+
|
62
|
+
def test_tokenize
|
63
|
+
msg = "tokenize - "
|
64
|
+
|
65
|
+
assert_equals( msg, (<<EOB
|
66
|
+
<plain>aa</>
|
67
|
+
EOB
|
68
|
+
).chomp,
|
69
|
+
format(@tok.tokenize((<<EOB
|
70
|
+
aa
|
71
|
+
EOB
|
72
|
+
).chomp))
|
73
|
+
)
|
74
|
+
|
75
|
+
########
|
76
|
+
assert_equals( msg, (<<EOB
|
77
|
+
<plain>aa </>
|
78
|
+
<quote_double>"bb"</>
|
79
|
+
EOB
|
80
|
+
).chomp,
|
81
|
+
format(@tok.tokenize((<<EOB
|
82
|
+
aa "bb"
|
83
|
+
EOB
|
84
|
+
).chomp))
|
85
|
+
)
|
86
|
+
|
87
|
+
########
|
88
|
+
assert_equals( msg, (<<EOB
|
89
|
+
<plain>aa </>
|
90
|
+
<quote_single>'bb'</>
|
91
|
+
EOB
|
92
|
+
).chomp,
|
93
|
+
format(@tok.tokenize((<<EOB
|
94
|
+
aa 'bb'
|
95
|
+
EOB
|
96
|
+
).chomp))
|
97
|
+
)
|
98
|
+
|
99
|
+
########
|
100
|
+
assert_equals( msg, (<<EOB
|
101
|
+
<plain>aa </>
|
102
|
+
<comment_single>--bb<br></>
|
103
|
+
<plain>cc</>
|
104
|
+
EOB
|
105
|
+
).chomp,
|
106
|
+
format(@tok.tokenize((<<EOB
|
107
|
+
aa --bb
|
108
|
+
cc
|
109
|
+
EOB
|
110
|
+
).chomp))
|
111
|
+
)
|
112
|
+
|
113
|
+
########
|
114
|
+
assert_equals( msg, (<<EOB
|
115
|
+
<plain>aa </>
|
116
|
+
<comment_multi>/* bb */</>
|
117
|
+
<plain> cc</>
|
118
|
+
EOB
|
119
|
+
).chomp,
|
120
|
+
format(@tok.tokenize((<<EOB
|
121
|
+
aa /* bb */ cc
|
122
|
+
EOB
|
123
|
+
).chomp))
|
124
|
+
)
|
125
|
+
|
126
|
+
########
|
127
|
+
assert_equals( msg + "begin with multiline comment", (<<EOB
|
128
|
+
<comment_multi>/* bb */</>
|
129
|
+
<plain> cc</>
|
130
|
+
EOB
|
131
|
+
).chomp,
|
132
|
+
format(@tok.tokenize((<<EOB
|
133
|
+
/* bb */ cc
|
134
|
+
EOB
|
135
|
+
).chomp))
|
136
|
+
)
|
137
|
+
end
|
138
|
+
|
139
|
+
|
140
|
+
def test_string_in_string
|
141
|
+
msg = "string_in_string"
|
142
|
+
|
143
|
+
########
|
144
|
+
assert_equals( msg, (<<EOB
|
145
|
+
<quote_double>"aa'bb'cc"</>
|
146
|
+
EOB
|
147
|
+
).chomp,
|
148
|
+
format(@tok.tokenize((<<EOB
|
149
|
+
"aa'bb'cc"
|
150
|
+
EOB
|
151
|
+
).chomp))
|
152
|
+
)
|
153
|
+
|
154
|
+
########
|
155
|
+
assert_equals( msg, (<<EOB
|
156
|
+
<quote_single>'aa"bb"cc'</>
|
157
|
+
EOB
|
158
|
+
).chomp,
|
159
|
+
format(@tok.tokenize((<<EOB
|
160
|
+
'aa"bb"cc'
|
161
|
+
EOB
|
162
|
+
).chomp))
|
163
|
+
)
|
164
|
+
end
|
165
|
+
|
166
|
+
|
167
|
+
def test_comment_in_comment
|
168
|
+
msg = "comment_in_comment - "
|
169
|
+
########
|
170
|
+
assert_equals( msg, (<<EOB
|
171
|
+
<comment_single>--a--b</>
|
172
|
+
EOB
|
173
|
+
).chomp,
|
174
|
+
format(@tok.tokenize((<<EOB
|
175
|
+
--a--b
|
176
|
+
EOB
|
177
|
+
).chomp))
|
178
|
+
)
|
179
|
+
|
180
|
+
########
|
181
|
+
assert_equals( msg, (<<EOB
|
182
|
+
<comment_single>-- aa /* bb */</>
|
183
|
+
EOB
|
184
|
+
).chomp,
|
185
|
+
format(@tok.tokenize((<<EOB
|
186
|
+
-- aa /* bb */
|
187
|
+
EOB
|
188
|
+
).chomp))
|
189
|
+
)
|
190
|
+
|
191
|
+
########
|
192
|
+
assert_equals( msg, (<<EOB
|
193
|
+
<comment_multi>/* aa /* bb */</>
|
194
|
+
EOB
|
195
|
+
).chomp,
|
196
|
+
format(@tok.tokenize((<<EOB
|
197
|
+
/* aa /* bb */
|
198
|
+
EOB
|
199
|
+
).chomp))
|
200
|
+
)
|
201
|
+
|
202
|
+
########
|
203
|
+
assert_equals( msg, (<<EOB
|
204
|
+
<comment_single>-- aa /* bb */</>
|
205
|
+
EOB
|
206
|
+
).chomp,
|
207
|
+
format(@tok.tokenize((<<EOB
|
208
|
+
-- aa /* bb */
|
209
|
+
EOB
|
210
|
+
).chomp))
|
211
|
+
)
|
212
|
+
end
|
213
|
+
|
214
|
+
|
215
|
+
def test_string_in_comment
|
216
|
+
msg = "string_in_comment - "
|
217
|
+
|
218
|
+
########
|
219
|
+
assert_equals( msg, (<<EOB
|
220
|
+
<comment_single>-- aa "bb" cc</>
|
221
|
+
EOB
|
222
|
+
).chomp,
|
223
|
+
format(@tok.tokenize((<<EOB
|
224
|
+
-- aa "bb" cc
|
225
|
+
EOB
|
226
|
+
).chomp))
|
227
|
+
)
|
228
|
+
|
229
|
+
########
|
230
|
+
assert_equals( msg, (<<EOB
|
231
|
+
<comment_single>-- aa 'bb' cc</>
|
232
|
+
EOB
|
233
|
+
).chomp,
|
234
|
+
format(@tok.tokenize((<<EOB
|
235
|
+
-- aa 'bb' cc
|
236
|
+
EOB
|
237
|
+
).chomp))
|
238
|
+
)
|
239
|
+
|
240
|
+
########
|
241
|
+
assert_equals( msg, (<<EOB
|
242
|
+
<comment_multi>/* aa "bb" cc */</>
|
243
|
+
EOB
|
244
|
+
).chomp,
|
245
|
+
format(@tok.tokenize((<<EOB
|
246
|
+
/* aa "bb" cc */
|
247
|
+
EOB
|
248
|
+
).chomp))
|
249
|
+
)
|
250
|
+
|
251
|
+
########
|
252
|
+
assert_equals( msg, (<<EOB
|
253
|
+
<comment_multi>/* aa 'bb' cc */</>
|
254
|
+
EOB
|
255
|
+
).chomp,
|
256
|
+
format(@tok.tokenize((<<EOB
|
257
|
+
/* aa 'bb' cc */
|
258
|
+
EOB
|
259
|
+
).chomp))
|
260
|
+
)
|
261
|
+
end
|
262
|
+
|
263
|
+
|
264
|
+
def test_comment_in_string
|
265
|
+
msg = "comment_in_string - "
|
266
|
+
|
267
|
+
########
|
268
|
+
assert_equals( msg + "comment_single in quote_single", (<<EOB
|
269
|
+
<quote_single>'aa--bb'</>
|
270
|
+
EOB
|
271
|
+
).chomp,
|
272
|
+
format(@tok.tokenize((<<EOB
|
273
|
+
'aa--bb'
|
274
|
+
EOB
|
275
|
+
).chomp))
|
276
|
+
)
|
277
|
+
|
278
|
+
########
|
279
|
+
assert_equals( msg + "comment_single in quote_double", (<<EOB
|
280
|
+
<quote_double>"aa--bb"</>
|
281
|
+
EOB
|
282
|
+
).chomp,
|
283
|
+
format(@tok.tokenize((<<EOB
|
284
|
+
"aa--bb"
|
285
|
+
EOB
|
286
|
+
).chomp))
|
287
|
+
)
|
288
|
+
|
289
|
+
########
|
290
|
+
assert_equals( msg + "comment_multi in quote_double", (<<EOB
|
291
|
+
<quote_double>"aa /* bb */ cc"</>
|
292
|
+
EOB
|
293
|
+
).chomp,
|
294
|
+
format(@tok.tokenize((<<EOB
|
295
|
+
"aa /* bb */ cc"
|
296
|
+
EOB
|
297
|
+
).chomp))
|
298
|
+
)
|
299
|
+
|
300
|
+
########
|
301
|
+
assert_equals( msg + "comment_multi in quote_double", (<<EOB
|
302
|
+
<quote_single>'aa /* bb */ cc'</>
|
303
|
+
EOB
|
304
|
+
).chomp,
|
305
|
+
format(@tok.tokenize((<<EOB
|
306
|
+
'aa /* bb */ cc'
|
307
|
+
EOB
|
308
|
+
).chomp))
|
309
|
+
)
|
310
|
+
end
|
311
|
+
|
312
|
+
|
313
|
+
def test_string_escape
|
314
|
+
msg = "string_escape"
|
315
|
+
|
316
|
+
########
|
317
|
+
assert_equals( msg, (<<EOB
|
318
|
+
<quote_double>"_a_\\\\_b_<br>_c_\\'_d_"</>
|
319
|
+
EOB
|
320
|
+
).chomp,
|
321
|
+
format(@tok.tokenize((<<EOB
|
322
|
+
"_a_\\\\_b_\n_c_\\'_d_"
|
323
|
+
EOB
|
324
|
+
).chomp))
|
325
|
+
)
|
326
|
+
|
327
|
+
########
|
328
|
+
assert_equals( msg, (<<EOB
|
329
|
+
<quote_single>'_a_\\\\_b_<br>_c_\\'_d_'</>
|
330
|
+
EOB
|
331
|
+
).chomp,
|
332
|
+
format(@tok.tokenize((<<EOB
|
333
|
+
'_a_\\\\_b_\n_c_\\'_d_'
|
334
|
+
EOB
|
335
|
+
).chomp))
|
336
|
+
)
|
337
|
+
|
338
|
+
########
|
339
|
+
assert_equals( msg, (<<EOB
|
340
|
+
<quote_double>"_a_""_b_"</>
|
341
|
+
EOB
|
342
|
+
).chomp,
|
343
|
+
format(@tok.tokenize((<<EOB
|
344
|
+
"_a_""_b_"
|
345
|
+
EOB
|
346
|
+
).chomp))
|
347
|
+
)
|
348
|
+
|
349
|
+
########
|
350
|
+
assert_equals( msg, (<<EOB
|
351
|
+
<quote_single>'_a_''_b_'</>
|
352
|
+
EOB
|
353
|
+
).chomp,
|
354
|
+
format(@tok.tokenize((<<EOB
|
355
|
+
'_a_''_b_'
|
356
|
+
EOB
|
357
|
+
).chomp))
|
358
|
+
)
|
359
|
+
end
|
360
|
+
end
|
@@ -0,0 +1,489 @@
|
|
1
|
+
# -*- coding: utf-8 -*-
|
2
|
+
|
3
|
+
require File.join(File.expand_path(File.dirname(__FILE__)), "helper")
|
4
|
+
|
5
|
+
require "anbt-sql-formatter/formatter"
|
6
|
+
|
7
|
+
|
8
|
+
class TestAnbtSqlFormatter < Test::Unit::TestCase
|
9
|
+
INDENT_STR = "<-indent->"
|
10
|
+
|
11
|
+
def setup
|
12
|
+
@rule = AnbtSql::Rule.new
|
13
|
+
@rule.indent_string = INDENT_STR
|
14
|
+
@parser = AnbtSql::Parser.new(@rule)
|
15
|
+
|
16
|
+
@fmt = AnbtSql::Formatter.new(@rule)
|
17
|
+
end
|
18
|
+
|
19
|
+
|
20
|
+
def test_modify_keyword_case
|
21
|
+
msg = "upcase"
|
22
|
+
|
23
|
+
########
|
24
|
+
@rule.keyword = AnbtSql::Rule::KEYWORD_UPPER_CASE
|
25
|
+
|
26
|
+
tokens = @parser.parse("select")
|
27
|
+
@fmt.modify_keyword_case(tokens)
|
28
|
+
assert_equals( msg + "", (<<EOB
|
29
|
+
<keyword>SELECT</>
|
30
|
+
EOB
|
31
|
+
).chop,
|
32
|
+
Helper.format_tokens(tokens)
|
33
|
+
)
|
34
|
+
|
35
|
+
########
|
36
|
+
msg = "downcase"
|
37
|
+
@rule.keyword = AnbtSql::Rule::KEYWORD_LOWER_CASE
|
38
|
+
|
39
|
+
tokens = @parser.parse("SELECT")
|
40
|
+
@fmt.modify_keyword_case(tokens)
|
41
|
+
assert_equals( msg + "", (<<EOB
|
42
|
+
<keyword>select</>
|
43
|
+
EOB
|
44
|
+
).chop,
|
45
|
+
Helper.format_tokens(tokens)
|
46
|
+
)
|
47
|
+
end
|
48
|
+
|
49
|
+
|
50
|
+
def test_concat_operator_for_oracle
|
51
|
+
msg = "concat_operator_for_oracle - "
|
52
|
+
|
53
|
+
########
|
54
|
+
tokens = @parser.parse("a+")
|
55
|
+
@fmt.concat_operator_for_oracle(tokens)
|
56
|
+
assert_equals( msg + "length is less than 3, should do nothing",
|
57
|
+
(<<EOB
|
58
|
+
<name>a</>
|
59
|
+
<symbol>+</>
|
60
|
+
EOB
|
61
|
+
).chop,
|
62
|
+
Helper.format_tokens(tokens)
|
63
|
+
)
|
64
|
+
|
65
|
+
########
|
66
|
+
tokens = @parser.parse("(+)")
|
67
|
+
@fmt.concat_operator_for_oracle(tokens)
|
68
|
+
assert_equals( msg + "", (<<EOB
|
69
|
+
<symbol>(+)</>
|
70
|
+
EOB
|
71
|
+
).chop,
|
72
|
+
Helper.format_tokens(tokens)
|
73
|
+
)
|
74
|
+
|
75
|
+
########
|
76
|
+
tokens = @parser.parse("(+)")
|
77
|
+
tokens = @fmt.format_list(tokens)
|
78
|
+
assert_equals( msg + "format_list()", (<<EOB
|
79
|
+
<symbol>(+)</>
|
80
|
+
EOB
|
81
|
+
).chop,
|
82
|
+
Helper.format_tokens(tokens)
|
83
|
+
)
|
84
|
+
end
|
85
|
+
|
86
|
+
|
87
|
+
def test_remove_symbol_side_space
|
88
|
+
msg = "remove_symbol_side_space - "
|
89
|
+
|
90
|
+
########
|
91
|
+
tokens = @parser.parse("a (b")
|
92
|
+
@fmt.remove_symbol_side_space(tokens)
|
93
|
+
assert_equals( msg + "", (<<EOB
|
94
|
+
<name>a</>
|
95
|
+
<symbol>(</>
|
96
|
+
<name>b</>
|
97
|
+
EOB
|
98
|
+
).chop,
|
99
|
+
Helper.format_tokens(tokens)
|
100
|
+
)
|
101
|
+
|
102
|
+
|
103
|
+
########
|
104
|
+
tokens = @parser.parse("a( b")
|
105
|
+
@fmt.remove_symbol_side_space(tokens)
|
106
|
+
assert_equals( msg + "", (<<EOB
|
107
|
+
<name>a</>
|
108
|
+
<symbol>(</>
|
109
|
+
<name>b</>
|
110
|
+
EOB
|
111
|
+
).chop,
|
112
|
+
Helper.format_tokens(tokens)
|
113
|
+
)
|
114
|
+
|
115
|
+
|
116
|
+
########
|
117
|
+
tokens = @parser.parse("a ( b")
|
118
|
+
@fmt.remove_symbol_side_space(tokens)
|
119
|
+
assert_equals( msg + "", (<<EOB
|
120
|
+
<name>a</>
|
121
|
+
<symbol>(</>
|
122
|
+
<name>b</>
|
123
|
+
EOB
|
124
|
+
).chop,
|
125
|
+
Helper.format_tokens(tokens)
|
126
|
+
)
|
127
|
+
end
|
128
|
+
|
129
|
+
|
130
|
+
def test_special_treatment_for_parenthesis_with_one_element
|
131
|
+
msg = "special_treatment_for_parenthesis_with_one_element - "
|
132
|
+
|
133
|
+
########
|
134
|
+
tokens = @parser.parse("( 1 )")
|
135
|
+
@fmt.special_treatment_for_parenthesis_with_one_element(tokens)
|
136
|
+
assert_equals( msg + "one element, should not separate", (<<EOB
|
137
|
+
<symbol>(1)</>
|
138
|
+
EOB
|
139
|
+
).chop,
|
140
|
+
Helper.format_tokens(tokens)
|
141
|
+
)
|
142
|
+
|
143
|
+
|
144
|
+
########
|
145
|
+
tokens = @parser.parse("(1,2)")
|
146
|
+
@fmt.special_treatment_for_parenthesis_with_one_element(tokens)
|
147
|
+
assert_equals( msg + "more than one element, should separate", (<<EOB
|
148
|
+
<symbol>(</>
|
149
|
+
<value>1</>
|
150
|
+
<symbol>,</>
|
151
|
+
<value>2</>
|
152
|
+
<symbol>)</>
|
153
|
+
EOB
|
154
|
+
).chop,
|
155
|
+
Helper.format_tokens(tokens)
|
156
|
+
)
|
157
|
+
end
|
158
|
+
|
159
|
+
|
160
|
+
def test_insert_space_between_tokens
|
161
|
+
msg = "insert_space_between_tokens - "
|
162
|
+
|
163
|
+
########
|
164
|
+
tokens = @parser.parse("a=")
|
165
|
+
@fmt.insert_space_between_tokens(tokens)
|
166
|
+
assert_equals(msg, (<<EOB
|
167
|
+
<name>a</>
|
168
|
+
<space> </>
|
169
|
+
<symbol>=</>
|
170
|
+
EOB
|
171
|
+
).chop,
|
172
|
+
Helper.format_tokens(tokens)
|
173
|
+
)
|
174
|
+
|
175
|
+
########
|
176
|
+
tokens = @parser.parse("=b")
|
177
|
+
@fmt.insert_space_between_tokens(tokens)
|
178
|
+
assert_equals(msg, (<<EOB
|
179
|
+
<symbol>=</>
|
180
|
+
<space> </>
|
181
|
+
<name>b</>
|
182
|
+
EOB
|
183
|
+
).chop,
|
184
|
+
Helper.format_tokens(tokens)
|
185
|
+
)
|
186
|
+
end
|
187
|
+
|
188
|
+
|
189
|
+
def test_insert_return_and_indent
|
190
|
+
msg = "insert_return_and_indent - "
|
191
|
+
|
192
|
+
########
|
193
|
+
tokens = @parser.parse("foo bar")
|
194
|
+
|
195
|
+
index, indent_depth = 1, 1
|
196
|
+
|
197
|
+
assert_equals( msg + "before", (<<EOB
|
198
|
+
<name>foo</>
|
199
|
+
<space> </>
|
200
|
+
<name>bar</>
|
201
|
+
EOB
|
202
|
+
).chop,
|
203
|
+
Helper.format_tokens(tokens)
|
204
|
+
)
|
205
|
+
|
206
|
+
result = @fmt.insert_return_and_indent(tokens, index, indent_depth)
|
207
|
+
|
208
|
+
assert_equals( msg + "index: #{index} / indent depth: #{indent_depth}",
|
209
|
+
(<<EOB
|
210
|
+
<name>foo</>
|
211
|
+
<space>\n#{INDENT_STR}</>
|
212
|
+
<name>bar</>
|
213
|
+
EOB
|
214
|
+
).chop,
|
215
|
+
Helper.format_tokens(tokens)
|
216
|
+
)
|
217
|
+
|
218
|
+
########
|
219
|
+
# msg = "" #"後の空白を置き換え"
|
220
|
+
tokens = @parser.parse("select foo")
|
221
|
+
|
222
|
+
index, indent_depth = 1, 1
|
223
|
+
|
224
|
+
assert_equals( msg + "before", (<<EOB
|
225
|
+
<keyword>select</>
|
226
|
+
<space> </>
|
227
|
+
<name>foo</>
|
228
|
+
EOB
|
229
|
+
).chop,
|
230
|
+
Helper.format_tokens(tokens)
|
231
|
+
)
|
232
|
+
|
233
|
+
result = @fmt.insert_return_and_indent(tokens, index, indent_depth)
|
234
|
+
|
235
|
+
assert_equals( msg + "#{msg}: index: #{index} / indent depth: #{indent_depth}",
|
236
|
+
(<<EOB
|
237
|
+
<keyword>select</>
|
238
|
+
<space>\n#{INDENT_STR}</>
|
239
|
+
<name>foo</>
|
240
|
+
EOB
|
241
|
+
).chop,
|
242
|
+
Helper.format_tokens(tokens)
|
243
|
+
)
|
244
|
+
|
245
|
+
########
|
246
|
+
msg = "" #"前の空白を置き換え"
|
247
|
+
tokens = @parser.parse("select foo")
|
248
|
+
index, indent_depth = 2, 1
|
249
|
+
|
250
|
+
assert_equals( msg + "before", (<<EOB
|
251
|
+
<keyword>select</>
|
252
|
+
<space> </>
|
253
|
+
<name>foo</>
|
254
|
+
EOB
|
255
|
+
).chop,
|
256
|
+
Helper.format_tokens(tokens)
|
257
|
+
)
|
258
|
+
|
259
|
+
result = @fmt.insert_return_and_indent(tokens, index, indent_depth)
|
260
|
+
assert_equals( msg + "", 0, result)
|
261
|
+
|
262
|
+
assert_equals( msg + "#{msg}: index: #{index} / indent depth: #{indent_depth}",
|
263
|
+
(<<EOB
|
264
|
+
<keyword>select</>
|
265
|
+
<space>\n#{INDENT_STR}</>
|
266
|
+
<name>foo</>
|
267
|
+
EOB
|
268
|
+
).chop,
|
269
|
+
Helper.format_tokens(tokens)
|
270
|
+
)
|
271
|
+
|
272
|
+
########
|
273
|
+
msg = "indent depth = 2"
|
274
|
+
tokens = @parser.parse("foo bar")
|
275
|
+
index, indent_depth = 1, 2
|
276
|
+
|
277
|
+
assert_equals( msg + "before", (<<EOB
|
278
|
+
<name>foo</>
|
279
|
+
<space> </>
|
280
|
+
<name>bar</>
|
281
|
+
EOB
|
282
|
+
).chop,
|
283
|
+
Helper.format_tokens(tokens)
|
284
|
+
)
|
285
|
+
|
286
|
+
result = @fmt.insert_return_and_indent(tokens, index, indent_depth)
|
287
|
+
|
288
|
+
assert_equals( msg + "#{msg}: index: #{index} / indent depth: #{indent_depth}",
|
289
|
+
(<<EOB
|
290
|
+
<name>foo</>
|
291
|
+
<space>\n#{INDENT_STR}#{INDENT_STR}</>
|
292
|
+
<name>bar</>
|
293
|
+
EOB
|
294
|
+
).chop,
|
295
|
+
Helper.format_tokens(tokens)
|
296
|
+
)
|
297
|
+
|
298
|
+
########
|
299
|
+
msg = "kw, nl, kw"
|
300
|
+
tokens = @parser.parse("select\ncase")
|
301
|
+
|
302
|
+
assert_equals( msg + "", (<<EOB
|
303
|
+
<keyword>select</>
|
304
|
+
<space>\n</>
|
305
|
+
<keyword>case</>
|
306
|
+
EOB
|
307
|
+
).chop,
|
308
|
+
Helper.format_tokens(tokens)
|
309
|
+
)
|
310
|
+
|
311
|
+
########
|
312
|
+
=begin
|
313
|
+
msg = "FROM の前で改行"
|
314
|
+
|
315
|
+
assert_equals( msg + "", (<<EOB
|
316
|
+
SELECT
|
317
|
+
<-indent-><-indent->aa
|
318
|
+
<-indent-><-indent->,bb
|
319
|
+
<-indent-><-indent->,cc
|
320
|
+
<-indent-><-indent->,dd
|
321
|
+
<-indent-><-indent->,ee
|
322
|
+
<-indent->FROM
|
323
|
+
<-indent-><-indent->foo
|
324
|
+
;
|
325
|
+
EOB
|
326
|
+
).chop,
|
327
|
+
# Helper.format_tokens(tokens),
|
328
|
+
@fmt.format("SELECT aa ,bb ,cc ,dd ,ee FROM foo;"),
|
329
|
+
"#{msg}")
|
330
|
+
=end
|
331
|
+
|
332
|
+
# ########
|
333
|
+
# msg = "指定した index に対して tokens[index] が存在するので 1 を返すべき"
|
334
|
+
# 間違い。tokens[index] が存在していても 1 を返すとは限らない。
|
335
|
+
# tokens = parser.parse("foo bar")
|
336
|
+
# #pp tokens
|
337
|
+
# index = 1
|
338
|
+
# result = @fmt.insert_return_and_indent(tokens, index, 1)
|
339
|
+
|
340
|
+
# assert_equals( msg + "", 1, result, msg)
|
341
|
+
|
342
|
+
########
|
343
|
+
msg = "指定した index に対して tokens[index] が存在しないので 0 を返すべき"
|
344
|
+
tokens = @parser.parse("foo bar")
|
345
|
+
|
346
|
+
index = 10
|
347
|
+
result = @fmt.insert_return_and_indent(tokens, index, 1)
|
348
|
+
|
349
|
+
assert_equals( msg + "", 0, result)
|
350
|
+
end ## insert_return_and_indent
|
351
|
+
|
352
|
+
|
353
|
+
def test_format
|
354
|
+
msg = "format - "
|
355
|
+
|
356
|
+
########
|
357
|
+
func_name = "TEST_FUNCTION"
|
358
|
+
@rule.function_names << func_name
|
359
|
+
|
360
|
+
assert_equals( msg + "function with parenthesis", (<<EOB
|
361
|
+
SELECT
|
362
|
+
<-indent-><-indent->#{func_name}( * )
|
363
|
+
EOB
|
364
|
+
).chop,
|
365
|
+
@fmt.format("select #{func_name}(*)")
|
366
|
+
)
|
367
|
+
|
368
|
+
@rule.function_names.delete func_name
|
369
|
+
|
370
|
+
########
|
371
|
+
assert_equals( msg + "Next line of single commnet", (<<EOB
|
372
|
+
SELECT
|
373
|
+
<-indent-><-indent->-- comment
|
374
|
+
<-indent-><-indent->name
|
375
|
+
EOB
|
376
|
+
).chop,
|
377
|
+
@fmt.format(<<EOB
|
378
|
+
select
|
379
|
+
-- comment
|
380
|
+
name
|
381
|
+
EOB
|
382
|
+
)
|
383
|
+
)
|
384
|
+
|
385
|
+
########
|
386
|
+
assert_equals( msg + "new line after single line comment",
|
387
|
+
(<<EOB
|
388
|
+
--a
|
389
|
+
b
|
390
|
+
EOB
|
391
|
+
).chop,
|
392
|
+
@fmt.format(<<EOB
|
393
|
+
--a
|
394
|
+
b
|
395
|
+
EOB
|
396
|
+
)
|
397
|
+
);
|
398
|
+
|
399
|
+
########
|
400
|
+
assert_equals( msg + "two line breaks after semicolon",
|
401
|
+
(<<EOB
|
402
|
+
a
|
403
|
+
;
|
404
|
+
|
405
|
+
b
|
406
|
+
EOB
|
407
|
+
).chop,
|
408
|
+
@fmt.format(<<EOB
|
409
|
+
a;b
|
410
|
+
EOB
|
411
|
+
)
|
412
|
+
);
|
413
|
+
|
414
|
+
########
|
415
|
+
assert_equals( msg + "two line breaks after semicolon",
|
416
|
+
(<<EOB
|
417
|
+
a
|
418
|
+
;
|
419
|
+
EOB
|
420
|
+
).chop,
|
421
|
+
@fmt.format("a;")
|
422
|
+
);
|
423
|
+
end
|
424
|
+
|
425
|
+
|
426
|
+
def test_split_by_semicolon
|
427
|
+
msg = "split_by_semicolon - "
|
428
|
+
|
429
|
+
########
|
430
|
+
tokens = @parser.parse("a;b")
|
431
|
+
|
432
|
+
assert_equals( msg + "first statement",
|
433
|
+
"a",
|
434
|
+
@fmt.split_by_semicolon(tokens)[0][0].string
|
435
|
+
)
|
436
|
+
assert_equals( msg + "second statement",
|
437
|
+
"b",
|
438
|
+
@fmt.split_by_semicolon(tokens)[1][0].string
|
439
|
+
)
|
440
|
+
|
441
|
+
########
|
442
|
+
tokens = @parser.parse(";")
|
443
|
+
statements = @fmt.split_by_semicolon(tokens)
|
444
|
+
assert_equals( msg,
|
445
|
+
[],
|
446
|
+
statements[0]
|
447
|
+
)
|
448
|
+
assert_equals( msg,
|
449
|
+
[],
|
450
|
+
statements[1]
|
451
|
+
)
|
452
|
+
|
453
|
+
########
|
454
|
+
tokens = @parser.parse("a;")
|
455
|
+
statements = @fmt.split_by_semicolon(tokens)
|
456
|
+
assert_equals( msg,
|
457
|
+
"<name>a</>",
|
458
|
+
Helper.format_tokens( statements[0] )
|
459
|
+
)
|
460
|
+
assert_equals( msg,
|
461
|
+
[],
|
462
|
+
statements[1]
|
463
|
+
)
|
464
|
+
|
465
|
+
########
|
466
|
+
tokens = @parser.parse(";a")
|
467
|
+
statements = @fmt.split_by_semicolon(tokens)
|
468
|
+
assert_equals( msg,
|
469
|
+
[],
|
470
|
+
statements[0]
|
471
|
+
)
|
472
|
+
assert_equals( msg,
|
473
|
+
"<name>a</>",
|
474
|
+
Helper.format_tokens( statements[1] )
|
475
|
+
)
|
476
|
+
|
477
|
+
########
|
478
|
+
tokens = @parser.parse("a;b")
|
479
|
+
statements = @fmt.split_by_semicolon(tokens)
|
480
|
+
assert_equals( msg,
|
481
|
+
"<name>a</>",
|
482
|
+
Helper.format_tokens( statements[0] )
|
483
|
+
)
|
484
|
+
assert_equals( msg,
|
485
|
+
"<name>b</>",
|
486
|
+
Helper.format_tokens( statements[1] )
|
487
|
+
)
|
488
|
+
end
|
489
|
+
end
|