bijou 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/ChangeLog.txt +4 -0
- data/LICENSE.txt +58 -0
- data/README.txt +48 -0
- data/Rakefile +105 -0
- data/doc/INSTALL.rdoc +260 -0
- data/doc/README.rdoc +314 -0
- data/doc/releases/bijou-0.1.0.rdoc +60 -0
- data/examples/birthday/birthday.rb +34 -0
- data/examples/holiday/holiday.rb +61 -0
- data/examples/holiday/letterhead.txt +4 -0
- data/examples/holiday/signature.txt +9 -0
- data/examples/phishing/letter.txt +29 -0
- data/examples/phishing/letterhead.txt +4 -0
- data/examples/phishing/phishing.rb +21 -0
- data/examples/phishing/signature.txt +9 -0
- data/examples/profile/profile.rb +46 -0
- data/lib/bijou.rb +15 -0
- data/lib/bijou/backend.rb +542 -0
- data/lib/bijou/cgi/adapter.rb +201 -0
- data/lib/bijou/cgi/handler.rb +5 -0
- data/lib/bijou/cgi/request.rb +37 -0
- data/lib/bijou/common.rb +12 -0
- data/lib/bijou/component.rb +108 -0
- data/lib/bijou/config.rb +60 -0
- data/lib/bijou/console/adapter.rb +167 -0
- data/lib/bijou/console/handler.rb +4 -0
- data/lib/bijou/console/request.rb +26 -0
- data/lib/bijou/context.rb +431 -0
- data/lib/bijou/diagnostics.rb +87 -0
- data/lib/bijou/errorformatter.rb +322 -0
- data/lib/bijou/exception.rb +39 -0
- data/lib/bijou/filters.rb +107 -0
- data/lib/bijou/httprequest.rb +108 -0
- data/lib/bijou/httpresponse.rb +268 -0
- data/lib/bijou/lexer.rb +513 -0
- data/lib/bijou/minicgi.rb +159 -0
- data/lib/bijou/parser.rb +1026 -0
- data/lib/bijou/processor.rb +404 -0
- data/lib/bijou/prstringio.rb +400 -0
- data/lib/bijou/webrick/adapter.rb +174 -0
- data/lib/bijou/webrick/handler.rb +32 -0
- data/lib/bijou/webrick/request.rb +45 -0
- data/script/cgi.rb +25 -0
- data/script/console.rb +7 -0
- data/script/server.rb +7 -0
- data/test/t1.cfg +5 -0
- data/test/tc_config.rb +26 -0
- data/test/tc_filter.rb +25 -0
- data/test/tc_lexer.rb +120 -0
- data/test/tc_response.rb +103 -0
- data/test/tc_ruby.rb +62 -0
- data/test/tc_stack.rb +50 -0
- metadata +121 -0
@@ -0,0 +1,159 @@
+#
+# Copyright (c) 2007-2008 Todd Lucas. All rights reserved.
+#
+# minicgi.rb - A lightweight version of Ruby's built-in CGI class
+#
+require 'cgi'
+require 'bijou/common'
+
+module Bijou
+  #
+  # MiniCGI provides some of the functionality of the Ruby CGI class in cgi.rb
+  # in a way that makes it accessible from the HttpRequest interface.
+  # The Ruby CGI class is multipurpose and provides features such as HTML
+  # rendering, which isn't useful within the Bijou framework (in part, because
+  # it would bypass the Bijou output rendering mechanism).
+  #
+  # One significant difference is that MiniCGI always preserves the query
+  # string values separately from any posted values. This is similar to the
+  # way that PHP and ASP work. Ruby CGI merges all parameters together,
+  # regardless of their origin. This feature is also useful, so MiniCGI
+  # provides a merged parameter set as well, which is also available via
+  # the HttpRequest interface.
+  #
+  class MiniCGI
+    #
+    # Set single to true to have request parameters with single values
+    # be converted from an array to a single value. Otherwise, all parameter
+    # values will be arrays.
+    #
+    def initialize(single = true)
+      @single = single
+
+      # Defines @params, @cookies, @multipart
+      extend ::CGI::QueryExtension
+      @multipart = false
+
+      # NOTE: We use alternate names for these variables (e.g., get instead
+      # of query_string) because CGI::QueryExtension takes the liberty of
+      # defining methods on this class for a large set of HTTP-oriented
+      # environment variables.
+      @get = nil
+      @post = nil
+      @server = nil
+      @method = nil   # from REQUEST_METHOD
+
+      initialize_params()
+      initialize_server()
+    end
+
+    attr_reader :get, :post, :method, :server
+
+    def env_table
+      ENV
+    end
+
+    def stdinput
+      $stdin
+    end
+
+    def stdoutput
+      $DEFAULT_OUTPUT
+    end
+
+    #
+    # Initialize the data from the query.
+    #
+    # NOTE: This overridden version of the CGI equivalent reads data into
+    # separate query_string and form tables and, optionally, changes the
+    # storage format of parameter values.
+    #
+    # Handles multipart forms (in particular, forms that involve file uploads).
+    # Reads query parameters into the @params field, and cookies into @cookies.
+    #
+    def initialize_params()
+      @method = env_table['REQUEST_METHOD']
+      @multipart = false
+
+      qs_params = ''
+      form_params = ''
+
+      case @method
+      when "GET", "HEAD", "POST"
+        if defined?(MOD_RUBY)
+          qs_params = (Apache::request.args or '')
+        else
+          qs_params = (env_table['QUERY_STRING'] or '')
+        end
+
+        if @method == 'POST'
+          if %r|\Amultipart/form-data.*boundary=\"?([^\";,]+)\"?|n.match(env_table['CONTENT_TYPE'])
+            boundary = $1.dup
+            @multipart = true
+            form_params = read_multipart(boundary, Integer(env_table['CONTENT_LENGTH']))
+          else
+            stdinput.binmode if defined? stdinput.binmode
+            form_params = (stdinput.read(Integer(env_table['CONTENT_LENGTH'])) or '')
+          end
+        end
+      else
+        qs_params = read_from_cmdline
+      end
+
+      query_string = ::CGI::parse(qs_params) || {}
+      form = ::CGI::parse(form_params) || {}
+      cookies = ::CGI::Cookie::parse((env_table['HTTP_COOKIE'] or
+                                      env_table['COOKIE']))
+
+      # If requested, convert all single-element arrays to their values.
+      if @single
+        @get = Bijou::MiniCGI.singularize(query_string)
+        @post = Bijou::MiniCGI.singularize(form)
+        @cookies = Bijou::MiniCGI.singularize(cookies)
+      else
+        @get = query_string
+        @post = form
+        @cookies = cookies
+      end
+
+      @params = @post.clone
+      @params.merge!(@get)
+      @params.merge!(@cookies)
+    end
+
+    def initialize_server
+      @server = {}
+
+      %w[ AUTH_TYPE CONTENT_TYPE GATEWAY_INTERFACE PATH_INFO
+          PATH_TRANSLATED QUERY_STRING REMOTE_ADDR REMOTE_HOST
+          REMOTE_IDENT REMOTE_USER REQUEST_METHOD SCRIPT_NAME
+          SERVER_NAME SERVER_PROTOCOL SERVER_SOFTWARE
+
+          REQUEST_URI
+
+          HTTP_ACCEPT HTTP_ACCEPT_CHARSET HTTP_ACCEPT_ENCODING
+          HTTP_ACCEPT_LANGUAGE HTTP_CACHE_CONTROL HTTP_FROM HTTP_HOST
+          HTTP_NEGOTIATE HTTP_PRAGMA HTTP_REFERER HTTP_USER_AGENT
+      ].each { |env|
+        @server[env] = env_table[env]
+      }
+    end
+
+    # Converts a hash of arrays to a hash of arrays and values whereby
+    # any single item arrays in the original hash become values in the
+    # result hash.
+    def self.singularize(hash)
+      result = {}
+
+      # Extract any single-item values from their arrays.
+      hash.each {|k,v|
+        if v.length == 1
+          result[k] = v[0]
+        else
+          result[k] = v
+        end
+      }
+      return result
+    end
+  end
+end
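The singularize helper above is what gives MiniCGI its PHP/ASP-style parameter handling: single-element value arrays are unwrapped into plain values, repeated parameters stay as arrays, and the same treatment is applied to @get, @post, and @cookies before they are merged into @params. A minimal illustrative sketch of that behavior (not part of the packaged files; it assumes only that the gem is installed so bijou/minicgi can be required):

# Illustrative sketch -- not part of the gem's sources.
require 'bijou/minicgi'

# CGI-style parameter tables map each name to an array of values.
raw = {
  'id'    => ['42'],           # single value
  'color' => ['red', 'blue'],  # repeated parameter
}

flat = Bijou::MiniCGI.singularize(raw)
flat['id']     # => "42"             (single-element array unwrapped)
flat['color']  # => ["red", "blue"]  (multi-value entry left as an array)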
data/lib/bijou/parser.rb
ADDED
@@ -0,0 +1,1026 @@
+#
+# Copyright (c) 2007-2008 Todd Lucas. All rights reserved.
+#
+# parser.rb - The Bijou component parser
+#
+require 'bijou/common'
+require 'bijou/lexer'
+require 'bijou/backend'
+
+#--
+#
+# Use stringio, if available. Otherwise, use the alternative.
+#
+#++
+begin
+  require 'stringio'
+rescue LoadError
+  require 'bijou/prstringio'
+
+  class StringIO < PureRubyStringIO
+  end
+end
+
+#--
+# TODO:
+#   Handle nested tags <% <% %> %> with an error
+#   Disallow tags in defs (init/fini)
+#++
+
+#
+# The Bijou parser is a bi-modal top-down LL(1) parser. It is
+# bi-modal in that it uses two lexers. One for markup and the
+# other for Bijou tags.
+#
+class Bijou::Parser
+  include Bijou::Parse
+
+  # The filename is written to line marker comments in the output.
+  attr_reader :filename
+
+  attr_reader :diagnostics
+
+  def initialize
+    @debug = false
+    # REVIEW: Make this an option
+    @strip = true
+    @strippingTag = false
+
+    # @debug = true
+    @diagnostics = nil
+    @input = nil
+    @backend = nil
+    detach_file
+  end
+
+  def detach_file
+    @filename = ''
+
+    # We keep the diagnostics and the backend
+    # @diagnostics
+    # @backend
+
+    @input = nil
+
+    @topLexer = nil
+    @tagLexer = nil
+
+    @lexer = nil
+    @parsingArgs = false
+    @namedSection = nil
+  end
+
+  def attach_file(file, filename)
+    @filename = filename
+
+    @diagnostics = Diagnostics.new
+
+    trace = false
+    use_markers = true  # The Perl-oriented '#line' marker, for stack traces.
+
+    @input = LexerInput.new(file, @diagnostics)
+    @backend = Backend.new(@diagnostics, trace, use_markers)
+
+    @topLexer = TextLexer.new(@input)
+    @tagLexer = TagLexer.new(@input)
+
+    @lexer = @topLexer
+    @parsingArgs = false
+    @namedSection = nil
+  end
+
+  def diagnostic(m, l, c)
+    if !l; l = @lexer.line; end
+    if !c; c = @lexer.column; end
+    m.at(l, c)
+    m
+  end
+
+  def message(s, line=nil, column=nil)
+    m = Message.new
+    m << s
+    @input.diagnostics.add_message(diagnostic(m, line, column))
+  end
+
+  def warning(s, line=nil, column=nil)
+    m = Warning.new
+    m << s
+    @input.diagnostics.add_warning(diagnostic(m, line, column))
+  end
+
+  def error(s, line=nil, column=nil)
+    m = Error.new
+    m << s
+    @input.diagnostics.add_error(diagnostic(m, line, column))
+  end
+
+  def debug_(str)
+    print str if @debug
+  end
+
+  def debug(str)
+    puts str if @debug
+  end
+
+  def assert(c)
+    if (!c)
+      error("assert failed on line " + @lexer.line.to_s)
+    end
+  end
+
+  def parseTagName
+    debug_("parseTagName: ")
+
+    # If the first character isn't a word character, return nil
+    tag = ''
+    while tok = @lexer.next_token
+      if tok != Token::Char || /\w/ !~ @lexer.text
+        @lexer.pop_token
+        debug(tag)
+        return tag
+      end
+      tag << @lexer.text
+    end
+    debug(tag + "<eof>")
+  end
+
+  def eatWhitespace()
+    while tok = @lexer.next_token
+      if tok != Token::Char || @lexer.text !~ /\s/
+        @lexer.pop_token
+        return true
+      end
+    end
+    return nil
+  end
+
+  def eatLineWhitespace()
+    while tok = @lexer.next_token
+      if tok != Token::Char || @lexer.text !~ /\s/ || @lexer.text == '\n'
+        @lexer.pop_token
+        return true
+      end
+    end
+    return nil
+  end
+
+  def eatArgumentWhitespace()
+    lineComment = false
+
+    while tok = @lexer.next_token
+
+      if lineComment
+        if tok == Token::Char && @lexer.text == "\n"
+          # warning("eat arg crlf")
+          lineComment = false
+          next
+        else
+          # Eat comments
+          next
+        end
+      end
+
+      if tok == Token::Char
+        str = @lexer.text
+
+        if str == '#'
+          # warning("eat arg comment")
+          lineComment = true
+        elsif str == "\n"
+          # warning("eat arg crlf")
+          # lineComment = false
+        elsif @lexer.text =~ /\s/
+          # Eat whitespace
+        else # non-whitespace
+          # This should be an identifier
+          # warning("eat arg ws 1 #{@lexer.text}")
+          @lexer.pop_token
+          return true
+        end
+      else
+        # warning("eat arg ws 3 #{@lexer.text} #{tok}")
+        @lexer.pop_token
+        return true
+      end
+    end
+
+    # EOF
+    return false
+  end
+
+  def parseCharSequence(re)
+    debug_("parseCharSequence: ")
+    name = ''
+
+    while tok = @lexer.next_token
+      text = @lexer.text
+      if tok == Token::Char && text =~ re
+        name << text
+      else
+        @lexer.pop_token
+        debug(name)
+        return name
+      end
+    end
+
+    debug(name + "<eof>")
+    return name
+  end
+
+  def parseIdentifier
+    parseCharSequence(/\w/)
+  end
+
+  def parseFilename
+    parseCharSequence(/[\\\/\w\.]/)
+  end
+
+  def expectChar(ch, msg='')
+    if tok = @lexer.next_token
+      if tok == Token::Char
+        if @lexer.text == '='
+          return true
+        end
+      end
+      error("expected #{ch}#{msg}")
+    else
+      error("expected #{ch}#{msg} at end of file")
+    end
+    false
+  end
+
+  def findTagClose(openToken, closeToken)
+    while tok = @lexer.next_token
+      if tok == Token::TagClose
+        tagClose = @lexer.text
+        if tagClose != closeToken
+          error("expected '#{closeToken}' to close '#{openToken}' tag")
+        end
+        return
+      end
+    end
+    error("expected close tag for '#{openToken}' at end of file")
+  end
+
+  # Returns a two item array as a result, the string following the pipe
+  # operator '|' and a hash containing filters, if found.
+  def parseOutputFlags
+    result = ''
+    filters = []
+    while tok = @lexer.next_token
+      if tok == Token::Char
+        if @lexer.text =~ /[a-z]/
+          filters.push @lexer.text
+        elsif @lexer.text !~ /\s/
+          @lexer.pop_token
+          return [result, []]
+        end
+        result << @lexer.text
+      elsif tok == Token::TagClose
+        @lexer.pop_token
+        return [result, filters]
+      else
+        @lexer.pop_token
+        return [result, []]
+      end
+    end
+    error("expected '%>' to close output tag '<%=' at end of file")
+    return [result, []]
+  end
+
+  # Parses named tags of the form <%name> and </%name>. Due to the ambiguity
+  # that the start token causes between inline tags and named tags, this
+  # routine performs a lookahead and dispatches to the correct parsing routine.
+  def parseNamedTag(tagStart)
+    debug("parseNamedTag #{tagStart}")
+    nameIdentifier = nil
+
+    if tagStart == '<%'
+      tagName = parseTagName
+      case tagName
+      when 'init'
+        @namedSection = tagName
+
+      when 'fini'
+        @namedSection = tagName
+
+      when 'method'
+        @namedSection = tagName
+
+      when 'args'
+        if @parsingArgs
+          error("args may not be nested within other args sections")
+        end
+        @parsingArgs = true
+      else
+        @lexer.start(tagStart, TagType::Inline)
+        parseInlineTag(tagStart)
+        return
+      end
+
+      @lexer.start(tagStart, TagType::Named)
+
+      start_line = @lexer.line
+      start_column = @lexer.column
+
+      if tagName == 'method'
+        # <%method name...
+        #          ^
+        debug_("ws(")
+        eatWhitespace
+        debug(")")
+
+        nameIdentifier = parseIdentifier
+        if nameIdentifier.empty?
+          error("expected method name")
+        end
+      end
+    else
+      assert(tagStart == '</%')
+      @lexer.start(tagStart, TagType::Named)
+
+      start_line = @lexer.line
+      start_column = @lexer.column
+
+      # Whitespace following this tag may be stripped
+      @strippingTag = true
+
+      tagName = parseTagName
+      if tagName
+        case tagName
+        when 'init'
+        when 'fini'
+        when 'args'
+          @parsingArgs = false
+        when 'method'
+        else
+          error("section close type '</%#{tagName}' is unrecognized")
+        end
+
+        @namedSection = nil
+      else
+        error("expected name after '</%'")
+      end
+    end
+
+    # <%name ...
+    #       ^
+    debug_("eatws(")
+    eatWhitespace
+    debug(")")
+
+    #
+    # NOTE: The token may be a Char, even though it's for a tag close.
+    #
+    # This is a special case for when a tag name goes against the close
+    # tag token (e.g., <%init>). Because of the ambiguity caused by the two
+    # different tag types opening with the same token, we must lookahead while
+    # we parse the identifier. Since we haven't switched into TagType::Named
+    # yet, '>' is regarded as a Char instead of a TagClose.
+    #
+
+    tok = @lexer.next_token
+    tagClose = @lexer.text
+
+    if tok == Token::TagClose || tok == Token::Char
+      if tagClose == '>'
+        # Send to the backend
+        if tagStart == '<%'
+          @backend.named_start_tag(tagName, nameIdentifier,
+                                   start_line, start_column)
+        else
+          assert(tagStart == '</%')
+          @backend.named_end_tag(tagName)
+        end
+      else
+        error("expected '>' to close #{tagName} tag")
+      end
+    else
+      error("unexpected token '#{@lexer.text}' in " +
+            "#{tagName} tag", start_line, start_column)
+    end
+  end
+
+  # Parses tags of the form <% ... %>, which may contain any valid Ruby
+  # statements. The contents of the inline tag are evaluated at runtime
+  # during the render phase. The puts and print methods may be used to
+  # render to the output stream.
+  def parseInlineTag(tagStart)
+    @lexer.start(tagStart, TagType::Inline)
+
+    debug("parseInlineTag #{tagStart}")
+
+    # BUGBUG: Turn off quote handling in comments.
+    # BUGBUG: We may want to turn off string parsing here (at the cost of not
+    # handling quoted close tokens).
+    start_line = @lexer.line
+    start_column = @lexer.column
+
+    result = ''
+
+    while tok = @lexer.next_token
+      if tok == Token::Char
+        result << @lexer.text
+      elsif tok == Token::String
+        result << @lexer.text
+      elsif tok == Token::TagClose
+        tagClose = @lexer.text
+        if tagClose != '%>'
+          error("expected '%>' to close inline '<%' tag at (#{start_line}, #{start_column})")
+        end
+
+        debug("#{result}%>")
+
+        # Send to the backend
+        @backend.inline_tag(result, @filename, start_line)
+        return
+      else
+        error("unexpected tag token '#{@lexer.text}'", start_line, start_column)
+      end
+    end
+
+    if @lexer.line != start_line
+      error("unclosed inline tag (was this supposed to be a named tag?)",
+            start_line, start_column)
+    else
+      error("unclosed inline tag (was this supposed to be a named tag?)")
+    end
+  end
+
+  # Parses tags of the form <%= ... %> where the contents may be any valid
+  # Ruby expression. This expression is evaluated at runtime in a string
+  # context and the results are rendered to the output stream.
+  def parseOutputTag(tagStart)
+    @lexer.start(tagStart, TagType::Output)
+
+    debug("parseOutputTag #{tagStart}")
+
+    start_line = @lexer.line
+    start_column = @lexer.column
+
+    result = ''
+    filters = nil
+
+    while tok = @lexer.next_token
+      if tok == Token::Char
+        if @lexer.text == '|'
+          # Returns an array containing [string, array]
+          flags = parseOutputFlags
+          if flags[1].length > 0
+            # Flags were found.
+            filters = flags[1]
+          else
+            # False alarm
+            result << '|' + flags[0]
+          end
+        else
+          result << @lexer.text
+        end
+      elsif tok == Token::String
+        result << @lexer.text
+      elsif tok == Token::TagClose
+        tagClose = @lexer.text
+        if tagClose != '%>'
+          error("expected '%>' to close output '<%=' tag")
+        end
+
+        # Send to the backend
+        @backend.output_tag(result, filters, @filename, start_line)
+        return
+      else
+        error("invalid output tag syntax", start_line, start_column)
+        error("unexpected token '#{@lexer.text}'")
+      end
+    end
+
+    error("unclosed output tag")
+  end
+
+  # Parse until ',' or '&>' is found.
+  def parseCallArgValue
+    result = ''
+
+    start_line = @lexer.line
+    start_column = @lexer.column
+
+    while tok = @lexer.next_token
+      if tok == Token::Char
+        if @lexer.text == ','
+          @lexer.pop_token
+          return result.strip
+        end
+        result << @lexer.text
+      elsif tok == Token::String
+        result << @lexer.text
+      elsif tok == Token::Operator
+        # We have to include this in case it is within another hash.
+        result << @lexer.text
+      elsif tok == Token::TagClose
+        tagClose = @lexer.text
+        if tagClose != '&>'
+          error("expected '&>' to close call tag at (#{start_line}, #{start_column})")
+        end
+
+        @lexer.pop_token
+        return result.strip
+      else
+        error("unexpected token '#{@lexer.text}'", start_line, start_column)
+        return nil
+      end
+    end
+
+    error("unclosed call tag after assignment '=>'")
+    return nil
+  end
+
+  def parseCallArgList
+    debug("parseCallArgList")
+
+    start_line = @lexer.line
+    start_column = @lexer.column
+
+    arg_list = []
+    arg_name = nil
+    arg_value = nil
+
+    loop do
+      eatWhitespace
+      arg_name = parseFilename
+      if !arg_name || arg_name.empty?
+        error("Argument name expected")
+        return nil
+      end
+
+      eatWhitespace
+      if tok = @lexer.next_token
+        if tok == Token::Operator && @lexer.text == '=>'
+          arg_value = parseCallArgValue
+          debug "arg_value #{arg_value}"
+          if arg_value && !arg_value.empty?
+            # if arg_list.has_key?(arg_name)
+            #   warning("argument '#{arg_name}' duplicated")
+            # end
+
+            arg_list.push [arg_name, arg_value]
+
+            arg_name = nil
+            arg_value = nil
+          else
+            error("empty argument value after assignment operator '=>'")
+            return nil
+          end
+          # look for , or %>
+        else
+          error("expected assignment operator '=>' after argument identifier #{@lexer.text}")
+          @lexer.pop_token
+          return nil
+        end
+      else
+        error("unclosed call tag")
+        return nil
+      end
+
+      eatWhitespace
+      if tok = @lexer.next_token
+        if tok == Token::TagClose
+          tagClose = @lexer.text
+          if tagClose == '&>'
+            # End of successful parse
+            @lexer.pop_token
+            return arg_list
+          else
+            error("expected '&>' to close call tag at (#{start_line}, #{start_column})")
+            @lexer.pop_token
+            return nil
+          end
+        elsif tok != Token::Char || @lexer.text != ','
+          error("unexpected token '#{@lexer.text}' after argument value", start_line, start_column)
+          @lexer.pop_token
+          # End of argument list
+          return nil
+        end
+      end
+
+    end
+  end
+
+  # Parses a tag of the form <& name { arg-list } &> where arg-list is
+  # an optional Ruby hash.
+  def parseCallTag(tagStart)
+    @lexer.start(tagStart, TagType::Call)
+
+    debug("parseCallTag #{tagStart}")
+
+    start_line = @lexer.line
+    start_column = @lexer.column
+
+    # Is this an indirect call?
+    indirect = false
+
+    if tok = @lexer.next_token
+      if tok == Token::Char && @lexer.text == '='
+        indirect = true
+      else
+        @lexer.pop_token
+      end
+    end
+
+    # REVIEW: Should we allow indirect calls with @member syntax?
+    eatWhitespace
+    callIdentifier = parseFilename
+    if !callIdentifier || callIdentifier.empty?
+      error("Identifier expected in call tag")
+    end
+
+    arg_list = nil
+
+    eatWhitespace
+    if tok = @lexer.next_token
+      if tok == Token::Char
+        if @lexer.text == ','
+          @lexer.tokenize_arguments = true
+          arg_list = parseCallArgList
+          @lexer.tokenize_arguments = false
+
+          findTagClose('<&', '&>')
+
+          if arg_list
+            # Send to the backend
+            @backend.call_tag(callIdentifier, arg_list, indirect,
+                              @filename, start_line)
+          end
+
+          return
+        else
+          error("unexpected token '#{@lexer.text}' after call identifier", start_line, start_column)
+
+          findTagClose('<&', '&>')
+          return
+        end
+      else
+        tagClose = @lexer.text
+        if tagClose != '&>'
+          error("expected '&>' to close call tag at (#{start_line}, #{start_column})")
+        end
+
+        result = {}
+
+        # Send to the backend
+        @backend.call_tag(callIdentifier, result, indirect,
+                          @filename, start_line)
+        return
+      end
+    end
+
+    error("unclosed call tag")
+    return
+
+
+    while tok = @lexer.next_token
+      if tok == Token::Char
+        result << @lexer.text
+      elsif tok == Token::String
+        result << @lexer.text
+      elsif tok == Token::TagClose
+        tagClose = @lexer.text
+        # BUGBUG <& X %> doesn't fire.
+        if tagClose != '&>'
+          error("expected '&>' to close call tag at (#{start_line}, #{start_column})")
+        end
+
+        if result.strip.empty?
+          result = '{}'
+        end
+
+        # Send to the backend
+        @backend.call_tag(callIdentifier, result, indirect,
+                          @filename, start_line)
+        return
+      else
+        error("unexpected tag token '#{@lexer.text}'", start_line, start_column)
+      end
+    end
+
+    error("unclosed call tag")
+  end
+
+  # Parses tags of the form <%! name1="value1" name2 = "value2" ... %>, which
+  # control compile time and runtime behavior. These tags are usually placed
+  # near the top of the file.
+  def parseDirectiveTag(tagStart)
+    @lexer.start(tagStart, TagType::Directive)
+
+    start_line = @lexer.line
+    start_column = @lexer.column
+
+    here = 'in <%! directive'
+
+    errors = 0
+    directives = {}
+
+    while true
+      eatWhitespace
+
+      if tok = @lexer.next_token
+        if tok == Token::TagClose
+          if @lexer.text == '%>'
+            if directives.length > 0
+              # Send to the backend
+              @backend.directive_tag(directives, start_line, start_column)
+
+              # Whitespace following this tag may be stripped
+              @strippingTag = true
+            end
+          else
+            error("expected '%>' to close directive '<%!' tag")
+          end
+          return  # Main loop exit
+        else
+          @lexer.pop_token
+        end
+      end
+
+      identifierName = parseIdentifier
+      if identifierName.empty?
+        error("expected identifier in directive")
+      end
+
+      eatWhitespace
+
+      if !expectChar('=', " after literal #{here}")
+        findTagClose('<%!', '%>')
+        return
+      end
+
+      eatWhitespace
+
+      if tok = @lexer.next_token
+        if tok == Token::String
+          directives[identifierName] = @lexer.text
+        else
+          error("unexpected token #{@lexer.text} after assignment #{here}")
+          findTagClose('<%!', '%>')
+          return
+        end
+      end
+    end
+
+  end
+
+  def parseArgsSection
+    # @lexer.start(tagStart, TagType::Call)
+
+    debug("parseArgsSection")
+
+    start_line = @lexer.line
+    start_column = @lexer.column
+
+    loop do
+      lineComment = false
+
+      # warning("before whitespace")
+      eatArgumentWhitespace
+      # warning("after whitespace")
+
+      tok = @lexer.next_token
+      if tok == Token::TagOpen
+        # Normal argument end case.
+        @lexer.pop_token
+        return
+      else
+        @lexer.pop_token
+      end
+
+      # warning('after whitespace')
+      argName = parseIdentifier
+      if argName.empty?
+        error('expected argument identifier')
+      end
+
+      # warning("after identifier '#{argName}'")
+      # Eat whitespace only up until a linefeed.
+      eatLineWhitespace
+
+      # NOTE: We require that any value specified be a single-line expression.
+      # Although the backend may be able to render a multi-line assignment,
+      # it is not possible to distinguish between a lone identifier and an
+      # expression continuation. (In addition, our lexer does not tokenize
+      # identifiers, [the parser does], which would preclude a single-token
+      # lookahead. This would only be useful if the assignment operator were
+      # required.)
+
+      tok = @lexer.next_token
+      if tok == Token::TagOpen
+        # warning('args end 2')
+        # The last argument has no value.
+        @backend.add_argument(argName, nil, @filename,
+                              @lexer.line, @lexer.column)
+        @lexer.pop_token
+        return
+      elsif tok == Token::Operator && @lexer.text == '=>'
+        # warning('=>')
+
+        argValue = ''
+
+        while tok = @lexer.next_token
+          if tok == Token::Char
+            str = @lexer.text
+            if str == '#' || str == "\n"
+              @lexer.pop_token
+
+              argValue = argValue.strip
+              if argValue.empty?
+                error('An argument assignment has no value')
+              else
+                # warning "comment/eol #{argName} => #{argValue}"
+                @backend.add_argument(argName, argValue, @filename,
+                                      @lexer.line, @lexer.column)
+              end
+
+              argName = nil
+              argValue = ''
+              break
+            else
+              argValue << str
+            end
+          elsif tok == Token::String
+            argValue << @lexer.text
+          elsif tok == Token::TagOpen
+            # Syntax error of the form:
+            #   arg-name => </%args>
+            error('An argument assignment has no value')
+            @lexer.pop_token
+            return
+          end
+        end
+      elsif tok == Token::Char && (@lexer.text == '#' || @lexer.text =~ /\w/)
+        # warning("comment or new identifier")
+        @backend.add_argument(argName, nil, @filename,
+                              @lexer.line, @lexer.column)
+        @lexer.pop_token
+      else
+        error("unexpected token '#{@lexer.text}' after argument '#{argName}'")
+        @lexer.pop_token
+
+        # Wait for the end token
+        while tok = @lexer.next_token
+          if tok == Token::TagOpen
+            @lexer.pop_token
+            return
+          end
+        end
+
+        return
+      end
+    end # while true
+
+  end
+
+  # This routine handles the non-markup tags that are meaningful to
+  # the Bijou processor. It dispatches to the specific tag handlers.
+  # Parsing is done with the specialized tag lexer. Normal markup is
+  # parsed using a lexer that only recognizes Bijou tag open tokens.
+  # This bi-modal system allows us to ignore the complex parsing that
+  # would be required to handle HTML, scripts, style sheets, etc.
+  def parseTag(tagStart)
+    tagName = nil
+
+    @backend.tag_open(tagStart)
+
+    case tagStart
+    when '<%';
+      # This handler will determine whether it's named or inline.
+      parseNamedTag(tagStart)
+      return
+    when '<%='
+      parseOutputTag(tagStart)
+      return
+    when '<&';
+      parseCallTag(tagStart)
+      return
+    when '</%';
+      # This handler will determine whether it's named or inline.
+      parseNamedTag(tagStart)
+      return
+    when '<%!'
+      parseDirectiveTag(tagStart)
+      return
+    else;
+      raise "unexpected tag type #{tagStart}"
+    end
+  end
+
+  def render(component, source_filename=nil, cache_filename=nil,
+             base=nil, require_list=nil)
+    @backend.render(component, source_filename, cache_filename,
+                    base, require_list)
+  end
+
+  def parse(component, file, filename='')
+    if !file.respond_to?(:read)
+      if file.respond_to?(:to_s)
+        # This is a fallback; use StringIO for better performance.
+        file = StringIO.new(file.dup)
+      else
+        raise ArgumentError, "must be a file or a string"
+      end
+    end
+
+    parse_file(component, file, filename='')
+
+    render(component)
+  end
+
+  def parse_string(component, text)
+    file = StringIO.new(text.dup)
+    parse_file(component, file)
+    file.close
+  end
+
+  # The main top-level parsing loop looks for the special Bijou start tags,
+  # dispatches to the specialized parsing routines, and manages the lexer
+  # switching process.
+  def parse_file(component, file, filename='')
+    #
+    # Initialize parser members.
+    #
+    attach_file(file, filename)
+
+    #
+    # Main parsing loop
+    #
+    source_line = @lexer.line
+    buffer = ''
+    while tok = @lexer.next_token
+      if tok == Token::Char
+        buffer << @lexer.text
+      elsif tok == Token::TagOpen
+        if @strip && @strippingTag
+          buffer = stripLeader(buffer)
+        end
+        @strippingTag = false
+
+        @backend.markup_section(buffer, @filename, source_line)
+        buffer = ''
+
+        # NOTE: Care must be taken in the implementation of the parsing
+        # routines because the lookahead token may buffer characters from
+        # the input stream. If this causes problems, then the lexer
+        # should flush (push) the lookahead characters.
+        tagStart = @lexer.text
+
+        @lexer = @tagLexer
+        parseTag(tagStart)
+        @lexer = @topLexer
+
+        if @parsingArgs
+          @topLexer.tokenize_arguments = true
+          parseArgsSection
+          @topLexer.tokenize_arguments = false
+        end
+
+        source_line = @lexer.line
+      else
+        # puts "Token: " + @lexer.text
+        error("Unexpected token #{@lexer.text}")
+      end
+    end
+
+    if @parsingArgs
+      error("no end tag for args section at end of file")
+      @parsingArgs = false
+    end
+
+    # Final section
+    if !buffer.empty?
+      # The last blank lines are stripped
+      if @strip
+        buffer = stripTrailer(buffer)
+      end
+
+      @backend.markup_section(buffer, @filename, source_line)
+      buffer = ''
+    end
+
+    # print "Chars: " + @input.character.to_s
+    detach_file
+  end
+
+  # Whitespace formatting helper that removes a single leading blank line
+  # when preceded by a tag.
+  def stripLeader(buffer)
+    if buffer =~ /\A\s*?\n(.*)\Z/m
+      return $1
+    end
+    return buffer
+  end
+
+  # Whitespace formatting helper that removes any whitespace at the end of
+  # a component.
+  def stripTrailer(buffer)
+    if buffer =~ /\A(.*?)\s*\Z/m
+      return $1
+    end
+    return buffer
+  end
+end
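Taken together, the handlers above define the tag syntax a Bijou component may contain: inline Ruby statements in <% ... %>, output expressions in <%= ... %> (optionally followed by '|' and single-letter filters), component calls in <& name, arg => value &>, the named sections <%init>, <%fini>, <%args>, and <%method name> with their matching </%...> close tags, and <%! name="value" %> directives; everything else passes through as markup. The sketch below is a hypothetical component written against that grammar only; the argument names, the called letterhead.txt, and the assumption that declared arguments are visible to the tags that follow are illustrative, not taken from the gem's examples or backend.

<%args>
  name
  greeting => "Hello"
</%args>

<& letterhead.txt &>

<p><%= "#{greeting}, #{name}!" %></p>

<%
  # Inline Ruby; per the parser's comments, puts and print render to the
  # component's output stream.
  puts "Generated by Bijou 0.1.0"
%>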