tina4ruby 3.11.13 → 3.11.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +80 -80
- data/LICENSE.txt +21 -21
- data/README.md +137 -137
- data/exe/tina4ruby +5 -5
- data/lib/tina4/ai.rb +696 -696
- data/lib/tina4/api.rb +189 -189
- data/lib/tina4/auth.rb +305 -305
- data/lib/tina4/auto_crud.rb +244 -244
- data/lib/tina4/cache.rb +154 -154
- data/lib/tina4/cli.rb +1449 -1449
- data/lib/tina4/constants.rb +46 -46
- data/lib/tina4/container.rb +74 -74
- data/lib/tina4/cors.rb +74 -74
- data/lib/tina4/crud.rb +692 -692
- data/lib/tina4/database/sqlite3_adapter.rb +165 -165
- data/lib/tina4/database.rb +625 -625
- data/lib/tina4/database_result.rb +208 -208
- data/lib/tina4/debug.rb +8 -8
- data/lib/tina4/dev.rb +14 -14
- data/lib/tina4/dev_admin.rb +935 -935
- data/lib/tina4/dev_mailbox.rb +191 -191
- data/lib/tina4/drivers/firebird_driver.rb +124 -110
- data/lib/tina4/drivers/mongodb_driver.rb +561 -561
- data/lib/tina4/drivers/mssql_driver.rb +112 -112
- data/lib/tina4/drivers/mysql_driver.rb +90 -90
- data/lib/tina4/drivers/odbc_driver.rb +191 -191
- data/lib/tina4/drivers/postgres_driver.rb +116 -106
- data/lib/tina4/drivers/sqlite_driver.rb +122 -122
- data/lib/tina4/env.rb +95 -95
- data/lib/tina4/error_overlay.rb +252 -252
- data/lib/tina4/events.rb +109 -109
- data/lib/tina4/field_types.rb +154 -154
- data/lib/tina4/frond.rb +2025 -2025
- data/lib/tina4/gallery/auth/meta.json +1 -1
- data/lib/tina4/gallery/auth/src/routes/api/gallery_auth.rb +114 -114
- data/lib/tina4/gallery/database/meta.json +1 -1
- data/lib/tina4/gallery/database/src/routes/api/gallery_db.rb +43 -43
- data/lib/tina4/gallery/error-overlay/meta.json +1 -1
- data/lib/tina4/gallery/error-overlay/src/routes/api/gallery_crash.rb +17 -17
- data/lib/tina4/gallery/orm/meta.json +1 -1
- data/lib/tina4/gallery/orm/src/routes/api/gallery_products.rb +16 -16
- data/lib/tina4/gallery/queue/meta.json +1 -1
- data/lib/tina4/gallery/queue/src/routes/api/gallery_queue.rb +325 -325
- data/lib/tina4/gallery/rest-api/meta.json +1 -1
- data/lib/tina4/gallery/rest-api/src/routes/api/gallery_hello.rb +14 -14
- data/lib/tina4/gallery/templates/meta.json +1 -1
- data/lib/tina4/gallery/templates/src/routes/gallery_page.rb +12 -12
- data/lib/tina4/gallery/templates/src/templates/gallery_page.twig +257 -257
- data/lib/tina4/graphql.rb +966 -966
- data/lib/tina4/health.rb +39 -39
- data/lib/tina4/html_element.rb +170 -170
- data/lib/tina4/job.rb +80 -80
- data/lib/tina4/localization.rb +168 -168
- data/lib/tina4/log.rb +203 -203
- data/lib/tina4/mcp.rb +696 -696
- data/lib/tina4/messenger.rb +587 -587
- data/lib/tina4/metrics.rb +793 -793
- data/lib/tina4/middleware.rb +445 -445
- data/lib/tina4/migration.rb +451 -451
- data/lib/tina4/orm.rb +790 -790
- data/lib/tina4/public/css/tina4.css +2463 -2463
- data/lib/tina4/public/css/tina4.min.css +1 -1
- data/lib/tina4/public/images/logo.svg +5 -5
- data/lib/tina4/public/js/frond.min.js +2 -2
- data/lib/tina4/public/js/tina4-dev-admin.js +565 -565
- data/lib/tina4/public/js/tina4-dev-admin.min.js +480 -480
- data/lib/tina4/public/js/tina4.min.js +92 -92
- data/lib/tina4/public/js/tina4js.min.js +48 -48
- data/lib/tina4/public/swagger/index.html +90 -90
- data/lib/tina4/public/swagger/oauth2-redirect.html +63 -63
- data/lib/tina4/query_builder.rb +380 -380
- data/lib/tina4/queue.rb +366 -366
- data/lib/tina4/queue_backends/kafka_backend.rb +80 -80
- data/lib/tina4/queue_backends/lite_backend.rb +298 -298
- data/lib/tina4/queue_backends/mongo_backend.rb +126 -126
- data/lib/tina4/queue_backends/rabbitmq_backend.rb +73 -73
- data/lib/tina4/rack_app.rb +817 -817
- data/lib/tina4/rate_limiter.rb +130 -130
- data/lib/tina4/request.rb +268 -255
- data/lib/tina4/response.rb +346 -346
- data/lib/tina4/response_cache.rb +551 -551
- data/lib/tina4/router.rb +406 -406
- data/lib/tina4/scss/tina4css/_alerts.scss +34 -34
- data/lib/tina4/scss/tina4css/_badges.scss +22 -22
- data/lib/tina4/scss/tina4css/_buttons.scss +69 -69
- data/lib/tina4/scss/tina4css/_cards.scss +49 -49
- data/lib/tina4/scss/tina4css/_forms.scss +156 -156
- data/lib/tina4/scss/tina4css/_grid.scss +81 -81
- data/lib/tina4/scss/tina4css/_modals.scss +84 -84
- data/lib/tina4/scss/tina4css/_nav.scss +149 -149
- data/lib/tina4/scss/tina4css/_reset.scss +94 -94
- data/lib/tina4/scss/tina4css/_tables.scss +54 -54
- data/lib/tina4/scss/tina4css/_typography.scss +55 -55
- data/lib/tina4/scss/tina4css/_utilities.scss +197 -197
- data/lib/tina4/scss/tina4css/_variables.scss +117 -117
- data/lib/tina4/scss/tina4css/base.scss +1 -1
- data/lib/tina4/scss/tina4css/colors.scss +48 -48
- data/lib/tina4/scss/tina4css/tina4.scss +17 -17
- data/lib/tina4/scss_compiler.rb +178 -178
- data/lib/tina4/seeder.rb +567 -567
- data/lib/tina4/service_runner.rb +303 -303
- data/lib/tina4/session.rb +297 -297
- data/lib/tina4/session_handlers/database_handler.rb +72 -72
- data/lib/tina4/session_handlers/file_handler.rb +67 -67
- data/lib/tina4/session_handlers/mongo_handler.rb +49 -49
- data/lib/tina4/session_handlers/redis_handler.rb +43 -43
- data/lib/tina4/session_handlers/valkey_handler.rb +43 -43
- data/lib/tina4/shutdown.rb +84 -84
- data/lib/tina4/sql_translation.rb +158 -158
- data/lib/tina4/swagger.rb +124 -124
- data/lib/tina4/template.rb +894 -894
- data/lib/tina4/templates/base.twig +26 -26
- data/lib/tina4/templates/errors/302.twig +14 -14
- data/lib/tina4/templates/errors/401.twig +9 -9
- data/lib/tina4/templates/errors/403.twig +29 -29
- data/lib/tina4/templates/errors/404.twig +29 -29
- data/lib/tina4/templates/errors/500.twig +38 -38
- data/lib/tina4/templates/errors/502.twig +9 -9
- data/lib/tina4/templates/errors/503.twig +12 -12
- data/lib/tina4/templates/errors/base.twig +37 -37
- data/lib/tina4/test_client.rb +159 -159
- data/lib/tina4/testing.rb +340 -340
- data/lib/tina4/validator.rb +174 -174
- data/lib/tina4/version.rb +1 -1
- data/lib/tina4/webserver.rb +312 -312
- data/lib/tina4/websocket.rb +343 -343
- data/lib/tina4/websocket_backplane.rb +190 -190
- data/lib/tina4/wsdl.rb +564 -564
- data/lib/tina4.rb +458 -458
- data/lib/tina4ruby.rb +4 -4
- metadata +3 -3
data/lib/tina4/frond.rb
CHANGED
|
@@ -1,2025 +1,2025 @@
|
|
|
1
|
-
# frozen_string_literal: true
|
|
2
|
-
|
|
3
|
-
# Tina4 Frond Engine -- Lexer, parser, and runtime.
|
|
4
|
-
# Zero-dependency twig-like template engine.
|
|
5
|
-
# Supports: variables, filters, if/elseif/else/endif, for/else/endfor,
|
|
6
|
-
# extends/block, include, macro, set, comments, whitespace control, tests,
|
|
7
|
-
# fragment caching, sandboxing, auto-escaping, custom filters/tests/globals.
|
|
8
|
-
|
|
9
|
-
require "json"
|
|
10
|
-
require "digest"
|
|
11
|
-
require "base64"
|
|
12
|
-
require "cgi"
|
|
13
|
-
require "uri"
|
|
14
|
-
require "date"
|
|
15
|
-
require "time"
|
|
16
|
-
require "securerandom"
|
|
17
|
-
|
|
18
|
-
module Tina4
|
|
19
|
-
# Marker class for strings that should not be auto-escaped in Frond.
|
|
20
|
-
class SafeString < String
|
|
21
|
-
end
|
|
22
|
-
|
|
23
|
-
class Frond
|
|
24
|
-
# -- Token types ----------------------------------------------------------
|
|
25
|
-
TEXT = :text
|
|
26
|
-
VAR = :var # {{ ... }}
|
|
27
|
-
BLOCK = :block # {% ... %}
|
|
28
|
-
COMMENT = :comment # {# ... #}
|
|
29
|
-
|
|
30
|
-
# Regex to split template source into tokens
|
|
31
|
-
TOKEN_RE = /(\{%-?\s*.*?\s*-?%\})|(\{\{-?\s*.*?\s*-?\}\})|(\{#.*?#\})/m
|
|
32
|
-
|
|
33
|
-
# HTML escape table
|
|
34
|
-
HTML_ESCAPE_MAP = { "&" => "&", "<" => "<", ">" => ">",
|
|
35
|
-
'"' => """, "'" => "'" }.freeze
|
|
36
|
-
HTML_ESCAPE_RE = /[&<>"']/
|
|
37
|
-
|
|
38
|
-
# -- Compiled regex constants (optimization: avoid re-compiling in methods) --
|
|
39
|
-
EXTENDS_RE = /\{%-?\s*extends\s+["'](.+?)["']\s*-?%\}/
|
|
40
|
-
BLOCK_RE = /\{%-?\s*block\s+(\w+)\s*-?%\}(.*?)\{%-?\s*endblock\s*-?%\}/m
|
|
41
|
-
STRING_LIT_RE = /\A["'](.*)["']\z/
|
|
42
|
-
INTEGER_RE = /\A-?\d+\z/
|
|
43
|
-
FLOAT_RE = /\A-?\d+\.\d+\z/
|
|
44
|
-
ARRAY_LIT_RE = /\A\[(.+)\]\z/m
|
|
45
|
-
HASH_LIT_RE = /\A\{(.+)\}\z/m
|
|
46
|
-
HASH_PAIR_RE = /\A\s*(?:["']([^"']+)["']|(\w+))\s*:\s*(.+)\z/
|
|
47
|
-
RANGE_LIT_RE = /\A(\d+)\.\.(\d+)\z/
|
|
48
|
-
ARITHMETIC_OPS = [" + ", " - ", " * ", " // ", " / ", " % ", " ** "].freeze
|
|
49
|
-
FUNC_CALL_RE = /\A(\w+)\s*\((.*)\)\z/m
|
|
50
|
-
FILTER_WITH_ARGS_RE = /\A(\w+)\s*\((.*)\)\z/m
|
|
51
|
-
FILTER_CMP_RE = /\A(\w+)\s*(!=|==|>=|<=|>|<)\s*(.+)\z/
|
|
52
|
-
OR_SPLIT_RE = /\s+or\s+/
|
|
53
|
-
AND_SPLIT_RE = /\s+and\s+/
|
|
54
|
-
IS_NOT_RE = /\A(.+?)\s+is\s+not\s+(\w+)(.*)\z/
|
|
55
|
-
IS_RE = /\A(.+?)\s+is\s+(\w+)(.*)\z/
|
|
56
|
-
NOT_IN_RE = /\A(.+?)\s+not\s+in\s+(.+)\z/
|
|
57
|
-
IN_RE = /\A(.+?)\s+in\s+(.+)\z/
|
|
58
|
-
DIVISIBLE_BY_RE = /\s*by\s*\(\s*(\d+)\s*\)/
|
|
59
|
-
RESOLVE_SPLIT_RE = /\.|\[([^\]]+)\]/
|
|
60
|
-
RESOLVE_STRIP_RE = /\A["']|["']\z/
|
|
61
|
-
DIGIT_RE = /\A\d+\z/
|
|
62
|
-
FOR_RE = /\Afor\s+(\w+)(?:\s*,\s*(\w+))?\s+in\s+(.+)\z/
|
|
63
|
-
SET_RE = /\Aset\s+(\w+)\s*=\s*(.+)\z/m
|
|
64
|
-
INCLUDE_RE = /\Ainclude\s+["'](.+?)["'](?:\s+with\s+(.+))?\z/
|
|
65
|
-
MACRO_RE = /\Amacro\s+(\w+)\s*\(([^)]*)\)/
|
|
66
|
-
FROM_IMPORT_RE = /\Afrom\s+["'](.+?)["']\s+import\s+(.+)/
|
|
67
|
-
CACHE_RE = /\Acache\s+["'](.+?)["']\s*(\d+)?/
|
|
68
|
-
SPACELESS_RE = />\s+</
|
|
69
|
-
AUTOESCAPE_RE = /\Aautoescape\s+(false|true)/
|
|
70
|
-
STRIPTAGS_RE = /<[^>]+>/
|
|
71
|
-
THOUSANDS_RE = /(\d)(?=(\d{3})+(?!\d))/
|
|
72
|
-
SLUG_CLEAN_RE = /[^a-z0-9]+/
|
|
73
|
-
SLUG_TRIM_RE = /\A-|-\z/
|
|
74
|
-
|
|
75
|
-
# Set of common no-arg filter names that can be inlined for speed
|
|
76
|
-
INLINE_FILTERS = %w[upper lower length trim capitalize title string int escape e].each_with_object({}) { |f, h| h[f] = true }.freeze
|
|
77
|
-
|
|
78
|
-
# -- Lazy context overlay for for-loops (avoids full Hash#dup) --
|
|
79
|
-
class LoopContext
|
|
80
|
-
def initialize(parent)
|
|
81
|
-
@parent = parent
|
|
82
|
-
@local = {}
|
|
83
|
-
end
|
|
84
|
-
|
|
85
|
-
def [](key)
|
|
86
|
-
@local.key?(key) ? @local[key] : @parent[key]
|
|
87
|
-
end
|
|
88
|
-
|
|
89
|
-
def []=(key, value)
|
|
90
|
-
@local[key] = value
|
|
91
|
-
end
|
|
92
|
-
|
|
93
|
-
def key?(key)
|
|
94
|
-
@local.key?(key) || @parent.key?(key)
|
|
95
|
-
end
|
|
96
|
-
alias include? key?
|
|
97
|
-
alias has_key? key?
|
|
98
|
-
|
|
99
|
-
def fetch(key, *args, &block)
|
|
100
|
-
if @local.key?(key)
|
|
101
|
-
@local[key]
|
|
102
|
-
elsif @parent.key?(key)
|
|
103
|
-
@parent[key]
|
|
104
|
-
elsif block
|
|
105
|
-
yield key
|
|
106
|
-
elsif !args.empty?
|
|
107
|
-
args[0]
|
|
108
|
-
else
|
|
109
|
-
raise KeyError, "key not found: #{key.inspect}"
|
|
110
|
-
end
|
|
111
|
-
end
|
|
112
|
-
|
|
113
|
-
def merge(other)
|
|
114
|
-
dup_hash = to_h
|
|
115
|
-
dup_hash.merge!(other)
|
|
116
|
-
dup_hash
|
|
117
|
-
end
|
|
118
|
-
|
|
119
|
-
def merge!(other)
|
|
120
|
-
other.each { |k, v| @local[k] = v }
|
|
121
|
-
self
|
|
122
|
-
end
|
|
123
|
-
|
|
124
|
-
def dup
|
|
125
|
-
copy = LoopContext.new(@parent)
|
|
126
|
-
@local.each { |k, v| copy[k] = v }
|
|
127
|
-
copy
|
|
128
|
-
end
|
|
129
|
-
|
|
130
|
-
def to_h
|
|
131
|
-
h = @parent.is_a?(LoopContext) ? @parent.to_h : @parent.dup
|
|
132
|
-
@local.each { |k, v| h[k] = v }
|
|
133
|
-
h
|
|
134
|
-
end
|
|
135
|
-
|
|
136
|
-
def each(&block)
|
|
137
|
-
to_h.each(&block)
|
|
138
|
-
end
|
|
139
|
-
|
|
140
|
-
def respond_to_missing?(name, include_private = false)
|
|
141
|
-
@parent.respond_to?(name, include_private) || super
|
|
142
|
-
end
|
|
143
|
-
|
|
144
|
-
def is_a?(klass)
|
|
145
|
-
klass == Hash || super
|
|
146
|
-
end
|
|
147
|
-
|
|
148
|
-
def keys
|
|
149
|
-
(@parent.is_a?(LoopContext) ? @parent.keys : @parent.keys) | @local.keys
|
|
150
|
-
end
|
|
151
|
-
end
|
|
152
|
-
|
|
153
|
-
# -----------------------------------------------------------------------
|
|
154
|
-
# Public API
|
|
155
|
-
# -----------------------------------------------------------------------
|
|
156
|
-
|
|
157
|
-
attr_reader :template_dir
|
|
158
|
-
|
|
159
|
-
def initialize(template_dir: "src/templates")
|
|
160
|
-
@template_dir = template_dir
|
|
161
|
-
@filters = default_filters
|
|
162
|
-
@globals = {}
|
|
163
|
-
@tests = default_tests
|
|
164
|
-
@auto_escape = true
|
|
165
|
-
|
|
166
|
-
# Sandboxing
|
|
167
|
-
@sandbox = false
|
|
168
|
-
@allowed_filters = nil
|
|
169
|
-
@allowed_tags = nil
|
|
170
|
-
@allowed_vars = nil
|
|
171
|
-
|
|
172
|
-
# Fragment cache: key => [html, expires_at]
|
|
173
|
-
@fragment_cache = {}
|
|
174
|
-
|
|
175
|
-
# Token pre-compilation cache
|
|
176
|
-
@compiled = {} # {template_name => [tokens, mtime]}
|
|
177
|
-
@compiled_strings = {} # {md5_hash => tokens}
|
|
178
|
-
|
|
179
|
-
# Parsed filter chain cache: expr_string => [variable, filters]
|
|
180
|
-
@filter_chain_cache = {}
|
|
181
|
-
|
|
182
|
-
# Resolved dotted-path split cache: expr_string => parts_array
|
|
183
|
-
@resolve_cache = {}
|
|
184
|
-
|
|
185
|
-
# Sandbox root-var split cache: var_name => root_var_string
|
|
186
|
-
@dotted_split_cache = {}
|
|
187
|
-
|
|
188
|
-
# Built-in global functions
|
|
189
|
-
register_builtin_globals
|
|
190
|
-
end
|
|
191
|
-
|
|
192
|
-
# Render a template file with data. Uses token caching for performance.
|
|
193
|
-
def render(template, data = {})
|
|
194
|
-
context = @globals.merge(stringify_keys(data))
|
|
195
|
-
|
|
196
|
-
path = File.join(@template_dir, template)
|
|
197
|
-
raise "Template not found: #{path}" unless File.exist?(path)
|
|
198
|
-
|
|
199
|
-
debug_mode = ENV.fetch("TINA4_DEBUG", "").downcase == "true"
|
|
200
|
-
|
|
201
|
-
unless debug_mode
|
|
202
|
-
# Production: use permanent cache (no filesystem checks)
|
|
203
|
-
cached = @compiled[template]
|
|
204
|
-
return execute_cached(cached[0], context) if cached
|
|
205
|
-
end
|
|
206
|
-
# Dev mode: skip cache entirely — always re-read and re-tokenize
|
|
207
|
-
# so edits to partials and extended base templates are detected
|
|
208
|
-
|
|
209
|
-
# Cache miss — load, tokenize, cache
|
|
210
|
-
source = File.read(path, encoding: "utf-8")
|
|
211
|
-
mtime = File.mtime(path)
|
|
212
|
-
tokens = tokenize(source)
|
|
213
|
-
@compiled[template] = [tokens, mtime]
|
|
214
|
-
execute_with_tokens(source, tokens, context)
|
|
215
|
-
end
|
|
216
|
-
|
|
217
|
-
# Render a template string directly. Uses token caching for performance.
|
|
218
|
-
def render_string(source, data = {})
|
|
219
|
-
context = @globals.merge(stringify_keys(data))
|
|
220
|
-
|
|
221
|
-
key = Digest::MD5.hexdigest(source)
|
|
222
|
-
cached_tokens = @compiled_strings[key]
|
|
223
|
-
|
|
224
|
-
if cached_tokens
|
|
225
|
-
return execute_cached(cached_tokens, context)
|
|
226
|
-
end
|
|
227
|
-
|
|
228
|
-
tokens = tokenize(source)
|
|
229
|
-
@compiled_strings[key] = tokens
|
|
230
|
-
execute_cached(tokens, context)
|
|
231
|
-
end
|
|
232
|
-
|
|
233
|
-
# Clear all compiled template caches.
|
|
234
|
-
def clear_cache
|
|
235
|
-
@compiled.clear
|
|
236
|
-
@compiled_strings.clear
|
|
237
|
-
@filter_chain_cache.clear
|
|
238
|
-
@resolve_cache.clear
|
|
239
|
-
@dotted_split_cache.clear
|
|
240
|
-
end
|
|
241
|
-
|
|
242
|
-
# Register a custom filter.
|
|
243
|
-
def add_filter(name, &blk)
|
|
244
|
-
@filters[name.to_s] = blk
|
|
245
|
-
end
|
|
246
|
-
|
|
247
|
-
# Register a custom test.
|
|
248
|
-
def add_test(name, &blk)
|
|
249
|
-
@tests[name.to_s] = blk
|
|
250
|
-
end
|
|
251
|
-
|
|
252
|
-
# Register a global variable available in all templates.
|
|
253
|
-
def add_global(name, value)
|
|
254
|
-
@globals[name.to_s] = value
|
|
255
|
-
end
|
|
256
|
-
|
|
257
|
-
# Enable sandbox mode.
|
|
258
|
-
def sandbox(filters: nil, tags: nil, vars: nil)
|
|
259
|
-
@sandbox = true
|
|
260
|
-
@allowed_filters = filters ? filters.map(&:to_s) : nil
|
|
261
|
-
@allowed_tags = tags ? tags.map(&:to_s) : nil
|
|
262
|
-
@allowed_vars = vars ? vars.map(&:to_s) : nil
|
|
263
|
-
self
|
|
264
|
-
end
|
|
265
|
-
|
|
266
|
-
# Disable sandbox mode.
|
|
267
|
-
def unsandbox
|
|
268
|
-
@sandbox = false
|
|
269
|
-
@allowed_filters = nil
|
|
270
|
-
@allowed_tags = nil
|
|
271
|
-
@allowed_vars = nil
|
|
272
|
-
self
|
|
273
|
-
end
|
|
274
|
-
|
|
275
|
-
# Utility: HTML escape
|
|
276
|
-
def self.escape_html(str)
|
|
277
|
-
str.to_s.gsub(HTML_ESCAPE_RE, HTML_ESCAPE_MAP)
|
|
278
|
-
end
|
|
279
|
-
|
|
280
|
-
private
|
|
281
|
-
|
|
282
|
-
# -----------------------------------------------------------------------
|
|
283
|
-
# Tokenizer
|
|
284
|
-
# -----------------------------------------------------------------------
|
|
285
|
-
|
|
286
|
-
# Regex to extract {% raw %}...{% endraw %} blocks before tokenizing
|
|
287
|
-
RAW_BLOCK_RE = /\{%-?\s*raw\s*-?%\}(.*?)\{%-?\s*endraw\s*-?%\}/m
|
|
288
|
-
|
|
289
|
-
def tokenize(source)
|
|
290
|
-
# 1. Extract raw blocks and replace with placeholders
|
|
291
|
-
raw_blocks = []
|
|
292
|
-
source = source.gsub(RAW_BLOCK_RE) do
|
|
293
|
-
idx = raw_blocks.length
|
|
294
|
-
raw_blocks << Regexp.last_match(1)
|
|
295
|
-
"\x00RAW_#{idx}\x00"
|
|
296
|
-
end
|
|
297
|
-
|
|
298
|
-
# 2. Normal tokenization
|
|
299
|
-
tokens = []
|
|
300
|
-
pos = 0
|
|
301
|
-
source.scan(TOKEN_RE) do
|
|
302
|
-
m = Regexp.last_match
|
|
303
|
-
start = m.begin(0)
|
|
304
|
-
tokens << [TEXT, source[pos...start]] if start > pos
|
|
305
|
-
|
|
306
|
-
raw = m[0]
|
|
307
|
-
if raw.start_with?("{#")
|
|
308
|
-
tokens << [COMMENT, raw]
|
|
309
|
-
elsif raw.start_with?("{{")
|
|
310
|
-
tokens << [VAR, raw]
|
|
311
|
-
elsif raw.start_with?("{%")
|
|
312
|
-
tokens << [BLOCK, raw]
|
|
313
|
-
end
|
|
314
|
-
pos = m.end(0)
|
|
315
|
-
end
|
|
316
|
-
tokens << [TEXT, source[pos..]] if pos < source.length
|
|
317
|
-
|
|
318
|
-
# 3. Restore raw block placeholders as literal TEXT
|
|
319
|
-
unless raw_blocks.empty?
|
|
320
|
-
tokens = tokens.map do |ttype, value|
|
|
321
|
-
if ttype == TEXT && value.include?("\x00RAW_")
|
|
322
|
-
raw_blocks.each_with_index do |content, idx|
|
|
323
|
-
value = value.gsub("\x00RAW_#{idx}\x00", content)
|
|
324
|
-
end
|
|
325
|
-
end
|
|
326
|
-
[ttype, value]
|
|
327
|
-
end
|
|
328
|
-
end
|
|
329
|
-
|
|
330
|
-
tokens
|
|
331
|
-
end
|
|
332
|
-
|
|
333
|
-
# Strip delimiters from a tag and detect whitespace control markers.
|
|
334
|
-
# Returns [content, strip_before, strip_after].
|
|
335
|
-
def strip_tag(raw)
|
|
336
|
-
inner = raw[2..-3] # remove {{ }} or {% %} or {# #}
|
|
337
|
-
strip_before = false
|
|
338
|
-
strip_after = false
|
|
339
|
-
|
|
340
|
-
if inner.start_with?("-")
|
|
341
|
-
strip_before = true
|
|
342
|
-
inner = inner[1..]
|
|
343
|
-
end
|
|
344
|
-
if inner.end_with?("-")
|
|
345
|
-
strip_after = true
|
|
346
|
-
inner = inner[0..-2]
|
|
347
|
-
end
|
|
348
|
-
|
|
349
|
-
[inner.strip, strip_before, strip_after]
|
|
350
|
-
end
|
|
351
|
-
|
|
352
|
-
# -----------------------------------------------------------------------
|
|
353
|
-
# Template loading
|
|
354
|
-
# -----------------------------------------------------------------------
|
|
355
|
-
|
|
356
|
-
def load_template(name)
|
|
357
|
-
path = File.join(@template_dir, name)
|
|
358
|
-
raise "Template not found: #{path}" unless File.exist?(path)
|
|
359
|
-
|
|
360
|
-
File.read(path, encoding: "utf-8")
|
|
361
|
-
end
|
|
362
|
-
|
|
363
|
-
# -----------------------------------------------------------------------
|
|
364
|
-
# Execution
|
|
365
|
-
# -----------------------------------------------------------------------
|
|
366
|
-
|
|
367
|
-
def execute_cached(tokens, context)
|
|
368
|
-
# Check if first non-text token is an extends block
|
|
369
|
-
tokens.each do |ttype, raw|
|
|
370
|
-
next if ttype == TEXT && raw.strip.empty?
|
|
371
|
-
if ttype == BLOCK
|
|
372
|
-
content, _, _ = strip_tag(raw)
|
|
373
|
-
if content.start_with?("extends ")
|
|
374
|
-
# Extends requires source-based execution for block extraction
|
|
375
|
-
source = tokens.map { |_, v| v }.join
|
|
376
|
-
return execute(source, context)
|
|
377
|
-
end
|
|
378
|
-
end
|
|
379
|
-
break
|
|
380
|
-
end
|
|
381
|
-
render_tokens(tokens, context)
|
|
382
|
-
end
|
|
383
|
-
|
|
384
|
-
def execute_with_tokens(source, tokens, context)
|
|
385
|
-
# Handle extends first
|
|
386
|
-
if source =~ EXTENDS_RE
|
|
387
|
-
parent_name = Regexp.last_match(1)
|
|
388
|
-
parent_source = load_template(parent_name)
|
|
389
|
-
child_blocks = extract_blocks(source)
|
|
390
|
-
return render_with_blocks(parent_source, context, child_blocks)
|
|
391
|
-
end
|
|
392
|
-
|
|
393
|
-
render_tokens(tokens, context)
|
|
394
|
-
end
|
|
395
|
-
|
|
396
|
-
def execute(source, context)
|
|
397
|
-
# Handle extends first
|
|
398
|
-
if source =~ EXTENDS_RE
|
|
399
|
-
parent_name = Regexp.last_match(1)
|
|
400
|
-
parent_source = load_template(parent_name)
|
|
401
|
-
child_blocks = extract_blocks(source)
|
|
402
|
-
return render_with_blocks(parent_source, context, child_blocks)
|
|
403
|
-
end
|
|
404
|
-
|
|
405
|
-
render_tokens(tokenize(source), context)
|
|
406
|
-
end
|
|
407
|
-
|
|
408
|
-
def extract_blocks(source)
|
|
409
|
-
blocks = {}
|
|
410
|
-
source.scan(BLOCK_RE) do
|
|
411
|
-
blocks[Regexp.last_match(1)] = Regexp.last_match(2)
|
|
412
|
-
end
|
|
413
|
-
blocks
|
|
414
|
-
end
|
|
415
|
-
|
|
416
|
-
def render_with_blocks(parent_source, context, child_blocks)
|
|
417
|
-
engine = self
|
|
418
|
-
result = parent_source.gsub(BLOCK_RE) do
|
|
419
|
-
name = Regexp.last_match(1)
|
|
420
|
-
parent_content = Regexp.last_match(2)
|
|
421
|
-
block_source = child_blocks.fetch(name, parent_content)
|
|
422
|
-
|
|
423
|
-
# Make parent() and super() available inside child blocks
|
|
424
|
-
rendered_parent = nil
|
|
425
|
-
get_parent = lambda do
|
|
426
|
-
rendered_parent ||= Tina4::SafeString.new(
|
|
427
|
-
engine.send(:render_tokens, tokenize(parent_content), context)
|
|
428
|
-
)
|
|
429
|
-
rendered_parent
|
|
430
|
-
end
|
|
431
|
-
|
|
432
|
-
block_ctx = context.merge("parent" => get_parent, "super" => get_parent)
|
|
433
|
-
render_tokens(tokenize(block_source), block_ctx)
|
|
434
|
-
end
|
|
435
|
-
render_tokens(tokenize(result), context)
|
|
436
|
-
end
|
|
437
|
-
|
|
438
|
-
# -----------------------------------------------------------------------
|
|
439
|
-
# Token renderer
|
|
440
|
-
# -----------------------------------------------------------------------
|
|
441
|
-
|
|
442
|
-
def render_tokens(tokens, context)
|
|
443
|
-
output = []
|
|
444
|
-
i = 0
|
|
445
|
-
|
|
446
|
-
while i < tokens.length
|
|
447
|
-
ttype, raw = tokens[i]
|
|
448
|
-
|
|
449
|
-
case ttype
|
|
450
|
-
when TEXT
|
|
451
|
-
output << raw
|
|
452
|
-
i += 1
|
|
453
|
-
|
|
454
|
-
when COMMENT
|
|
455
|
-
i += 1
|
|
456
|
-
|
|
457
|
-
when VAR
|
|
458
|
-
content, strip_b, strip_a = strip_tag(raw)
|
|
459
|
-
output[-1] = output[-1].rstrip if strip_b && !output.empty?
|
|
460
|
-
|
|
461
|
-
result = eval_var(content, context)
|
|
462
|
-
output << (result.nil? ? "" : result.to_s)
|
|
463
|
-
|
|
464
|
-
if strip_a && i + 1 < tokens.length && tokens[i + 1][0] == TEXT
|
|
465
|
-
tokens[i + 1] = [TEXT, tokens[i + 1][1].lstrip]
|
|
466
|
-
end
|
|
467
|
-
i += 1
|
|
468
|
-
|
|
469
|
-
when BLOCK
|
|
470
|
-
content, strip_b, strip_a = strip_tag(raw)
|
|
471
|
-
output[-1] = output[-1].rstrip if strip_b && !output.empty?
|
|
472
|
-
|
|
473
|
-
tag = content.split[0] || ""
|
|
474
|
-
|
|
475
|
-
case tag
|
|
476
|
-
when "if"
|
|
477
|
-
result, i = handle_if(tokens, i, context)
|
|
478
|
-
output << result
|
|
479
|
-
when "for"
|
|
480
|
-
result, i = handle_for(tokens, i, context)
|
|
481
|
-
output << result
|
|
482
|
-
when "set"
|
|
483
|
-
handle_set(content, context)
|
|
484
|
-
i += 1
|
|
485
|
-
when "include"
|
|
486
|
-
if @sandbox && @allowed_tags && !@allowed_tags.include?("include")
|
|
487
|
-
i += 1
|
|
488
|
-
else
|
|
489
|
-
output << handle_include(content, context)
|
|
490
|
-
i += 1
|
|
491
|
-
end
|
|
492
|
-
when "macro"
|
|
493
|
-
i = handle_macro(tokens, i, context)
|
|
494
|
-
when "from"
|
|
495
|
-
handle_from_import(content, context)
|
|
496
|
-
i += 1
|
|
497
|
-
when "cache"
|
|
498
|
-
result, i = handle_cache(tokens, i, context)
|
|
499
|
-
output << result
|
|
500
|
-
when "spaceless"
|
|
501
|
-
result, i = handle_spaceless(tokens, i, context)
|
|
502
|
-
output << result
|
|
503
|
-
when "autoescape"
|
|
504
|
-
result, i = handle_autoescape(tokens, i, context)
|
|
505
|
-
output << result
|
|
506
|
-
when "block", "endblock", "extends"
|
|
507
|
-
i += 1
|
|
508
|
-
else
|
|
509
|
-
i += 1
|
|
510
|
-
end
|
|
511
|
-
|
|
512
|
-
if strip_a && i < tokens.length && tokens[i][0] == TEXT
|
|
513
|
-
tokens[i] = [TEXT, tokens[i][1].lstrip]
|
|
514
|
-
end
|
|
515
|
-
else
|
|
516
|
-
i += 1
|
|
517
|
-
end
|
|
518
|
-
end
|
|
519
|
-
|
|
520
|
-
output.join
|
|
521
|
-
end
|
|
522
|
-
|
|
523
|
-
# -----------------------------------------------------------------------
|
|
524
|
-
# Variable evaluation
|
|
525
|
-
# -----------------------------------------------------------------------
|
|
526
|
-
|
|
527
|
-
def eval_var(expr, context)
|
|
528
|
-
# Check for top-level ternary BEFORE splitting filters so that
|
|
529
|
-
# expressions like ``products|length != 1 ? "s" : ""`` work correctly.
|
|
530
|
-
ternary_pos = find_ternary(expr)
|
|
531
|
-
if ternary_pos != -1
|
|
532
|
-
cond_part = expr[0...ternary_pos].strip
|
|
533
|
-
rest = expr[(ternary_pos + 1)..]
|
|
534
|
-
colon_pos = find_colon(rest)
|
|
535
|
-
if colon_pos != -1
|
|
536
|
-
true_part = rest[0...colon_pos].strip
|
|
537
|
-
false_part = rest[(colon_pos + 1)..].strip
|
|
538
|
-
cond = eval_var_raw(cond_part, context)
|
|
539
|
-
return truthy?(cond) ? eval_var(true_part, context) : eval_var(false_part, context)
|
|
540
|
-
end
|
|
541
|
-
end
|
|
542
|
-
|
|
543
|
-
eval_var_inner(expr, context)
|
|
544
|
-
end
|
|
545
|
-
|
|
546
|
-
def eval_var_raw(expr, context)
|
|
547
|
-
var_name, filters = parse_filter_chain(expr)
|
|
548
|
-
value = eval_expr(var_name, context)
|
|
549
|
-
filters.each do |fname, args|
|
|
550
|
-
next if fname == "raw" || fname == "safe"
|
|
551
|
-
fn = @filters[fname]
|
|
552
|
-
if fn
|
|
553
|
-
evaluated_args = args.map { |a| eval_filter_arg(a, context) }
|
|
554
|
-
value = fn.call(value, *evaluated_args)
|
|
555
|
-
else
|
|
556
|
-
# The filter name may include a trailing comparison operator,
|
|
557
|
-
# e.g. "length != 1". Extract the real filter name and the
|
|
558
|
-
# comparison suffix, apply the filter, then evaluate the comparison.
|
|
559
|
-
m = fname.match(FILTER_CMP_RE)
|
|
560
|
-
if m
|
|
561
|
-
real_filter = m[1]
|
|
562
|
-
op = m[2]
|
|
563
|
-
right_expr = m[3].strip
|
|
564
|
-
fn2 = @filters[real_filter]
|
|
565
|
-
if fn2
|
|
566
|
-
evaluated_args = args.map { |a| eval_filter_arg(a, context) }
|
|
567
|
-
value = fn2.call(value, *evaluated_args)
|
|
568
|
-
end
|
|
569
|
-
right = eval_expr(right_expr, context)
|
|
570
|
-
value = case op
|
|
571
|
-
when "!=" then value != right
|
|
572
|
-
when "==" then value == right
|
|
573
|
-
when ">=" then value >= right
|
|
574
|
-
when "<=" then value <= right
|
|
575
|
-
when ">" then value > right
|
|
576
|
-
when "<" then value < right
|
|
577
|
-
else false
|
|
578
|
-
end rescue false
|
|
579
|
-
else
|
|
580
|
-
value = eval_expr(fname, context)
|
|
581
|
-
end
|
|
582
|
-
end
|
|
583
|
-
end
|
|
584
|
-
value
|
|
585
|
-
end
|
|
586
|
-
|
|
587
|
-
def eval_var_inner(expr, context)
|
|
588
|
-
var_name, filters = parse_filter_chain(expr)
|
|
589
|
-
|
|
590
|
-
# Sandbox: check variable access
|
|
591
|
-
if @sandbox && @allowed_vars
|
|
592
|
-
root_var = @dotted_split_cache[var_name]
|
|
593
|
-
unless root_var
|
|
594
|
-
root_var = var_name.split(".")[0].split("[")[0].strip
|
|
595
|
-
@dotted_split_cache[var_name] = root_var
|
|
596
|
-
end
|
|
597
|
-
return "" if !root_var.empty? && !@allowed_vars.include?(root_var) && root_var != "loop"
|
|
598
|
-
end
|
|
599
|
-
|
|
600
|
-
value = eval_expr(var_name, context)
|
|
601
|
-
|
|
602
|
-
is_safe = false
|
|
603
|
-
filters.each do |fname, args|
|
|
604
|
-
if fname == "raw" || fname == "safe"
|
|
605
|
-
is_safe = true
|
|
606
|
-
next
|
|
607
|
-
end
|
|
608
|
-
|
|
609
|
-
# Sandbox: check filter access
|
|
610
|
-
if @sandbox && @allowed_filters && !@allowed_filters.include?(fname)
|
|
611
|
-
next
|
|
612
|
-
end
|
|
613
|
-
|
|
614
|
-
# Inline common no-arg filters for speed (skip generic dispatch)
|
|
615
|
-
if args.empty? && INLINE_FILTERS.include?(fname)
|
|
616
|
-
value = case fname
|
|
617
|
-
when "upper" then value.to_s.upcase
|
|
618
|
-
when "lower" then value.to_s.downcase
|
|
619
|
-
when "length" then value.respond_to?(:length) ? value.length : value.to_s.length
|
|
620
|
-
when "trim" then value.to_s.strip
|
|
621
|
-
when "capitalize" then value.to_s.capitalize
|
|
622
|
-
when "title" then value.to_s.split.map(&:capitalize).join(" ")
|
|
623
|
-
when "string" then value.to_s
|
|
624
|
-
when "int" then value.to_i
|
|
625
|
-
when "escape", "e" then Frond.escape_html(value.to_s)
|
|
626
|
-
else value
|
|
627
|
-
end
|
|
628
|
-
next
|
|
629
|
-
end
|
|
630
|
-
|
|
631
|
-
fn = @filters[fname]
|
|
632
|
-
if fn
|
|
633
|
-
evaluated_args = args.map { |a| eval_filter_arg(a, context) }
|
|
634
|
-
value = fn.call(value, *evaluated_args)
|
|
635
|
-
end
|
|
636
|
-
end
|
|
637
|
-
|
|
638
|
-
# Auto-escape HTML unless marked safe or SafeString
|
|
639
|
-
if @auto_escape && !is_safe && value.is_a?(String) && !value.is_a?(SafeString)
|
|
640
|
-
value = Frond.escape_html(value)
|
|
641
|
-
end
|
|
642
|
-
|
|
643
|
-
value
|
|
644
|
-
end
|
|
645
|
-
|
|
646
|
-
def eval_filter_arg(arg, context)
|
|
647
|
-
return Regexp.last_match(1) if arg =~ STRING_LIT_RE
|
|
648
|
-
return arg.to_i if arg =~ INTEGER_RE
|
|
649
|
-
return arg.to_f if arg =~ FLOAT_RE
|
|
650
|
-
eval_expr(arg, context)
|
|
651
|
-
end
|
|
652
|
-
|
|
653
|
-
# Find the first occurrence of +needle+ that is not inside quotes or
|
|
654
|
-
# parentheses. Returns the index, or -1 if not found.
|
|
655
|
-
def find_outside_quotes(expr, needle)
|
|
656
|
-
in_q = nil
|
|
657
|
-
depth = 0
|
|
658
|
-
bracket_depth = 0
|
|
659
|
-
i = 0
|
|
660
|
-
nlen = needle.length
|
|
661
|
-
while i <= expr.length - nlen
|
|
662
|
-
ch = expr[i]
|
|
663
|
-
if (ch == '"' || ch == "'") && depth == 0
|
|
664
|
-
if in_q.nil?
|
|
665
|
-
in_q = ch
|
|
666
|
-
elsif ch == in_q
|
|
667
|
-
in_q = nil
|
|
668
|
-
end
|
|
669
|
-
i += 1
|
|
670
|
-
next
|
|
671
|
-
end
|
|
672
|
-
if in_q
|
|
673
|
-
i += 1
|
|
674
|
-
next
|
|
675
|
-
end
|
|
676
|
-
if ch == "("
|
|
677
|
-
depth += 1
|
|
678
|
-
elsif ch == ")"
|
|
679
|
-
depth -= 1
|
|
680
|
-
elsif ch == "["
|
|
681
|
-
bracket_depth += 1
|
|
682
|
-
elsif ch == "]"
|
|
683
|
-
bracket_depth -= 1
|
|
684
|
-
end
|
|
685
|
-
if depth == 0 && bracket_depth == 0 && expr[i, nlen] == needle
|
|
686
|
-
return i
|
|
687
|
-
end
|
|
688
|
-
i += 1
|
|
689
|
-
end
|
|
690
|
-
-1
|
|
691
|
-
end
|
|
692
|
-
|
|
693
|
-
# Return the index of the first top-level ternary "?" in +expr+,
# skipping quoted strings, parenthesised groups, and the doubled "??"
# null-coalescing operator. Returns -1 when no ternary "?" is present.
def find_ternary(expr)
  paren_depth = 0
  quote = nil
  length = expr.length
  idx = 0

  while idx < length
    char = expr[idx]

    if quote
      quote = nil if char == quote
      idx += 1
      next
    end

    case char
    when '"', "'"
      quote = char
    when "("
      paren_depth += 1
    when ")"
      paren_depth -= 1
    when "?"
      if paren_depth.zero?
        # "??" is null-coalescing, not a ternary — step over both chars.
        if idx + 1 < length && expr[idx + 1] == "?"
          idx += 2
          next
        end
        return idx
      end
    end

    idx += 1
  end

  -1
end
|
|
729
|
-
|
|
730
|
-
# Return the index of the first top-level ":" in +expr+ (the ternary
# branch separator), ignoring ":" inside quotes or parentheses.
# Returns -1 when absent.
def find_colon(expr)
  paren_depth = 0
  quote = nil

  expr.chars.each_with_index do |char, idx|
    if quote
      quote = nil if char == quote
      next
    end

    case char
    when '"', "'"
      quote = char
    when "("
      paren_depth += 1
    when ")"
      paren_depth -= 1
    when ":"
      return idx if paren_depth.zero?
    end
  end

  -1
end
|
|
754
|
-
|
|
755
|
-
# -----------------------------------------------------------------------
|
|
756
|
-
# Filter chain parser
|
|
757
|
-
# -----------------------------------------------------------------------
|
|
758
|
-
|
|
759
|
-
# Parse "expr | filter1 | filter2(args)" into a frozen
# [variable, filters] pair, where filters is an array of [name, args].
# Results are memoised per expression string in @filter_chain_cache.
def parse_filter_chain(expr)
  hit = @filter_chain_cache[expr]
  return hit if hit

  segments = split_on_pipe(expr)
  variable = segments.first.strip

  filters = segments.drop(1).map do |segment|
    segment = segment.strip
    if segment =~ FILTER_WITH_ARGS_RE
      name = Regexp.last_match(1)
      raw = Regexp.last_match(2).strip
      [name, raw.empty? ? [] : parse_args(raw)]
    else
      [segment, []]
    end
  end

  @filter_chain_cache[expr] = [variable, filters].freeze
end
|
|
783
|
-
|
|
784
|
-
# Split +expr+ on "|" at the top level only; pipes inside quoted
# strings or parentheses are preserved verbatim.
def split_on_pipe(expr)
  pieces = []
  buffer = +""
  quote = nil
  paren_depth = 0

  expr.each_char do |char|
    if quote
      buffer << char
      quote = nil if char == quote
    elsif char == '"' || char == "'"
      quote = char
      buffer << char
    elsif char == "("
      paren_depth += 1
      buffer << char
    elsif char == ")"
      paren_depth -= 1
      buffer << char
    elsif char == "|" && paren_depth.zero?
      pieces << buffer
      buffer = +""
    else
      buffer << char
    end
  end

  pieces << buffer unless buffer.empty?
  pieces
end
|
|
814
|
-
|
|
815
|
-
# Split a raw filter-argument string on top-level commas, respecting
# quoted strings and (), {}, [] nesting. Returns stripped argument
# strings; empty trailing segments are dropped.
def parse_args(raw)
  result = []
  buffer = +""
  quote = nil
  nesting = 0

  raw.each_char do |char|
    if quote
      quote = nil if char == quote
      buffer << char
    elsif char == '"' || char == "'"
      quote = char
      buffer << char
    elsif char == "(" || char == "{" || char == "["
      nesting += 1
      buffer << char
    elsif char == ")" || char == "}" || char == "]"
      nesting -= 1
      buffer << char
    elsif char == "," && nesting.zero?
      result << buffer.strip
      buffer = +""
    else
      buffer << char
    end
  end

  result << buffer.strip unless buffer.strip.empty?
  result
end
|
|
846
|
-
|
|
847
|
-
# -----------------------------------------------------------------------
|
|
848
|
-
# Expression evaluator
|
|
849
|
-
# -----------------------------------------------------------------------
|
|
850
|
-
|
|
851
|
-
# ── Expression evaluator (dispatcher) ──────────────────────────────
|
|
852
|
-
# Each expression type is handled by a focused helper method.
|
|
853
|
-
# Helpers return :not_matched when the expression doesn't match their
|
|
854
|
-
# type, so the dispatcher falls through to the next handler.
|
|
855
|
-
|
|
856
|
-
# Evaluate a template expression. Each expression category is tried in
# precedence order; a helper returns its sentinel symbol when the
# expression is not of its kind, letting the dispatcher fall through to
# the next handler, ending with plain variable resolution.
def eval_expr(expr, context)
  expr = expr.strip
  return nil if expr.empty?

  literal = eval_literal(expr)
  return literal unless literal == :not_literal

  collection = eval_collection_literal(expr, context)
  return collection unless collection == :not_collection

  # A fully-parenthesised expression is unwrapped and re-evaluated.
  return eval_expr(expr[1..-2], context) if matched_parens?(expr)

  ternary = eval_ternary(expr, context)
  return ternary unless ternary == :not_ternary

  inline_if = eval_inline_if(expr, context)
  return inline_if unless inline_if == :not_inline_if

  coalesced = eval_null_coalesce(expr, context)
  return coalesced unless coalesced == :not_coalesce

  concatenated = eval_concat(expr, context)
  return concatenated unless concatenated == :not_concat

  return eval_comparison(expr, context) if has_comparison?(expr)

  arithmetic = eval_arithmetic(expr, context)
  return arithmetic unless arithmetic == :not_arithmetic

  call = eval_function_call(expr, context)
  return call unless call == :not_function

  resolve(expr, context)
end
|
|
890
|
-
|
|
891
|
-
# ── Literal values: strings, numbers, booleans, null ──
|
|
892
|
-
|
|
893
|
-
# Decode simple literals: quoted strings, integers, floats, booleans,
# and null keywords. Returns :not_literal when +expr+ is none of these.
def eval_literal(expr)
  quoted = (expr.start_with?('"') && expr.end_with?('"')) ||
           (expr.start_with?("'") && expr.end_with?("'"))
  return expr[1..-2] if quoted
  return expr.to_i if expr.match?(INTEGER_RE)
  return expr.to_f if expr.match?(FLOAT_RE)

  case expr
  when "true" then true
  when "false" then false
  when "null", "none", "nil" then nil
  else :not_literal
  end
end
|
|
905
|
-
|
|
906
|
-
# ── Collection literals: arrays, hashes, ranges ──
|
|
907
|
-
|
|
908
|
-
# Build array, hash, or integer-range literals from the expression.
# Returns :not_collection when +expr+ is not a collection literal.
def eval_collection_literal(expr, context)
  if expr =~ ARRAY_LIT_RE
    body = Regexp.last_match(1)
    return split_args_toplevel(body).map { |item| eval_expr(item.strip, context) }
  end

  if expr =~ HASH_LIT_RE
    body = Regexp.last_match(1)
    result = {}
    split_args_toplevel(body).each do |pair|
      next unless pair =~ HASH_PAIR_RE

      key = Regexp.last_match(1) || Regexp.last_match(2)
      result[key] = eval_expr(Regexp.last_match(3).strip, context)
    end
    return result
  end

  if expr =~ RANGE_LIT_RE
    return (Regexp.last_match(1).to_i..Regexp.last_match(2).to_i).to_a
  end

  :not_collection
end
|
|
929
|
-
|
|
930
|
-
# ── Parenthesized sub-expression check ──
|
|
931
|
-
|
|
932
|
-
# True when +expr+ is one fully-parenthesised group, i.e. the opening
# "(" at position 0 only closes at the final character.
def matched_parens?(expr)
  return false unless expr.start_with?("(") && expr.end_with?(")")

  balance = 0
  last = expr.length - 1
  expr.each_char.with_index do |char, idx|
    balance += 1 if char == "("
    balance -= 1 if char == ")"
    # Closed before the end => "(a)(b)"-style, not a single group.
    return false if balance.zero? && idx < last
  end
  true
end
|
|
942
|
-
|
|
943
|
-
# ── Ternary: condition ? "yes" : "no" ──
|
|
944
|
-
|
|
945
|
-
# Evaluate "cond ? a : b". Only the selected branch is evaluated.
# Returns :not_ternary when no top-level "?" / ":" pair is present.
def eval_ternary(expr, context)
  question_at = find_outside_quotes(expr, "?")
  return :not_ternary unless question_at && question_at > 0

  condition = expr[0...question_at].strip
  remainder = expr[(question_at + 1)..]
  colon_at = find_outside_quotes(remainder, ":")
  return :not_ternary unless colon_at && colon_at >= 0

  when_true = remainder[0...colon_at].strip
  when_false = remainder[(colon_at + 1)..].strip
  branch = truthy?(eval_expr(condition, context)) ? when_true : when_false
  eval_expr(branch, context)
end
|
|
957
|
-
|
|
958
|
-
# ── Inline if: value if condition else other_value ──
|
|
959
|
-
|
|
960
|
-
# Evaluate Python-style "value if cond else other". Only the selected
# branch is evaluated. Returns :not_inline_if when the pattern is
# absent.
def eval_inline_if(expr, context)
  if_at = find_outside_quotes(expr, " if ")
  return :not_inline_if unless if_at && if_at >= 0

  else_at = find_outside_quotes(expr, " else ")
  return :not_inline_if unless else_at && else_at > if_at

  value_expr = expr[0...if_at].strip
  cond_expr = expr[(if_at + 4)...else_at].strip
  else_expr = expr[(else_at + 6)..].strip
  chosen = truthy?(eval_expr(cond_expr, context)) ? value_expr : else_expr
  eval_expr(chosen, context)
end
|
|
971
|
-
|
|
972
|
-
# ── Null coalescing: value ?? "default" ──
|
|
973
|
-
|
|
974
|
-
# Evaluate "value ?? default": the right-hand side is evaluated only
# when the left resolves to nil. Returns :not_coalesce when the
# expression has no top-level "??".
#
# Fix: the operator is now located with find_outside_quotes, so a "??"
# that only appears inside a quoted string literal (or a parenthesised
# sub-expression) no longer splits the expression at the wrong place.
def eval_null_coalesce(expr, context)
  op_at = find_outside_quotes(expr, "??")
  return :not_coalesce if op_at.nil? || op_at < 0

  left = expr[0...op_at]
  right = expr[(op_at + 2)..]
  value = eval_expr(left.strip, context)
  value.nil? ? eval_expr(right.strip, context) : value
end
|
|
980
|
-
|
|
981
|
-
# ── String concatenation: a ~ b ──
|
|
982
|
-
|
|
983
|
-
# Evaluate Twig-style "a ~ b" string concatenation: each segment is
# evaluated and stringified (nil becomes ""). Returns :not_concat when
# no "~" is present. NOTE: like the original, a "~" inside a quoted
# string is not special-cased here.
def eval_concat(expr, context)
  return :not_concat unless expr.include?("~")

  expr.split("~")
      .map { |segment| (eval_expr(segment.strip, context) || "").to_s }
      .join
end
|
|
988
|
-
|
|
989
|
-
# ── Arithmetic: +, -, *, //, /, %, ** ──
|
|
990
|
-
|
|
991
|
-
# Evaluate binary arithmetic. Scans ARITHMETIC_OPS in order and splits
# at the first operator found outside quotes/parens/brackets.
# Returns :not_arithmetic when no operator applies.
def eval_arithmetic(expr, context)
  ARITHMETIC_OPS.each do |op|
    at = find_outside_quotes(expr, op)
    next if at.nil? || at < 0

    left = eval_expr(expr[0...at].strip, context)
    right = eval_expr(expr[(at + op.length)..].strip, context)
    return apply_math(left, op.strip, right)
  end
  :not_arithmetic
end
|
|
1001
|
-
|
|
1002
|
-
# ── Function call: name(arg1, arg2) ──
|
|
1003
|
-
|
|
1004
|
-
# Evaluate "name(arg1, arg2)" where +name+ resolves to a callable in
# the context (e.g. a registered macro). Returns :not_function when
# the expression is not a call or the target is not callable.
def eval_function_call(expr, context)
  return :not_function unless expr =~ FUNC_CALL_RE

  name = Regexp.last_match(1)
  raw = Regexp.last_match(2).strip
  callable = context[name]
  return :not_function unless callable.respond_to?(:call)

  arguments =
    if raw.empty?
      []
    else
      split_args_toplevel(raw).map { |arg| eval_expr(arg.strip, context) }
    end
  callable.call(*arguments)
end
|
|
1013
|
-
|
|
1014
|
-
# Quick textual check: does +expr+ contain any comparison or logical
# operator? Used to route expressions into eval_comparison.
def has_comparison?(expr)
  markers = [" not in ", " in ", " is not ", " is ", "!=", "==",
             ">=", "<=", ">", "<", " and ", " or ", " not "]
  markers.any? { |marker| expr.include?(marker) }
end
|
|
1018
|
-
|
|
1019
|
-
# Split a comma-separated list at the top level, leaving commas inside
# quotes or any (), [], {} nesting untouched. Segments are stripped;
# an empty trailing segment is dropped.
def split_args_toplevel(str)
  pieces = []
  buffer = +""
  quote = nil
  nesting = 0

  str.each_char do |char|
    if quote
      buffer << char
      quote = nil if char == quote
      next
    end

    case char
    when '"', "'"
      quote = char
      buffer << char
    when "(", "[", "{"
      nesting += 1
      buffer << char
    when ")", "]", "}"
      nesting -= 1
      buffer << char
    when ","
      if nesting.zero?
        pieces << buffer.strip
        buffer = +""
      else
        buffer << char
      end
    else
      buffer << char
    end
  end

  pieces << buffer.strip unless buffer.strip.empty?
  pieces
end
|
|
1049
|
-
|
|
1050
|
-
# -----------------------------------------------------------------------
|
|
1051
|
-
# Comparison / logical evaluator
|
|
1052
|
-
# -----------------------------------------------------------------------
|
|
1053
|
-
|
|
1054
|
-
# Evaluate comparison / logical expressions: leading 'not', then 'or'
# (loosest), 'and', 'is [not]' tests, '[not] in' membership, then
# binary comparison operators, finally falling back to truthiness of
# the plain expression.
#
# @param eval_fn [#call, nil] sub-expression evaluator; defaults to
#   eval_expr so callers (e.g. handle_if) can substitute eval_var_raw.
def eval_comparison(expr, context, eval_fn = nil)
  eval_fn ||= method(:eval_expr)
  expr = expr.strip

  # Leading 'not' negates the remainder.
  return !eval_comparison(expr[4..], context, eval_fn) if expr.start_with?("not ")

  # 'or' binds loosest.
  disjuncts = expr.split(OR_SPLIT_RE)
  if disjuncts.length > 1
    return disjuncts.any? { |part| eval_comparison(part, context, eval_fn) }
  end

  # 'and' binds next.
  conjuncts = expr.split(AND_SPLIT_RE)
  if conjuncts.length > 1
    return conjuncts.all? { |part| eval_comparison(part, context, eval_fn) }
  end

  # 'is not' / 'is' tests.
  if expr =~ IS_NOT_RE
    return !eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                      Regexp.last_match(3).strip, context, eval_fn)
  end
  if expr =~ IS_RE
    return eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                     Regexp.last_match(3).strip, context, eval_fn)
  end

  # 'not in' / 'in' membership.
  if expr =~ NOT_IN_RE
    needle = eval_fn.call(Regexp.last_match(1).strip, context)
    haystack = eval_fn.call(Regexp.last_match(2).strip, context)
    return !(haystack.respond_to?(:include?) && haystack.include?(needle))
  end
  if expr =~ IN_RE
    needle = eval_fn.call(Regexp.last_match(1).strip, context)
    haystack = eval_fn.call(Regexp.last_match(2).strip, context)
    return haystack.respond_to?(:include?) ? haystack.include?(needle) : false
  end

  # Binary comparison operators — order matters ("!=" before "==",
  # ">=" before ">", etc.).
  comparators = [
    ["!=", ->(a, b) { a != b }],
    ["==", ->(a, b) { a == b }],
    [">=", ->(a, b) { a.to_f >= b.to_f }],
    ["<=", ->(a, b) { a.to_f <= b.to_f }],
    [">",  ->(a, b) { a.to_f > b.to_f }],
    ["<",  ->(a, b) { a.to_f < b.to_f }]
  ]
  comparators.each do |op, compare|
    next unless expr.include?(op)

    lhs, _, rhs = expr.partition(op)
    lhs_val = eval_fn.call(lhs.strip, context)
    rhs_val = eval_fn.call(rhs.strip, context)
    begin
      # Only the comparison itself is guarded: incomparable operands
      # evaluate to false rather than raising.
      return compare.call(lhs_val, rhs_val)
    rescue
      return false
    end
  end

  # No operator matched: plain truthiness.
  truthy?(eval_fn.call(expr, context))
end
|
|
1124
|
-
|
|
1125
|
-
# -----------------------------------------------------------------------
|
|
1126
|
-
# Tests ('is' expressions)
|
|
1127
|
-
# -----------------------------------------------------------------------
|
|
1128
|
-
|
|
1129
|
-
def eval_test(value_expr, test_name, args_str, context, eval_fn = nil)
|
|
1130
|
-
eval_fn ||= method(:eval_expr)
|
|
1131
|
-
val = eval_fn.call(value_expr, context)
|
|
1132
|
-
|
|
1133
|
-
# 'divisible by(n)'
|
|
1134
|
-
if test_name == "divisible"
|
|
1135
|
-
if args_str =~ DIVISIBLE_BY_RE
|
|
1136
|
-
n = Regexp.last_match(1).to_i
|
|
1137
|
-
return val.is_a?(Integer) && (val % n).zero?
|
|
1138
|
-
end
|
|
1139
|
-
return false
|
|
1140
|
-
end
|
|
1141
|
-
|
|
1142
|
-
# Check custom tests first
|
|
1143
|
-
custom = @tests[test_name]
|
|
1144
|
-
return custom.call(val) if custom
|
|
1145
|
-
|
|
1146
|
-
false
|
|
1147
|
-
end
|
|
1148
|
-
|
|
1149
|
-
# Built-in 'is' tests, keyed by name. Each entry maps a value to a
# boolean.
def default_tests
  tests = {}
  tests["defined"] = ->(v) { !v.nil? }
  # NOTE: 0 and false are deliberately treated as "empty".
  tests["empty"] = ->(v) { v.nil? || (v.respond_to?(:empty?) && v.empty?) || v == 0 || v == false }
  tests["null"] = ->(v) { v.nil? }
  tests["none"] = ->(v) { v.nil? }
  tests["even"] = ->(v) { v.is_a?(Integer) && v.even? }
  tests["odd"] = ->(v) { v.is_a?(Integer) && v.odd? }
  tests["iterable"] = ->(v) { v.respond_to?(:each) && !v.is_a?(String) }
  tests["string"] = ->(v) { v.is_a?(String) }
  tests["number"] = ->(v) { v.is_a?(Numeric) }
  tests["boolean"] = ->(v) { v.is_a?(TrueClass) || v.is_a?(FalseClass) }
  tests
end
|
|
1163
|
-
|
|
1164
|
-
# -----------------------------------------------------------------------
|
|
1165
|
-
# Variable resolver
|
|
1166
|
-
# -----------------------------------------------------------------------
|
|
1167
|
-
|
|
1168
|
-
# Resolve a dotted / bracketed variable path ("user.name", "items[0]",
# "list[1:3]") against the context. Split path parts are memoised in
# @resolve_cache. Returns nil as soon as any step resolves to nil or
# an unresolvable part is hit.
#
# Fix: hash / LoopContext lookup previously used
# `value[part] || value[part.to_sym]`, which silently discarded a
# legitimately stored `false` value under a string key; the symbol
# fallback now only applies when the string lookup returns nil.
def resolve(expr, context)
  parts = @resolve_cache[expr]
  unless parts
    parts = expr.split(RESOLVE_SPLIT_RE).reject(&:empty?)
    @resolve_cache[expr] = parts
  end

  value = context

  parts.each do |part|
    part = part.strip.gsub(RESOLVE_STRIP_RE, "") # strip quotes from bracket access
    if value.is_a?(Hash) || value.is_a?(LoopContext)
      candidate = value[part]
      candidate = value[part.to_sym] if candidate.nil?
      value = candidate
    elsif value.is_a?(Array)
      # Slice syntax: value[1:5], value[:10], value[start:end]
      if part.include?(":") && !(part.start_with?('"') || part.start_with?("'"))
        from_str, to_str = part.split(":", 2)
        slice_from = from_str.strip.empty? ? nil : eval_expr(from_str.strip, context).to_i
        slice_to = to_str.strip.empty? ? nil : eval_expr(to_str.strip, context).to_i
        value = if slice_from && slice_to
                  value[slice_from...slice_to]
                elsif slice_from
                  value[slice_from..]
                elsif slice_to
                  value[0...slice_to]
                else
                  value.dup
                end
        next
      end
      index = part =~ DIGIT_RE ? part.to_i : eval_expr(part, context)
      index = index.to_i if index.is_a?(Numeric)
      value = index.is_a?(Integer) ? value[index] : nil
    elsif value.respond_to?(part.to_sym)
      value = value.send(part.to_sym)
    else
      return nil
    end
    return nil if value.nil?
  end

  value
end
|
|
1215
|
-
|
|
1216
|
-
# -----------------------------------------------------------------------
|
|
1217
|
-
# Math
|
|
1218
|
-
# -----------------------------------------------------------------------
|
|
1219
|
-
|
|
1220
|
-
# Apply a binary arithmetic operator to two operands. nil operands are
# treated as 0; division/modulo by zero yields 0 instead of raising;
# unknown operators yield 0. Whole-number results collapse to Integer
# (e.g. 4.0 -> 4), matching the original's final conversion — the
# original's `both_int` flag and its fall-through branch produce the
# same value in every case, so the nested ternary is collapsed here.
def apply_math(left, op, right)
  lhs = (left || 0).to_f
  rhs = (right || 0).to_f

  result = case op
           when "+" then lhs + rhs
           when "-" then lhs - rhs
           when "*" then lhs * rhs
           when "/" then rhs.zero? ? 0 : lhs / rhs
           when "//" then rhs.zero? ? 0 : (lhs / rhs).floor
           when "%" then rhs.zero? ? 0 : lhs % rhs
           when "**" then lhs**rhs
           else 0
           end

  result == result.to_i ? result.to_i : result
end
|
|
1237
|
-
|
|
1238
|
-
# -----------------------------------------------------------------------
|
|
1239
|
-
# Block handlers
|
|
1240
|
-
# -----------------------------------------------------------------------
|
|
1241
|
-
|
|
1242
|
-
# {% if %}...{% elseif %}...{% else %}...{% endif %}
# Scans forward from +start+ collecting [condition, tokens] branches
# (condition nil for {% else %}), honouring nested ifs and the {%- -%}
# whitespace-strip markers, then renders the first branch whose
# condition evaluates truthy. Returns [rendered_html, next_index].
def handle_if(tokens, start, context)
  content, _, strip_after_open = strip_tag(tokens[start][1])
  branches = []
  branch_tokens = []
  branch_cond = content.sub(/\Aif\s+/, "").strip
  nesting = 0
  idx = start + 1

  # {%- if ... -%} with strip-after lstrips the first body text token.
  lstrip_pending = strip_after_open

  while idx < tokens.length
    token_type, raw = tokens[idx]
    if token_type == BLOCK
      tag_content, strip_before, strip_after = strip_tag(raw)
      tag = tag_content.split[0] || ""

      case
      when tag == "if"
        nesting += 1
        branch_tokens << tokens[idx]
      when tag == "endif" && nesting > 0
        nesting -= 1
        branch_tokens << tokens[idx]
      when tag == "endif"
        # {%- endif %} rstrips the last body text token.
        if strip_before && !branch_tokens.empty? && branch_tokens[-1][0] == TEXT
          branch_tokens[-1] = [TEXT, branch_tokens[-1][1].rstrip]
        end
        branches << [branch_cond, branch_tokens]
        idx += 1
        break
      when (tag == "elseif" || tag == "elif") && nesting == 0
        if strip_before && !branch_tokens.empty? && branch_tokens[-1][0] == TEXT
          branch_tokens[-1] = [TEXT, branch_tokens[-1][1].rstrip]
        end
        branches << [branch_cond, branch_tokens]
        branch_cond = tag_content.sub(/\A(?:elseif|elif)\s+/, "").strip
        branch_tokens = []
        lstrip_pending = strip_after
      when tag == "else" && nesting == 0
        if strip_before && !branch_tokens.empty? && branch_tokens[-1][0] == TEXT
          branch_tokens[-1] = [TEXT, branch_tokens[-1][1].rstrip]
        end
        branches << [branch_cond, branch_tokens]
        branch_cond = nil
        branch_tokens = []
        lstrip_pending = strip_after
      else
        branch_tokens << tokens[idx]
      end
    else
      token = tokens[idx]
      if lstrip_pending && token_type == TEXT
        token = [TEXT, token[1].lstrip]
        lstrip_pending = false
      end
      branch_tokens << token
    end
    idx += 1
  end

  branches.each do |cond, body|
    if cond.nil? || eval_comparison(cond, context, method(:eval_var_raw))
      return [render_tokens(body.dup, context), idx]
    end
  end

  ["", idx]
end
|
|
1316
|
-
|
|
1317
|
-
# {% for item in items %}...{% else %}...{% endfor %}
# Parses the loop header, collects body and optional else tokens up to
# the matching endfor (tracking nested for/if so an inner else is not
# mistaken for the loop's), then renders the body once per item with a
# `loop` helper hash — or the else branch when the iterable is
# nil/empty. Returns [rendered_html, next_index].
def handle_for(tokens, start, context)
  content, _, strip_after_open = strip_tag(tokens[start][1])
  header = content.match(FOR_RE)
  return ["", start + 1] unless header

  first_var = header[1]
  second_var = header[2]
  iterable_expr = header[3].strip

  body_tokens = []
  else_tokens = []
  collecting_else = false
  for_nesting = 0
  if_nesting = 0
  idx = start + 1
  lstrip_pending = strip_after_open

  while idx < tokens.length
    token_type, raw = tokens[idx]
    if token_type == BLOCK
      tag_content, strip_before, strip_after = strip_tag(raw)
      tag = tag_content.split[0] || ""
      sink = collecting_else ? else_tokens : body_tokens

      case
      when tag == "for"
        for_nesting += 1
        sink << tokens[idx]
      when tag == "endfor" && for_nesting > 0
        for_nesting -= 1
        sink << tokens[idx]
      when tag == "endfor"
        # {%- endfor %} rstrips the last collected text token.
        if strip_before && !sink.empty? && sink[-1][0] == TEXT
          sink[-1] = [TEXT, sink[-1][1].rstrip]
        end
        idx += 1
        break
      when tag == "if"
        if_nesting += 1
        sink << tokens[idx]
      when tag == "endif"
        if_nesting -= 1
        sink << tokens[idx]
      when tag == "else" && for_nesting == 0 && if_nesting == 0
        if strip_before && !body_tokens.empty? && body_tokens[-1][0] == TEXT
          body_tokens[-1] = [TEXT, body_tokens[-1][1].rstrip]
        end
        collecting_else = true
        lstrip_pending = strip_after
      else
        sink << tokens[idx]
      end
    else
      token = tokens[idx]
      if lstrip_pending && token_type == TEXT
        token = [TEXT, token[1].lstrip]
        lstrip_pending = false
      end
      (collecting_else ? else_tokens : body_tokens) << token
    end
    idx += 1
  end

  iterable = eval_expr(iterable_expr, context)

  if iterable.nil? || (iterable.respond_to?(:empty?) && iterable.empty?)
    return [render_tokens(else_tokens.dup, context), idx] if else_tokens.any?

    return ["", idx]
  end

  rendered = []
  items = iterable.is_a?(Hash) ? iterable.to_a : Array(iterable)
  total = items.length

  items.each_with_index do |item, item_idx|
    loop_ctx = LoopContext.new(context)
    loop_ctx["loop"] = {
      "index" => item_idx + 1,
      "index0" => item_idx,
      "first" => item_idx == 0,
      "last" => item_idx == total - 1,
      "length" => total,
      "revindex" => total - item_idx,
      "revindex0" => total - item_idx - 1,
      "even" => ((item_idx + 1) % 2).zero?,
      "odd" => ((item_idx + 1) % 2) != 0,
    }

    if iterable.is_a?(Hash)
      # Hash iteration: "for k in h" binds the key; "for k, v in h"
      # binds key and value.
      key, val = item
      loop_ctx[first_var] = key
      loop_ctx[second_var] = val if second_var
    elsif second_var
      # Array with two vars: "for i, item in list".
      loop_ctx[first_var] = item_idx
      loop_ctx[second_var] = item
    else
      loop_ctx[first_var] = item
    end

    rendered << render_tokens(body_tokens.dup, loop_ctx)
  end

  [rendered.join, idx]
end
|
|
1429
|
-
|
|
1430
|
-
# {% set name = expr %} — evaluates the expression and assigns it into
# the current context. No output is produced.
def handle_set(content, context)
  return unless content =~ SET_RE

  var_name = Regexp.last_match(1)
  context[var_name] = eval_var_raw(Regexp.last_match(2).strip, context)
end
|
|
1438
|
-
|
|
1439
|
-
# {% include "file.html" [with {...}] [ignore missing] %}
# Loads and renders another template against a copy of the current
# context, optionally merged with a `with` hash. With `ignore missing`
# a failed load renders as "" instead of raising.
def handle_include(content, context)
  ignore_missing = content.include?("ignore missing")
  content = content.gsub("ignore missing", "").strip

  match = content.match(INCLUDE_RE)
  return "" unless match

  filename = match[1]
  with_expr = match[2]

  begin
    source = load_template(filename)
  rescue
    return "" if ignore_missing

    raise
  end

  child_context = context.dup
  if with_expr
    extra = eval_expr(with_expr, context)
    child_context.merge!(stringify_keys(extra)) if extra.is_a?(Hash)
  end

  execute(source, child_context)
end
|
|
1465
|
-
|
|
1466
|
-
# {% macro name(args) %}...{% endmacro %}
# Registers the macro as a callable lambda in the context (missing
# arguments default to nil; output is wrapped in SafeString) and
# returns the token index just past endmacro.
def handle_macro(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  header = content.match(MACRO_RE)

  unless header
    # Malformed macro header: skip forward to just past endmacro.
    idx = start + 1
    while idx < tokens.length
      return idx + 1 if tokens[idx][0] == BLOCK && tokens[idx][1].include?("endmacro")

      idx += 1
    end
    return idx
  end

  macro_name = header[1]
  param_names = header[2].split(",").map(&:strip).reject(&:empty?)

  body_tokens = []
  idx = start + 1
  while idx < tokens.length
    if tokens[idx][0] == BLOCK && tokens[idx][1].include?("endmacro")
      idx += 1
      break
    end
    body_tokens << tokens[idx]
    idx += 1
  end

  engine = self
  captured_body = body_tokens.dup
  captured_context = context

  context[macro_name] = lambda { |*args|
    macro_ctx = captured_context.dup
    param_names.each_with_index do |pname, pos|
      macro_ctx[pname] = pos < args.length ? args[pos] : nil
    end
    Tina4::SafeString.new(engine.send(:render_tokens, captured_body.dup, macro_ctx))
  }

  idx
end
|
|
1509
|
-
|
|
1510
|
-
# {% from "file" import macro1, macro2 %}
# Loads and tokenizes the named template, then registers only the
# requested macros into the current context via _make_macro_fn.
def handle_from_import(content, context)
  header = content.match(FROM_IMPORT_RE)
  return unless header

  filename = header[1]
  wanted = header[2].split(",").map(&:strip).reject(&:empty?)

  source = load_template(filename)
  tokens = tokenize(source)

  idx = 0
  while idx < tokens.length
    token_type, raw = tokens[idx]
    if token_type == BLOCK
      tag_content, _, _ = strip_tag(raw)
      if (tag_content.split[0] || "") == "macro"
        macro_header = tag_content.match(MACRO_RE)
        if macro_header && wanted.include?(macro_header[1])
          macro_name = macro_header[1]
          param_names = macro_header[2].split(",").map(&:strip).reject(&:empty?)

          # Collect the macro body up to (and past) endmacro.
          body_tokens = []
          idx += 1
          while idx < tokens.length
            if tokens[idx][0] == BLOCK && tokens[idx][1].include?("endmacro")
              idx += 1
              break
            end
            body_tokens << tokens[idx]
            idx += 1
          end

          context[macro_name] = _make_macro_fn(body_tokens.dup, param_names.dup, context.dup)
          next
        end
      end
    end
    idx += 1
  end
end
|
|
1552
|
-
|
|
1553
|
-
# Build an isolated lambda for an imported macro. Extracted so each
# macro captures its own body/params/context snapshot (avoids the
# classic closure-in-loop variable-sharing bug).
def _make_macro_fn(body_tokens, param_names, ctx)
  engine = self
  lambda { |*args|
    call_ctx = ctx.dup
    param_names.each_with_index do |pname, pos|
      call_ctx[pname] = pos < args.length ? args[pos] : nil
    end
    Tina4::SafeString.new(engine.send(:render_tokens, body_tokens.dup, call_ctx))
  }
end
|
|
1564
|
-
|
|
1565
|
-
# {% cache "key" ttl %}...{% endcache %}
# Serves the fragment from @fragment_cache while it is still fresh;
# otherwise renders the body and stores it with the given TTL
# (default 60 seconds). Returns [rendered_html, next_index].
def handle_cache(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  header = content.match(CACHE_RE)
  cache_key = header ? header[1] : "default"
  ttl = header && header[2] ? header[2].to_i : 60

  cached = @fragment_cache[cache_key]
  if cached
    html, expires_at = cached
    if Time.now.to_f < expires_at
      # Fresh hit: skip tokens to just past the matching endcache.
      idx = start + 1
      nesting = 0
      while idx < tokens.length
        if tokens[idx][0] == BLOCK
          tag_content, _, _ = strip_tag(tokens[idx][1])
          case tag_content.split[0] || ""
          when "cache"
            nesting += 1
          when "endcache"
            return [html, idx + 1] if nesting == 0

            nesting -= 1
          end
        end
        idx += 1
      end
      return [html, idx]
    end
  end

  # Miss or stale: collect the body up to the matching endcache.
  body_tokens = []
  idx = start + 1
  nesting = 0
  while idx < tokens.length
    if tokens[idx][0] == BLOCK
      tag_content, _, _ = strip_tag(tokens[idx][1])
      tag = tag_content.split[0] || ""
      if tag == "cache"
        nesting += 1
        body_tokens << tokens[idx]
      elsif tag == "endcache"
        if nesting == 0
          idx += 1
          break
        end
        nesting -= 1
        body_tokens << tokens[idx]
      else
        body_tokens << tokens[idx]
      end
    else
      body_tokens << tokens[idx]
    end
    idx += 1
  end

  rendered = render_tokens(body_tokens.dup, context)
  @fragment_cache[cache_key] = [rendered, Time.now.to_f + ttl]
  [rendered, idx]
end
|
|
1627
|
-
|
|
1628
|
-
    # {% spaceless %}...{% endspaceless %}
    #
    # Render the body and collapse inter-tag whitespace (">   <" becomes
    # "><"). Nested spaceless blocks are tracked by depth so only the
    # matching endspaceless terminates collection.
    # Returns [rendered_html, next_token_index].
    def handle_spaceless(tokens, start, context)
      body_tokens = []
      i = start + 1
      depth = 0
      while i < tokens.length
        if tokens[i][0] == BLOCK
          tc, _, _ = strip_tag(tokens[i][1])
          tag = tc.split[0] || ""
          if tag == "spaceless"
            depth += 1
            body_tokens << tokens[i]
          elsif tag == "endspaceless"
            if depth == 0
              i += 1
              break
            end
            depth -= 1
            body_tokens << tokens[i]
          else
            body_tokens << tokens[i]
          end
        else
          body_tokens << tokens[i]
        end
        i += 1
      end

      rendered = render_tokens(body_tokens.dup, context)
      # SPACELESS_RE matches whitespace between a closing and opening angle
      # bracket, so only whitespace BETWEEN tags is removed.
      rendered = rendered.gsub(SPACELESS_RE, "><")
      [rendered, i]
    end
|
|
1659
|
-
|
|
1660
|
-
    # {% autoescape false %}...{% endautoescape %}
    #
    # Collect the body (tracking nested autoescape blocks by depth), then
    # render it — temporarily disabling the engine-wide @auto_escape flag
    # when the tag explicitly says "false". The flag is restored afterwards.
    # Returns [rendered_html, next_token_index].
    def handle_autoescape(tokens, start, context)
      content, _, _ = strip_tag(tokens[start][1])
      mode_match = content.match(AUTOESCAPE_RE)
      # Escaping stays ON unless the tag argument is literally "false".
      auto_escape_on = !(mode_match && mode_match[1] == "false")

      body_tokens = []
      i = start + 1
      depth = 0
      while i < tokens.length
        if tokens[i][0] == BLOCK
          tc, _, _ = strip_tag(tokens[i][1])
          tag = tc.split[0] || ""
          if tag == "autoescape"
            depth += 1
            body_tokens << tokens[i]
          elsif tag == "endautoescape"
            if depth == 0
              i += 1
              break
            end
            depth -= 1
            body_tokens << tokens[i]
          else
            body_tokens << tokens[i]
          end
        else
          body_tokens << tokens[i]
        end
        i += 1
      end

      if !auto_escape_on
        # Flip the shared flag only around this render, then restore it.
        # NOTE(review): not exception-safe — if render_tokens raises, the
        # flag stays false; confirm whether an ensure is wanted.
        old_auto_escape = @auto_escape
        @auto_escape = false
        rendered = render_tokens(body_tokens.dup, context)
        @auto_escape = old_auto_escape
      else
        rendered = render_tokens(body_tokens.dup, context)
      end

      [rendered, i]
    end
|
|
1702
|
-
|
|
1703
|
-
# -----------------------------------------------------------------------
|
|
1704
|
-
# Helpers
|
|
1705
|
-
# -----------------------------------------------------------------------
|
|
1706
|
-
|
|
1707
|
-
def truthy?(val)
|
|
1708
|
-
return false if val.nil? || val == false || val == 0 || val == ""
|
|
1709
|
-
return false if val.respond_to?(:empty?) && val.empty?
|
|
1710
|
-
true
|
|
1711
|
-
end
|
|
1712
|
-
|
|
1713
|
-
def stringify_keys(hash)
|
|
1714
|
-
return {} unless hash.is_a?(Hash)
|
|
1715
|
-
hash.each_with_object({}) { |(k, v), h| h[k.to_s] = v }
|
|
1716
|
-
end
|
|
1717
|
-
|
|
1718
|
-
# -----------------------------------------------------------------------
|
|
1719
|
-
# Built-in filters (53 total)
|
|
1720
|
-
# -----------------------------------------------------------------------
|
|
1721
|
-
|
|
1722
|
-
    # Build the hash of built-in filters, keyed by filter name.
    # Every filter is a lambda receiving (value, *args); filters that emit
    # trusted markup wrap their result in Tina4::SafeString so the
    # auto-escaper leaves it alone.
    def default_filters
      {
        # -- Text --
        "upper" => ->(v, *_a) { v.to_s.upcase },
        "lower" => ->(v, *_a) { v.to_s.downcase },
        "capitalize" => ->(v, *_a) { v.to_s.capitalize },
        "title" => ->(v, *_a) { v.to_s.split.map(&:capitalize).join(" ") },
        "trim" => ->(v, *_a) { v.to_s.strip },
        "ltrim" => ->(v, *_a) { v.to_s.lstrip },
        "rtrim" => ->(v, *_a) { v.to_s.rstrip },
        # replace: either a single Hash of old=>new pairs, or two
        # positional args (old, new). Anything else is a no-op.
        "replace" => ->(v, *a) {
          if a.length == 1 && a[0].is_a?(Hash)
            result = v.to_s
            a[0].each { |old, new_val| result = result.gsub(old.to_s, new_val.to_s) }
            result
          elsif a.length >= 2
            v.to_s.gsub(a[0].to_s, a[1].to_s)
          else
            v.to_s
          end
        },
        "striptags" => ->(v, *_a) { v.to_s.gsub(STRIPTAGS_RE, "") },

        # -- Encoding --
        "escape" => ->(v, *_a) { Frond.escape_html(v.to_s) },
        "e" => ->(v, *_a) { Frond.escape_html(v.to_s) },
        "raw" => ->(v, *_a) { v },
        "safe" => ->(v, *_a) { v },
        # JSON coercions degrade gracefully: bad input falls back to the
        # original value / its string form rather than raising.
        "json_encode" => ->(v, *_a) { JSON.generate(v) rescue v.to_s },
        "json_decode" => ->(v, *_a) { v.is_a?(String) ? (JSON.parse(v) rescue v) : v },
        "base64_encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
        "base64encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
        "base64_decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
        "base64decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
        # data_uri: expects {type:, content:} (content may be an IO-like
        # object responding to #read); non-Hash input passes through.
        "data_uri" => ->(v, *_a) {
          if v.is_a?(Hash)
            ct = v[:type] || v["type"] || "application/octet-stream"
            raw = v[:content] || v["content"] || ""
            raw = raw.respond_to?(:read) ? raw.read : raw
            "data:#{ct};base64,#{Base64.strict_encode64(raw.to_s)}"
          else
            v.to_s
          end
        },
        "url_encode" => ->(v, *_a) { CGI.escape(v.to_s) },

        # -- JSON / JS --
        # to_json/tojson: \u-escape <, >, & (single-quoted strings, so the
        # replacement is the literal six-character JSON escape sequence).
        "to_json" => ->(v, *a) {
          indent = a[0] ? a[0].to_i : nil
          json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
          # Escape <, >, & for safe HTML embedding
          Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
        },
        "tojson" => ->(v, *a) {
          indent = a[0] ? a[0].to_i : nil
          json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
          Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
        },
        "js_escape" => ->(v, *_a) {
          Tina4::SafeString.new(
            v.to_s.gsub("\\", "\\\\").gsub("'", "\\'").gsub('"', '\\"')
            .gsub("\n", "\\n").gsub("\r", "\\r").gsub("\t", "\\t")
          )
        },

        # -- Hashing --
        "md5" => ->(v, *_a) { Digest::MD5.hexdigest(v.to_s) },
        "sha256" => ->(v, *_a) { Digest::SHA256.hexdigest(v.to_s) },

        # -- Numbers --
        "abs" => ->(v, *_a) { v.is_a?(Numeric) ? v.abs : v.to_f.abs },
        "round" => ->(v, *a) { v.to_f.round(a[0] ? a[0].to_i : 0) },
        "int" => ->(v, *_a) { v.to_i },
        "float" => ->(v, *_a) { v.to_f },
        "number_format" => ->(v, *a) {
          decimals = a[0] ? a[0].to_i : 0
          formatted = format("%.#{decimals}f", v.to_f)
          # Add comma thousands separator
          parts = formatted.split(".")
          parts[0] = parts[0].gsub(THOUSANDS_RE, '\\1,')
          parts.join(".")
        },

        # -- Date --
        # Accepts a parseable date String or anything responding to
        # strftime; falls back to to_s on parse failure.
        "date" => ->(v, *a) {
          fmt = a[0] || "%Y-%m-%d"
          begin
            if v.is_a?(String)
              dt = DateTime.parse(v)
              dt.strftime(fmt)
            elsif v.respond_to?(:strftime)
              v.strftime(fmt)
            else
              v.to_s
            end
          rescue
            v.to_s
          end
        },

        # -- Arrays --
        "length" => ->(v, *_a) { v.respond_to?(:length) ? v.length : v.to_s.length },
        "first" => ->(v, *_a) { v.respond_to?(:first) ? v.first : (v.to_s[0] rescue nil) },
        "last" => ->(v, *_a) { v.respond_to?(:last) ? v.last : (v.to_s[-1] rescue nil) },
        "reverse" => ->(v, *_a) { v.respond_to?(:reverse) ? v.reverse : v.to_s.reverse },
        "sort" => ->(v, *_a) { v.respond_to?(:sort) ? v.sort : v },
        "shuffle" => ->(v, *_a) { v.respond_to?(:shuffle) ? v.shuffle : v },
        "unique" => ->(v, *_a) { v.is_a?(Array) ? v.uniq : v },
        "join" => ->(v, *a) { v.respond_to?(:join) ? v.join(a[0] || ", ") : v.to_s },
        "split" => ->(v, *a) { v.to_s.split(a[0] || " ") },
        # slice: requires start and end args; end-exclusive range.
        "slice" => ->(v, *a) {
          if a.length >= 2
            s = a[0].to_i
            e = a[1].to_i
            if v.is_a?(Array)
              v[s...e]
            else
              v.to_s[s...e]
            end
          else
            v
          end
        },
        "batch" => ->(v, *a) {
          if a[0] && v.respond_to?(:each_slice)
            v.each_slice(a[0].to_i).to_a
          else
            [v]
          end
        },
        # map: plucks a key from each Hash element (string or symbol key).
        "map" => ->(v, *a) {
          if a[0] && v.is_a?(Array)
            v.map { |item| item.is_a?(Hash) ? (item[a[0]] || item[a[0].to_sym]) : nil }
          else
            v
          end
        },
        "filter" => ->(v, *_a) { v.is_a?(Array) ? v.select { |item| item } : v },
        "column" => ->(v, *a) {
          if a[0] && v.is_a?(Array)
            v.map { |row| row.is_a?(Hash) ? (row[a[0]] || row[a[0].to_sym]) : nil }
          else
            v
          end
        },

        # -- Dict --
        "keys" => ->(v, *_a) { v.respond_to?(:keys) ? v.keys : [] },
        "values" => ->(v, *_a) { v.respond_to?(:values) ? v.values : [v] },
        "merge" => ->(v, *a) {
          if v.respond_to?(:merge) && a[0].is_a?(Hash)
            v.merge(a[0])
          elsif v.is_a?(Array) && a[0].is_a?(Array)
            v + a[0]
          else
            v
          end
        },

        # -- Utility --
        "default" => ->(v, *a) { (v.nil? || v.to_s.empty?) ? (a[0] || "") : v },
        # dump filter — gated on TINA4_DEBUG=true via Frond.render_dump.
        # Both the |dump filter and the dump() global delegate to the same
        # helper so they produce identical output and obey the same gating.
        "dump" => ->(v, *_a) { Frond.render_dump(v) },
        "string" => ->(v, *_a) { v.to_s },
        "truncate" => ->(v, *a) {
          len = a[0] ? a[0].to_i : 50
          str = v.to_s
          str.length > len ? str[0...len] + "..." : str
        },
        # wordwrap: greedy word wrap at the given width (default 75).
        "wordwrap" => ->(v, *a) {
          width = a[0] ? a[0].to_i : 75
          words = v.to_s.split
          lines = []
          current = +""
          words.each do |word|
            if !current.empty? && current.length + 1 + word.length > width
              lines << current
              current = word
            else
              current = current.empty? ? word : "#{current} #{word}"
            end
          end
          lines << current unless current.empty?
          lines.join("\n")
        },
        "slug" => ->(v, *_a) { v.to_s.downcase.gsub(SLUG_CLEAN_RE, "-").gsub(SLUG_TRIM_RE, "") },
        "nl2br" => ->(v, *_a) { v.to_s.gsub("\n", "<br>\n") },
        "format" => ->(v, *a) {
          if a.any?
            v.to_s % a
          else
            v.to_s
          end
        },
        "form_token" => ->(_v, *_a) { Frond.generate_form_token(_v.to_s) },
      }
    end
|
|
1921
|
-
|
|
1922
|
-
# -----------------------------------------------------------------------
|
|
1923
|
-
# Built-in globals
|
|
1924
|
-
# -----------------------------------------------------------------------
|
|
1925
|
-
|
|
1926
|
-
    # Register the built-in global functions available to every render:
    # form-token helpers (three aliases for cross-port parity) and dump().
    def register_builtin_globals
      @globals["form_token"] = ->(descriptor = "") { Frond.generate_form_token(descriptor.to_s) }
      @globals["formTokenValue"] = ->(descriptor = "") { Frond.generate_form_token_value(descriptor.to_s) }
      @globals["form_token_value"] = ->(descriptor = "") { Frond.generate_form_token_value(descriptor.to_s) }

      # Debug helper: {{ dump(x) }} — gated on TINA4_DEBUG=true.
      # Both this global and the |dump filter call Frond.render_dump which
      # returns an empty SafeString in production so dump never leaks state.
      @globals["dump"] = ->(value = nil) { Frond.render_dump(value) }
    end
|
|
1936
|
-
|
|
1937
|
-
# Render a value as a pre-formatted inspect() wrapped in <pre> tags.
|
|
1938
|
-
#
|
|
1939
|
-
# Gated on TINA4_DEBUG=true. In production (TINA4_DEBUG unset or false)
|
|
1940
|
-
# this returns an empty SafeString to avoid leaking internal state,
|
|
1941
|
-
# object shapes, or sensitive values into rendered HTML.
|
|
1942
|
-
#
|
|
1943
|
-
# Shared by the {{ value|dump }} filter and the {{ dump(value) }}
|
|
1944
|
-
# global function so both produce identical output and obey the same
|
|
1945
|
-
# gating.
|
|
1946
|
-
def self.render_dump(value)
|
|
1947
|
-
return SafeString.new("") unless ENV.fetch("TINA4_DEBUG", "").downcase == "true"
|
|
1948
|
-
|
|
1949
|
-
dumped = value.inspect
|
|
1950
|
-
escaped = dumped
|
|
1951
|
-
.gsub("&", "&")
|
|
1952
|
-
.gsub("<", "<")
|
|
1953
|
-
.gsub(">", ">")
|
|
1954
|
-
.gsub('"', """)
|
|
1955
|
-
SafeString.new("<pre>#{escaped}</pre>")
|
|
1956
|
-
end
|
|
1957
|
-
|
|
1958
|
-
# Generate a JWT form token and return a hidden input element.
|
|
1959
|
-
#
|
|
1960
|
-
# @param descriptor [String] Optional string to enrich the token payload.
|
|
1961
|
-
# - Empty: payload is {"type" => "form"}
|
|
1962
|
-
# - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
|
|
1963
|
-
# - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
|
|
1964
|
-
#
|
|
1965
|
-
# @return [String] <input type="hidden" name="formToken" value="TOKEN">
|
|
1966
|
-
    # Session ID used by generate_form_token for CSRF session binding.
    # Set this before rendering templates to bind tokens to the current session.
    # (Class instance variable — not a @@class variable — so it is not
    # shared down any inheritance tree.)
    @form_token_session_id = ""

    class << self
      # Read/write access to the class-level session ID above.
      attr_accessor :form_token_session_id

      # Set the session ID used for CSRF form token binding.
      # Parity with Python/PHP/Node: Frond.set_form_token_session_id(id)
      #
      # @param session_id [String] The session ID to bind form tokens to
      def set_form_token_session_id(session_id)
        self.form_token_session_id = session_id
      end
    end
|
|
1981
|
-
|
|
1982
|
-
# Generate a raw JWT form token string.
|
|
1983
|
-
#
|
|
1984
|
-
# @param descriptor [String] Optional string to enrich the token payload.
|
|
1985
|
-
# - Empty: payload is {"type" => "form"}
|
|
1986
|
-
# - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
|
|
1987
|
-
# - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
|
|
1988
|
-
#
|
|
1989
|
-
# @return [String] The raw JWT token string.
|
|
1990
|
-
    # Generate a raw JWT form token string (see the descriptor format
    # documented above): "context|ref" splits into two claims, a plain
    # descriptor becomes the "context" claim, empty adds nothing.
    #
    # @param descriptor [String] optional payload enrichment
    # @return [String] the signed JWT from Tina4::Auth.create_token
    def self.generate_form_jwt(descriptor = "")
      # Lazy requires: auth/log are only loaded when a token is needed.
      require_relative "log"
      require_relative "auth"

      # Random nonce makes every token unique, even for equal descriptors.
      payload = { "type" => "form", "nonce" => SecureRandom.hex(8) }
      if descriptor && !descriptor.empty?
        if descriptor.include?("|")
          # Split only on the first "|" so the ref may itself contain "|".
          parts = descriptor.split("|", 2)
          payload["context"] = parts[0]
          payload["ref"] = parts[1]
        else
          payload["context"] = descriptor
        end
      end

      # Include session_id for CSRF session binding
      sid = form_token_session_id.to_s
      payload["session_id"] = sid unless sid.empty?

      # Token lifetime: TINA4_TOKEN_LIMIT minutes (default 60).
      ttl_minutes = (ENV["TINA4_TOKEN_LIMIT"] || "60").to_i
      expires_in = ttl_minutes * 60
      Tina4::Auth.create_token(payload, expires_in: expires_in)
    end
|
|
2013
|
-
|
|
2014
|
-
    # Wrap a freshly generated form JWT in a hidden <input> element.
    # The token value is HTML-escaped, and the markup is returned as a
    # SafeString so template auto-escaping leaves it intact.
    def self.generate_form_token(descriptor = "")
      token = generate_form_jwt(descriptor)
      Tina4::SafeString.new(%(<input type="hidden" name="formToken" value="#{CGI.escapeHTML(token)}">))
    end
|
|
2018
|
-
|
|
2019
|
-
# Return just the raw JWT form token string (no <input> wrapper).
|
|
2020
|
-
# Registered as both formTokenValue and form_token_value template globals.
|
|
2021
|
-
def self.generate_form_token_value(descriptor = "")
|
|
2022
|
-
Tina4::SafeString.new(generate_form_jwt(descriptor))
|
|
2023
|
-
end
|
|
2024
|
-
end
|
|
2025
|
-
end
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
# Tina4 Frond Engine -- Lexer, parser, and runtime.
|
|
4
|
+
# Zero-dependency twig-like template engine.
|
|
5
|
+
# Supports: variables, filters, if/elseif/else/endif, for/else/endfor,
|
|
6
|
+
# extends/block, include, macro, set, comments, whitespace control, tests,
|
|
7
|
+
# fragment caching, sandboxing, auto-escaping, custom filters/tests/globals.
|
|
8
|
+
|
|
9
|
+
require "json"
|
|
10
|
+
require "digest"
|
|
11
|
+
require "base64"
|
|
12
|
+
require "cgi"
|
|
13
|
+
require "uri"
|
|
14
|
+
require "date"
|
|
15
|
+
require "time"
|
|
16
|
+
require "securerandom"
|
|
17
|
+
|
|
18
|
+
module Tina4
|
|
19
|
+
  # Marker class for strings that should not be auto-escaped in Frond.
  # Filters and globals wrap trusted, already-escaped markup in SafeString
  # so the auto-escaper passes it through unchanged.
  class SafeString < String
  end
|
|
22
|
+
|
|
23
|
+
class Frond
|
|
24
|
+
# -- Token types ----------------------------------------------------------
|
|
25
|
+
TEXT = :text
|
|
26
|
+
VAR = :var # {{ ... }}
|
|
27
|
+
BLOCK = :block # {% ... %}
|
|
28
|
+
COMMENT = :comment # {# ... #}
|
|
29
|
+
|
|
30
|
+
# Regex to split template source into tokens
|
|
31
|
+
TOKEN_RE = /(\{%-?\s*.*?\s*-?%\})|(\{\{-?\s*.*?\s*-?\}\})|(\{#.*?#\})/m
|
|
32
|
+
|
|
33
|
+
# HTML escape table
|
|
34
|
+
HTML_ESCAPE_MAP = { "&" => "&", "<" => "<", ">" => ">",
|
|
35
|
+
'"' => """, "'" => "'" }.freeze
|
|
36
|
+
HTML_ESCAPE_RE = /[&<>"']/
|
|
37
|
+
|
|
38
|
+
# -- Compiled regex constants (optimization: avoid re-compiling in methods) --
|
|
39
|
+
EXTENDS_RE = /\{%-?\s*extends\s+["'](.+?)["']\s*-?%\}/
|
|
40
|
+
BLOCK_RE = /\{%-?\s*block\s+(\w+)\s*-?%\}(.*?)\{%-?\s*endblock\s*-?%\}/m
|
|
41
|
+
STRING_LIT_RE = /\A["'](.*)["']\z/
|
|
42
|
+
INTEGER_RE = /\A-?\d+\z/
|
|
43
|
+
FLOAT_RE = /\A-?\d+\.\d+\z/
|
|
44
|
+
ARRAY_LIT_RE = /\A\[(.+)\]\z/m
|
|
45
|
+
HASH_LIT_RE = /\A\{(.+)\}\z/m
|
|
46
|
+
HASH_PAIR_RE = /\A\s*(?:["']([^"']+)["']|(\w+))\s*:\s*(.+)\z/
|
|
47
|
+
RANGE_LIT_RE = /\A(\d+)\.\.(\d+)\z/
|
|
48
|
+
ARITHMETIC_OPS = [" + ", " - ", " * ", " // ", " / ", " % ", " ** "].freeze
|
|
49
|
+
FUNC_CALL_RE = /\A(\w+)\s*\((.*)\)\z/m
|
|
50
|
+
FILTER_WITH_ARGS_RE = /\A(\w+)\s*\((.*)\)\z/m
|
|
51
|
+
FILTER_CMP_RE = /\A(\w+)\s*(!=|==|>=|<=|>|<)\s*(.+)\z/
|
|
52
|
+
OR_SPLIT_RE = /\s+or\s+/
|
|
53
|
+
AND_SPLIT_RE = /\s+and\s+/
|
|
54
|
+
IS_NOT_RE = /\A(.+?)\s+is\s+not\s+(\w+)(.*)\z/
|
|
55
|
+
IS_RE = /\A(.+?)\s+is\s+(\w+)(.*)\z/
|
|
56
|
+
NOT_IN_RE = /\A(.+?)\s+not\s+in\s+(.+)\z/
|
|
57
|
+
IN_RE = /\A(.+?)\s+in\s+(.+)\z/
|
|
58
|
+
DIVISIBLE_BY_RE = /\s*by\s*\(\s*(\d+)\s*\)/
|
|
59
|
+
RESOLVE_SPLIT_RE = /\.|\[([^\]]+)\]/
|
|
60
|
+
RESOLVE_STRIP_RE = /\A["']|["']\z/
|
|
61
|
+
DIGIT_RE = /\A\d+\z/
|
|
62
|
+
FOR_RE = /\Afor\s+(\w+)(?:\s*,\s*(\w+))?\s+in\s+(.+)\z/
|
|
63
|
+
SET_RE = /\Aset\s+(\w+)\s*=\s*(.+)\z/m
|
|
64
|
+
INCLUDE_RE = /\Ainclude\s+["'](.+?)["'](?:\s+with\s+(.+))?\z/
|
|
65
|
+
MACRO_RE = /\Amacro\s+(\w+)\s*\(([^)]*)\)/
|
|
66
|
+
FROM_IMPORT_RE = /\Afrom\s+["'](.+?)["']\s+import\s+(.+)/
|
|
67
|
+
CACHE_RE = /\Acache\s+["'](.+?)["']\s*(\d+)?/
|
|
68
|
+
SPACELESS_RE = />\s+</
|
|
69
|
+
AUTOESCAPE_RE = /\Aautoescape\s+(false|true)/
|
|
70
|
+
STRIPTAGS_RE = /<[^>]+>/
|
|
71
|
+
THOUSANDS_RE = /(\d)(?=(\d{3})+(?!\d))/
|
|
72
|
+
SLUG_CLEAN_RE = /[^a-z0-9]+/
|
|
73
|
+
SLUG_TRIM_RE = /\A-|-\z/
|
|
74
|
+
|
|
75
|
+
# Set of common no-arg filter names that can be inlined for speed
|
|
76
|
+
INLINE_FILTERS = %w[upper lower length trim capitalize title string int escape e].each_with_object({}) { |f, h| h[f] = true }.freeze
|
|
77
|
+
|
|
78
|
+
# -- Lazy context overlay for for-loops (avoids full Hash#dup) --
|
|
79
|
+
class LoopContext
|
|
80
|
+
def initialize(parent)
|
|
81
|
+
@parent = parent
|
|
82
|
+
@local = {}
|
|
83
|
+
end
|
|
84
|
+
|
|
85
|
+
def [](key)
|
|
86
|
+
@local.key?(key) ? @local[key] : @parent[key]
|
|
87
|
+
end
|
|
88
|
+
|
|
89
|
+
def []=(key, value)
|
|
90
|
+
@local[key] = value
|
|
91
|
+
end
|
|
92
|
+
|
|
93
|
+
def key?(key)
|
|
94
|
+
@local.key?(key) || @parent.key?(key)
|
|
95
|
+
end
|
|
96
|
+
alias include? key?
|
|
97
|
+
alias has_key? key?
|
|
98
|
+
|
|
99
|
+
def fetch(key, *args, &block)
|
|
100
|
+
if @local.key?(key)
|
|
101
|
+
@local[key]
|
|
102
|
+
elsif @parent.key?(key)
|
|
103
|
+
@parent[key]
|
|
104
|
+
elsif block
|
|
105
|
+
yield key
|
|
106
|
+
elsif !args.empty?
|
|
107
|
+
args[0]
|
|
108
|
+
else
|
|
109
|
+
raise KeyError, "key not found: #{key.inspect}"
|
|
110
|
+
end
|
|
111
|
+
end
|
|
112
|
+
|
|
113
|
+
def merge(other)
|
|
114
|
+
dup_hash = to_h
|
|
115
|
+
dup_hash.merge!(other)
|
|
116
|
+
dup_hash
|
|
117
|
+
end
|
|
118
|
+
|
|
119
|
+
def merge!(other)
|
|
120
|
+
other.each { |k, v| @local[k] = v }
|
|
121
|
+
self
|
|
122
|
+
end
|
|
123
|
+
|
|
124
|
+
def dup
|
|
125
|
+
copy = LoopContext.new(@parent)
|
|
126
|
+
@local.each { |k, v| copy[k] = v }
|
|
127
|
+
copy
|
|
128
|
+
end
|
|
129
|
+
|
|
130
|
+
def to_h
|
|
131
|
+
h = @parent.is_a?(LoopContext) ? @parent.to_h : @parent.dup
|
|
132
|
+
@local.each { |k, v| h[k] = v }
|
|
133
|
+
h
|
|
134
|
+
end
|
|
135
|
+
|
|
136
|
+
def each(&block)
|
|
137
|
+
to_h.each(&block)
|
|
138
|
+
end
|
|
139
|
+
|
|
140
|
+
def respond_to_missing?(name, include_private = false)
|
|
141
|
+
@parent.respond_to?(name, include_private) || super
|
|
142
|
+
end
|
|
143
|
+
|
|
144
|
+
def is_a?(klass)
|
|
145
|
+
klass == Hash || super
|
|
146
|
+
end
|
|
147
|
+
|
|
148
|
+
def keys
|
|
149
|
+
(@parent.is_a?(LoopContext) ? @parent.keys : @parent.keys) | @local.keys
|
|
150
|
+
end
|
|
151
|
+
end
|
|
152
|
+
|
|
153
|
+
# -----------------------------------------------------------------------
|
|
154
|
+
# Public API
|
|
155
|
+
# -----------------------------------------------------------------------
|
|
156
|
+
|
|
157
|
+
attr_reader :template_dir
|
|
158
|
+
|
|
159
|
+
    # Create a new Frond engine.
    #
    # @param template_dir [String] root directory for template file lookups
    def initialize(template_dir: "src/templates")
      @template_dir = template_dir
      @filters = default_filters
      @globals = {}
      @tests = default_tests
      @auto_escape = true

      # Sandboxing (disabled by default; see #sandbox / #unsandbox)
      @sandbox = false
      @allowed_filters = nil
      @allowed_tags = nil
      @allowed_vars = nil

      # Fragment cache: key => [html, expires_at]
      @fragment_cache = {}

      # Token pre-compilation cache
      @compiled = {} # {template_name => [tokens, mtime]}
      @compiled_strings = {} # {md5_hash => tokens}

      # Parsed filter chain cache: expr_string => [variable, filters]
      @filter_chain_cache = {}

      # Resolved dotted-path split cache: expr_string => parts_array
      @resolve_cache = {}

      # Sandbox root-var split cache: var_name => root_var_string
      @dotted_split_cache = {}

      # Built-in global functions
      register_builtin_globals
    end
|
|
191
|
+
|
|
192
|
+
# Render a template file with data. Uses token caching for performance.
|
|
193
|
+
    # Render a template file with data. Uses token caching for performance.
    # In production (TINA4_DEBUG != "true") tokens are cached permanently
    # per template name; in debug mode the cache is never read, so every
    # call re-reads and re-tokenizes the file.
    #
    # @param template [String] template path relative to @template_dir
    # @param data [Hash] render context (keys stringified; globals merged in)
    # @return [String] rendered output
    def render(template, data = {})
      # Globals form the base context; caller-supplied data wins on clash.
      context = @globals.merge(stringify_keys(data))

      path = File.join(@template_dir, template)
      raise "Template not found: #{path}" unless File.exist?(path)

      debug_mode = ENV.fetch("TINA4_DEBUG", "").downcase == "true"

      unless debug_mode
        # Production: use permanent cache (no filesystem checks)
        cached = @compiled[template]
        return execute_cached(cached[0], context) if cached
      end
      # Dev mode: skip cache entirely — always re-read and re-tokenize
      # so edits to partials and extended base templates are detected

      # Cache miss — load, tokenize, cache
      source = File.read(path, encoding: "utf-8")
      mtime = File.mtime(path)
      tokens = tokenize(source)
      # NOTE(review): mtime is stored alongside tokens but never consulted
      # when reading the cache — presumably reserved for future
      # invalidation; confirm before relying on it.
      @compiled[template] = [tokens, mtime]
      execute_with_tokens(source, tokens, context)
    end
|
|
216
|
+
|
|
217
|
+
# Render a template string directly. Uses token caching for performance.
|
|
218
|
+
    # Render a template string directly. Uses token caching for performance:
    # tokens are keyed by the MD5 of the source, so an identical string is
    # only tokenized once per engine instance.
    def render_string(source, data = {})
      context = @globals.merge(stringify_keys(data))

      key = Digest::MD5.hexdigest(source)
      cached_tokens = @compiled_strings[key]

      if cached_tokens
        return execute_cached(cached_tokens, context)
      end

      tokens = tokenize(source)
      @compiled_strings[key] = tokens
      execute_cached(tokens, context)
    end
|
|
232
|
+
|
|
233
|
+
# Clear all compiled template caches.
|
|
234
|
+
def clear_cache
|
|
235
|
+
@compiled.clear
|
|
236
|
+
@compiled_strings.clear
|
|
237
|
+
@filter_chain_cache.clear
|
|
238
|
+
@resolve_cache.clear
|
|
239
|
+
@dotted_split_cache.clear
|
|
240
|
+
end
|
|
241
|
+
|
|
242
|
+
# Register a custom filter.
|
|
243
|
+
    # Register a custom filter; the block receives (value, *args).
    def add_filter(name, &blk)
      @filters[name.to_s] = blk
    end
|
|
246
|
+
|
|
247
|
+
# Register a custom test.
|
|
248
|
+
    # Register a custom test (used by "is <name>" expressions).
    def add_test(name, &blk)
      @tests[name.to_s] = blk
    end
|
|
251
|
+
|
|
252
|
+
# Register a global variable available in all templates.
|
|
253
|
+
    # Register a global variable (or callable) available in all templates.
    def add_global(name, value)
      @globals[name.to_s] = value
    end
|
|
256
|
+
|
|
257
|
+
# Enable sandbox mode.
|
|
258
|
+
    # Enable sandbox mode. A nil allow-list leaves that category
    # unrestricted; otherwise only the listed names are permitted.
    # Returns self for chaining.
    def sandbox(filters: nil, tags: nil, vars: nil)
      @sandbox = true
      @allowed_filters = filters ? filters.map(&:to_s) : nil
      @allowed_tags = tags ? tags.map(&:to_s) : nil
      @allowed_vars = vars ? vars.map(&:to_s) : nil
      self
    end
|
|
265
|
+
|
|
266
|
+
# Disable sandbox mode.
|
|
267
|
+
    # Disable sandbox mode and drop all allow-lists. Returns self.
    def unsandbox
      @sandbox = false
      @allowed_filters = nil
      @allowed_tags = nil
      @allowed_vars = nil
      self
    end
|
|
274
|
+
|
|
275
|
+
# Utility: HTML escape
|
|
276
|
+
    # Utility: HTML escape — replaces the characters matched by
    # HTML_ESCAPE_RE using the precompiled HTML_ESCAPE_MAP entity table.
    def self.escape_html(str)
      str.to_s.gsub(HTML_ESCAPE_RE, HTML_ESCAPE_MAP)
    end
|
|
279
|
+
|
|
280
|
+
private
|
|
281
|
+
|
|
282
|
+
# -----------------------------------------------------------------------
|
|
283
|
+
# Tokenizer
|
|
284
|
+
# -----------------------------------------------------------------------
|
|
285
|
+
|
|
286
|
+
# Regex to extract {% raw %}...{% endraw %} blocks before tokenizing
|
|
287
|
+
RAW_BLOCK_RE = /\{%-?\s*raw\s*-?%\}(.*?)\{%-?\s*endraw\s*-?%\}/m
|
|
288
|
+
|
|
289
|
+
    # Split template source into an array of [type, raw] token pairs, where
    # type is TEXT, VAR, BLOCK, or COMMENT. {% raw %} bodies are protected
    # with NUL-delimited placeholders so their contents are never parsed,
    # then restored as literal TEXT afterwards.
    def tokenize(source)
      # 1. Extract raw blocks and replace with placeholders
      raw_blocks = []
      source = source.gsub(RAW_BLOCK_RE) do
        idx = raw_blocks.length
        raw_blocks << Regexp.last_match(1)
        # NUL bytes cannot appear in template source, so the placeholder
        # cannot collide with user content.
        "\x00RAW_#{idx}\x00"
      end

      # 2. Normal tokenization
      tokens = []
      pos = 0
      source.scan(TOKEN_RE) do
        m = Regexp.last_match
        start = m.begin(0)
        # Plain text between the previous match and this one.
        tokens << [TEXT, source[pos...start]] if start > pos

        raw = m[0]
        if raw.start_with?("{#")
          tokens << [COMMENT, raw]
        elsif raw.start_with?("{{")
          tokens << [VAR, raw]
        elsif raw.start_with?("{%")
          tokens << [BLOCK, raw]
        end
        pos = m.end(0)
      end
      # Trailing text after the last tag.
      tokens << [TEXT, source[pos..]] if pos < source.length

      # 3. Restore raw block placeholders as literal TEXT
      unless raw_blocks.empty?
        tokens = tokens.map do |ttype, value|
          if ttype == TEXT && value.include?("\x00RAW_")
            raw_blocks.each_with_index do |content, idx|
              value = value.gsub("\x00RAW_#{idx}\x00", content)
            end
          end
          [ttype, value]
        end
      end

      tokens
    end
|
|
332
|
+
|
|
333
|
+
# Strip delimiters from a tag and detect whitespace control markers.
|
|
334
|
+
# Returns [content, strip_before, strip_after].
|
|
335
|
+
def strip_tag(raw)
|
|
336
|
+
inner = raw[2..-3] # remove {{ }} or {% %} or {# #}
|
|
337
|
+
strip_before = false
|
|
338
|
+
strip_after = false
|
|
339
|
+
|
|
340
|
+
if inner.start_with?("-")
|
|
341
|
+
strip_before = true
|
|
342
|
+
inner = inner[1..]
|
|
343
|
+
end
|
|
344
|
+
if inner.end_with?("-")
|
|
345
|
+
strip_after = true
|
|
346
|
+
inner = inner[0..-2]
|
|
347
|
+
end
|
|
348
|
+
|
|
349
|
+
[inner.strip, strip_before, strip_after]
|
|
350
|
+
end
|
|
351
|
+
|
|
352
|
+
# -----------------------------------------------------------------------
|
|
353
|
+
# Template loading
|
|
354
|
+
# -----------------------------------------------------------------------
|
|
355
|
+
|
|
356
|
+
    # Read a template file from @template_dir as UTF-8.
    # Raises RuntimeError when the file does not exist.
    def load_template(name)
      path = File.join(@template_dir, name)
      raise "Template not found: #{path}" unless File.exist?(path)

      File.read(path, encoding: "utf-8")
    end
|
|
362
|
+
|
|
363
|
+
# -----------------------------------------------------------------------
|
|
364
|
+
# Execution
|
|
365
|
+
# -----------------------------------------------------------------------
|
|
366
|
+
|
|
367
|
+
# Render a pre-tokenized template. Templates using {% extends %} need
# block extraction from raw source, so if the first significant token is
# an extends tag the joined source is handed back to #execute instead.
def execute_cached(tokens, context)
  first = tokens.find { |ttype, raw| !(ttype == TEXT && raw.strip.empty?) }
  if first && first[0] == BLOCK
    tag_content, = strip_tag(first[1])
    if tag_content.start_with?("extends ")
      # Extends requires source-based execution for block extraction.
      return execute(tokens.map { |_, v| v }.join, context)
    end
  end
  render_tokens(tokens, context)
end
|
|
383
|
+
|
|
384
|
+
# Render when both the raw source and its token stream are available.
# Inheritance ({% extends %}) is resolved from the source; otherwise the
# already-built tokens are rendered directly.
def execute_with_tokens(source, tokens, context)
  m = EXTENDS_RE.match(source)
  if m
    parent_source = load_template(m[1])
    return render_with_blocks(parent_source, context, extract_blocks(source))
  end

  render_tokens(tokens, context)
end
|
|
395
|
+
|
|
396
|
+
# Render raw template source. A template that extends a parent is
# rendered via block substitution; anything else is tokenized and
# rendered directly.
def execute(source, context)
  m = EXTENDS_RE.match(source)
  if m
    parent_source = load_template(m[1])
    return render_with_blocks(parent_source, context, extract_blocks(source))
  end

  render_tokens(tokenize(source), context)
end
|
|
407
|
+
|
|
408
|
+
# Collect every {% block name %}...{% endblock %} section in the source.
# Returns a Hash of block name => inner source.
def extract_blocks(source)
  found = {}
  source.scan(BLOCK_RE) do
    name = Regexp.last_match(1)
    found[name] = Regexp.last_match(2)
  end
  found
end
|
|
415
|
+
|
|
416
|
+
# Render a parent template, substituting each {% block %} section with
# the child's override when one exists (template inheritance).
#
# parent_source - raw source of the parent template
# child_blocks  - Hash of block name => child block source (may be empty)
#
# Inside an overriding block, "parent" and "super" resolve to a callable
# that lazily renders the parent's original block content, wrapped in
# SafeString so it is not re-escaped.
def render_with_blocks(parent_source, context, child_blocks)
  engine = self
  result = parent_source.gsub(BLOCK_RE) do
    name = Regexp.last_match(1)
    parent_content = Regexp.last_match(2)
    # Fall back to the parent's own content when the child doesn't override.
    block_source = child_blocks.fetch(name, parent_content)

    # Make parent() and super() available inside child blocks
    rendered_parent = nil
    get_parent = lambda do
      # Memoized: repeated parent()/super() calls render only once.
      rendered_parent ||= Tina4::SafeString.new(
        engine.send(:render_tokens, tokenize(parent_content), context)
      )
      rendered_parent
    end

    block_ctx = context.merge("parent" => get_parent, "super" => get_parent)
    render_tokens(tokenize(block_source), block_ctx)
  end
  # Render any tags that live outside block sections in the parent.
  render_tokens(tokenize(result), context)
end
|
|
437
|
+
|
|
438
|
+
# -----------------------------------------------------------------------
|
|
439
|
+
# Token renderer
|
|
440
|
+
# -----------------------------------------------------------------------
|
|
441
|
+
|
|
442
|
+
# Render a token stream ([type, raw] pairs) into the final output string.
#
# Walks tokens with an explicit index because multi-token block handlers
# (if/for/cache/spaceless/autoescape/macro) consume a variable number of
# tokens and return the next index to resume at. Whitespace-control
# markers ("-") rstrip the previous output chunk and lstrip the next
# TEXT token.
def render_tokens(tokens, context)
  output = []
  i = 0

  while i < tokens.length
    ttype, raw = tokens[i]

    case ttype
    when TEXT
      output << raw
      i += 1

    when COMMENT
      # {# ... #} produces no output.
      i += 1

    when VAR
      content, strip_b, strip_a = strip_tag(raw)
      output[-1] = output[-1].rstrip if strip_b && !output.empty?

      result = eval_var(content, context)
      output << (result.nil? ? "" : result.to_s)

      # {{ ... -}} trims leading whitespace of the following text token.
      if strip_a && i + 1 < tokens.length && tokens[i + 1][0] == TEXT
        tokens[i + 1] = [TEXT, tokens[i + 1][1].lstrip]
      end
      i += 1

    when BLOCK
      content, strip_b, strip_a = strip_tag(raw)
      output[-1] = output[-1].rstrip if strip_b && !output.empty?

      tag = content.split[0] || ""

      # Multi-token handlers return [rendered, next_index]; single-token
      # tags advance i themselves.
      case tag
      when "if"
        result, i = handle_if(tokens, i, context)
        output << result
      when "for"
        result, i = handle_for(tokens, i, context)
        output << result
      when "set"
        handle_set(content, context)
        i += 1
      when "include"
        # Sandbox mode may disallow the include tag entirely.
        if @sandbox && @allowed_tags && !@allowed_tags.include?("include")
          i += 1
        else
          output << handle_include(content, context)
          i += 1
        end
      when "macro"
        i = handle_macro(tokens, i, context)
      when "from"
        handle_from_import(content, context)
        i += 1
      when "cache"
        result, i = handle_cache(tokens, i, context)
        output << result
      when "spaceless"
        result, i = handle_spaceless(tokens, i, context)
        output << result
      when "autoescape"
        result, i = handle_autoescape(tokens, i, context)
        output << result
      when "block", "endblock", "extends"
        # Inheritance tags are resolved before token rendering; skip here.
        i += 1
      else
        # Unknown tag: ignored silently.
        i += 1
      end

      # {% ... -%} trims leading whitespace of the token after the block.
      if strip_a && i < tokens.length && tokens[i][0] == TEXT
        tokens[i] = [TEXT, tokens[i][1].lstrip]
      end
    else
      i += 1
    end
  end

  output.join
end
|
|
522
|
+
|
|
523
|
+
# -----------------------------------------------------------------------
|
|
524
|
+
# Variable evaluation
|
|
525
|
+
# -----------------------------------------------------------------------
|
|
526
|
+
|
|
527
|
+
# Evaluate a {{ ... }} expression.
#
# A top-level ternary is resolved BEFORE filter parsing so expressions
# such as ``products|length != 1 ? "s" : ""`` are treated as one
# conditional rather than a broken filter chain.
def eval_var(expr, context)
  q = find_ternary(expr)
  if q != -1
    tail = expr[(q + 1)..]
    c = find_colon(tail)
    if c != -1
      condition = eval_var_raw(expr[0...q].strip, context)
      branch = truthy?(condition) ? tail[0...c] : tail[(c + 1)..]
      return eval_var(branch.strip, context)
    end
  end

  eval_var_inner(expr, context)
end
|
|
545
|
+
|
|
546
|
+
# Evaluate an expression and its filter chain WITHOUT sandbox checks or
# auto-escaping; "raw"/"safe" are no-ops here. Used by eval_var for
# ternary conditions, where the unescaped value is needed.
def eval_var_raw(expr, context)
  var_name, filters = parse_filter_chain(expr)
  value = eval_expr(var_name, context)
  filters.each do |fname, args|
    next if fname == "raw" || fname == "safe"
    fn = @filters[fname]
    if fn
      evaluated_args = args.map { |a| eval_filter_arg(a, context) }
      value = fn.call(value, *evaluated_args)
    else
      # The filter name may include a trailing comparison operator,
      # e.g. "length != 1". Extract the real filter name and the
      # comparison suffix, apply the filter, then evaluate the comparison.
      m = fname.match(FILTER_CMP_RE)
      if m
        real_filter = m[1]
        op = m[2]
        right_expr = m[3].strip
        fn2 = @filters[real_filter]
        if fn2
          evaluated_args = args.map { |a| eval_filter_arg(a, context) }
          value = fn2.call(value, *evaluated_args)
        end
        right = eval_expr(right_expr, context)
        # Incomparable operands (e.g. nil >= 1) fall back to false
        # via the rescue modifier on the whole case expression.
        value = case op
                when "!=" then value != right
                when "==" then value == right
                when ">=" then value >= right
                when "<=" then value <= right
                when ">" then value > right
                when "<" then value < right
                else false
                end rescue false
      else
        # Unknown filter with no comparison suffix: evaluate the name
        # itself as an expression, replacing the current value.
        value = eval_expr(fname, context)
      end
    end
  end
  value
end
|
|
586
|
+
|
|
587
|
+
# Evaluate a {{ ... }} expression: resolve the value, run its filter
# chain (with sandbox checks and an inlined fast path for common no-arg
# filters), then apply HTML auto-escaping unless marked safe.
def eval_var_inner(expr, context)
  var_name, filters = parse_filter_chain(expr)

  # Sandbox: check variable access
  if @sandbox && @allowed_vars
    # Cache the root identifier ("a.b[0]" -> "a") per expression.
    root_var = @dotted_split_cache[var_name]
    unless root_var
      root_var = var_name.split(".")[0].split("[")[0].strip
      @dotted_split_cache[var_name] = root_var
    end
    # "loop" is always permitted so for-loop metadata stays reachable.
    return "" if !root_var.empty? && !@allowed_vars.include?(root_var) && root_var != "loop"
  end

  value = eval_expr(var_name, context)

  is_safe = false
  filters.each do |fname, args|
    # raw/safe only toggle escaping; they never transform the value.
    if fname == "raw" || fname == "safe"
      is_safe = true
      next
    end

    # Sandbox: filters outside the allow-list are silently skipped.
    if @sandbox && @allowed_filters && !@allowed_filters.include?(fname)
      next
    end

    # Inline common no-arg filters for speed (skip generic dispatch)
    if args.empty? && INLINE_FILTERS.include?(fname)
      value = case fname
              when "upper" then value.to_s.upcase
              when "lower" then value.to_s.downcase
              when "length" then value.respond_to?(:length) ? value.length : value.to_s.length
              when "trim" then value.to_s.strip
              when "capitalize" then value.to_s.capitalize
              when "title" then value.to_s.split.map(&:capitalize).join(" ")
              when "string" then value.to_s
              when "int" then value.to_i
              when "escape", "e" then Frond.escape_html(value.to_s)
              else value
              end
      next
    end

    fn = @filters[fname]
    if fn
      evaluated_args = args.map { |a| eval_filter_arg(a, context) }
      value = fn.call(value, *evaluated_args)
    end
  end

  # Auto-escape HTML unless marked safe or SafeString
  if @auto_escape && !is_safe && value.is_a?(String) && !value.is_a?(SafeString)
    value = Frond.escape_html(value)
  end

  value
end
|
|
645
|
+
|
|
646
|
+
# Evaluate one filter argument: quoted strings and numeric literals take
# a fast path; everything else goes through the full expression evaluator.
def eval_filter_arg(arg, context)
  if (m = STRING_LIT_RE.match(arg))
    m[1]
  elsif INTEGER_RE.match?(arg)
    arg.to_i
  elsif FLOAT_RE.match?(arg)
    arg.to_f
  else
    eval_expr(arg, context)
  end
end
|
|
652
|
+
|
|
653
|
+
# Find the first occurrence of +needle+ that is not inside quotes or
|
|
654
|
+
# parentheses. Returns the index, or -1 if not found.
|
|
655
|
+
# Find the first occurrence of +needle+ that lies outside quoted strings,
# parentheses, and square brackets. Returns the index, or -1 if absent.
def find_outside_quotes(expr, needle)
  quote = nil
  paren = 0
  bracket = 0
  nlen = needle.length
  limit = expr.length - nlen
  idx = 0

  while idx <= limit
    ch = expr[idx]
    # Quote state is only tracked at paren depth zero (same as before).
    if paren == 0 && (ch == '"' || ch == "'")
      if quote.nil?
        quote = ch
      elsif quote == ch
        quote = nil
      end
      idx += 1
      next
    end
    unless quote
      case ch
      when "(" then paren += 1
      when ")" then paren -= 1
      when "[" then bracket += 1
      when "]" then bracket -= 1
      end
      return idx if paren == 0 && bracket == 0 && expr[idx, nlen] == needle
    end
    idx += 1
  end
  -1
end
|
|
692
|
+
|
|
693
|
+
# Find the index of a top-level ``?`` that is part of a ternary operator.
|
|
694
|
+
# Respects quoted strings, parentheses, and skips ``??`` (null coalesce).
|
|
695
|
+
# Returns -1 if not found.
|
|
696
|
+
# Find the index of a top-level ``?`` belonging to a ternary operator.
# Skips quoted strings, parenthesized groups, and ``??`` (null coalesce).
# Returns -1 when no ternary marker exists.
def find_ternary(expr)
  paren = 0
  quote = nil
  pos = 0
  size = expr.length

  while pos < size
    ch = expr[pos]
    if quote
      quote = nil if ch == quote
      pos += 1
      next
    end
    case ch
    when '"', "'"
      quote = ch
    when "("
      paren += 1
    when ")"
      paren -= 1
    when "?"
      if paren.zero?
        # "??" is the null-coalescing operator, not a ternary.
        if pos + 1 < size && expr[pos + 1] == "?"
          pos += 2
          next
        end
        return pos
      end
    end
    pos += 1
  end
  -1
end
|
|
729
|
+
|
|
730
|
+
# Find the index of the top-level ``:`` that separates the true/false
|
|
731
|
+
# branches of a ternary. Respects quotes and parentheses.
|
|
732
|
+
# Find the index of the top-level ``:`` separating the true/false
# branches of a ternary. Skips quoted strings and parenthesized groups.
# Returns -1 when not found.
def find_colon(expr)
  paren = 0
  quote = nil
  idx = 0

  while idx < expr.length
    ch = expr[idx]
    if quote
      quote = nil if ch == quote
    elsif ch == '"' || ch == "'"
      quote = ch
    elsif ch == "("
      paren += 1
    elsif ch == ")"
      paren -= 1
    elsif ch == ":" && paren.zero?
      return idx
    end
    idx += 1
  end
  -1
end
|
|
754
|
+
|
|
755
|
+
# -----------------------------------------------------------------------
|
|
756
|
+
# Filter chain parser
|
|
757
|
+
# -----------------------------------------------------------------------
|
|
758
|
+
|
|
759
|
+
# Parse "variable|filter1|filter2(arg)" into [variable, filters] where
# filters is an array of [name, args] pairs. Results are memoized per
# expression in @filter_chain_cache and frozen.
def parse_filter_chain(expr)
  hit = @filter_chain_cache[expr]
  return hit if hit

  segments = split_on_pipe(expr)
  filter_list = segments[1..].map do |seg|
    seg = seg.strip
    if (m = FILTER_WITH_ARGS_RE.match(seg))
      arg_text = m[2].strip
      [m[1], arg_text.empty? ? [] : parse_args(arg_text)]
    else
      [seg, []]
    end
  end

  entry = [segments[0].strip, filter_list].freeze
  @filter_chain_cache[expr] = entry
  entry
end
|
|
783
|
+
|
|
784
|
+
# Split expression on | but not inside quotes or parens.
|
|
785
|
+
# Split an expression on "|" at top level only — pipes inside quoted
# strings or parentheses are kept with their segment.
def split_on_pipe(expr)
  pieces = []
  acc = +""
  open_quote = nil
  nesting = 0

  expr.each_char do |c|
    if open_quote
      acc << c
      open_quote = nil if c == open_quote
      next
    end
    case c
    when '"', "'"
      open_quote = c
      acc << c
    when "("
      nesting += 1
      acc << c
    when ")"
      nesting -= 1
      acc << c
    when "|"
      if nesting.zero?
        pieces << acc
        acc = +""
      else
        acc << c
      end
    else
      acc << c
    end
  end

  pieces << acc unless acc.empty?
  pieces
end
|
|
814
|
+
|
|
815
|
+
# Split a filter's raw argument string on top-level commas. Commas inside
# quotes or any bracket pair ((), {}, []) stay with their argument; each
# argument is stripped, and an empty trailing argument is dropped.
def parse_args(raw)
  collected = []
  token = +""
  quote = nil
  nesting = 0

  raw.each_char do |c|
    if quote
      quote = nil if c == quote
      token << c
      next
    end
    case c
    when '"', "'"
      quote = c
      token << c
    when "(", "{", "["
      nesting += 1
      token << c
    when ")", "}", "]"
      nesting -= 1
      token << c
    when ","
      if nesting.zero?
        collected << token.strip
        token = +""
      else
        token << c
      end
    else
      token << c
    end
  end
  collected << token.strip unless token.strip.empty?
  collected
end
|
|
846
|
+
|
|
847
|
+
# -----------------------------------------------------------------------
|
|
848
|
+
# Expression evaluator
|
|
849
|
+
# -----------------------------------------------------------------------
|
|
850
|
+
|
|
851
|
+
# ── Expression evaluator (dispatcher) ──────────────────────────────
|
|
852
|
+
# Each expression type is handled by a focused helper method.
|
|
853
|
+
# Helpers return :not_matched when the expression doesn't match their
|
|
854
|
+
# type, so the dispatcher falls through to the next handler.
|
|
855
|
+
|
|
856
|
+
# Expression dispatcher: try each specialised handler in priority order.
# Handlers return a sentinel symbol when the expression is not their
# shape, letting evaluation fall through; the last resort is plain
# dotted-path variable resolution.
def eval_expr(expr, context)
  expr = expr.strip
  return nil if expr.empty?

  lit = eval_literal(expr)
  return lit unless lit == :not_literal

  coll = eval_collection_literal(expr, context)
  return coll unless coll == :not_collection

  # A fully parenthesized expression evaluates its interior.
  return eval_expr(expr[1..-2], context) if matched_parens?(expr)

  tern = eval_ternary(expr, context)
  return tern unless tern == :not_ternary

  inline = eval_inline_if(expr, context)
  return inline unless inline == :not_inline_if

  coalesced = eval_null_coalesce(expr, context)
  return coalesced unless coalesced == :not_coalesce

  joined = eval_concat(expr, context)
  return joined unless joined == :not_concat

  return eval_comparison(expr, context) if has_comparison?(expr)

  math = eval_arithmetic(expr, context)
  return math unless math == :not_arithmetic

  called = eval_function_call(expr, context)
  return called unless called == :not_function

  resolve(expr, context)
end
|
|
890
|
+
|
|
891
|
+
# ── Literal values: strings, numbers, booleans, null ──
|
|
892
|
+
|
|
893
|
+
# Literal values: quoted strings, integers, floats, booleans, and the
# null keywords. Returns :not_literal when expr is none of these.
def eval_literal(expr)
  double_quoted = expr.start_with?('"') && expr.end_with?('"')
  single_quoted = expr.start_with?("'") && expr.end_with?("'")
  return expr[1..-2] if double_quoted || single_quoted
  return expr.to_i if INTEGER_RE.match?(expr)
  return expr.to_f if FLOAT_RE.match?(expr)

  case expr
  when "true" then true
  when "false" then false
  when "null", "none", "nil" then nil
  else :not_literal
  end
end
|
|
905
|
+
|
|
906
|
+
# ── Collection literals: arrays, hashes, ranges ──
|
|
907
|
+
|
|
908
|
+
# Collection literals: arrays ([a, b]), hashes ({k: v}), ranges (1..5).
# Elements/values are themselves evaluated as expressions.
# Returns :not_collection when expr matches none of the three forms.
def eval_collection_literal(expr, context)
  if (m = ARRAY_LIT_RE.match(expr))
    return split_args_toplevel(m[1]).map { |item| eval_expr(item.strip, context) }
  end

  if (m = HASH_LIT_RE.match(expr))
    built = {}
    split_args_toplevel(m[1]).each do |pair|
      pm = HASH_PAIR_RE.match(pair)
      next unless pm
      built[pm[1] || pm[2]] = eval_expr(pm[3].strip, context)
    end
    return built
  end

  if (m = RANGE_LIT_RE.match(expr))
    return (m[1].to_i..m[2].to_i).to_a
  end

  :not_collection
end
|
|
929
|
+
|
|
930
|
+
# ── Parenthesized sub-expression check ──
|
|
931
|
+
|
|
932
|
+
# True when expr is one fully parenthesized group, i.e. the opening "("
# does not close before the final character ("(a)(b)" is false).
def matched_parens?(expr)
  return false unless expr.start_with?("(") && expr.end_with?(")")

  level = 0
  last = expr.length - 1
  expr.each_char.with_index do |ch, pos|
    level += 1 if ch == "("
    level -= 1 if ch == ")"
    return false if level.zero? && pos < last
  end
  true
end
|
|
942
|
+
|
|
943
|
+
# ── Ternary: condition ? "yes" : "no" ──
|
|
944
|
+
|
|
945
|
+
# Ternary: condition ? "yes" : "no". The "?" must not be the first
# character; both separators must lie outside quotes/parens.
def eval_ternary(expr, context)
  q = find_outside_quotes(expr, "?")
  return :not_ternary unless q && q > 0

  head = expr[0...q].strip
  tail = expr[(q + 1)..]
  sep = find_outside_quotes(tail, ":")
  return :not_ternary unless sep && sep >= 0

  chosen = truthy?(eval_expr(head, context)) ? tail[0...sep] : tail[(sep + 1)..]
  eval_expr(chosen.strip, context)
end
|
|
957
|
+
|
|
958
|
+
# ── Inline if: value if condition else other_value ──
|
|
959
|
+
|
|
960
|
+
# Inline if: ``value if condition else other_value`` (Python-style).
# Both keywords must appear at top level with " else " after " if ".
def eval_inline_if(expr, context)
  if_at = find_outside_quotes(expr, " if ")
  return :not_inline_if unless if_at && if_at >= 0
  else_at = find_outside_quotes(expr, " else ")
  return :not_inline_if unless else_at && else_at > if_at

  condition = expr[(if_at + 4)...else_at].strip
  picked = truthy?(eval_expr(condition, context)) ? expr[0...if_at] : expr[(else_at + 6)..]
  eval_expr(picked.strip, context)
end
|
|
971
|
+
|
|
972
|
+
# ── Null coalescing: value ?? "default" ──
|
|
973
|
+
|
|
974
|
+
# Null coalescing: ``value ?? "default"``. Evaluates the left side and
# falls back to the right side only when it is nil.
#
# Fix: split at the first top-level "??" using the quote/paren-aware
# scanner. The previous ``expr.partition("??")`` matched a "??" even
# inside a quoted string literal or a parenthesized sub-expression,
# corrupting expressions such as ``greeting ?? "huh??"``.
def eval_null_coalesce(expr, context)
  cut = find_outside_quotes(expr, "??")
  return :not_coalesce if cut == -1

  val = eval_expr(expr[0...cut].strip, context)
  val.nil? ? eval_expr(expr[(cut + 2)..].strip, context) : val
end
|
|
980
|
+
|
|
981
|
+
# ── String concatenation: a ~ b ──
|
|
982
|
+
|
|
983
|
+
# String concatenation: ``a ~ b ~ c``.
#
# Fix: split only at top-level "~" using the quote/paren-aware scanner.
# The previous ``expr.split("~")`` also split on a "~" inside a string
# literal (``"x~y" ~ z``) or inside a parenthesized/function argument,
# producing broken fragments. When every "~" is quoted or nested, this
# now returns :not_concat so the dispatcher falls through correctly.
def eval_concat(expr, context)
  cut = find_outside_quotes(expr, "~")
  return :not_concat if cut == -1

  pieces = []
  rest = expr
  while cut != -1
    pieces << rest[0...cut]
    rest = rest[(cut + 1)..]
    cut = find_outside_quotes(rest, "~")
  end
  pieces << rest

  pieces.map { |p| (eval_expr(p.strip, context) || "").to_s }.join
end
|
|
988
|
+
|
|
989
|
+
# ── Arithmetic: +, -, *, //, /, %, ** ──
|
|
990
|
+
|
|
991
|
+
# Arithmetic: +, -, *, //, /, %, ** — tried in ARITHMETIC_OPS order.
# Splits at the first top-level occurrence of each operator; both sides
# are evaluated recursively. Returns :not_arithmetic when no operator
# appears outside quotes/parens.
def eval_arithmetic(expr, context)
  ARITHMETIC_OPS.each do |op|
    at = find_outside_quotes(expr, op)
    next if at.nil? || at < 0

    lhs = expr[0...at].strip
    rhs = expr[(at + op.length)..].strip
    return apply_math(eval_expr(lhs, context), op.strip, eval_expr(rhs, context))
  end
  :not_arithmetic
end
|
|
1001
|
+
|
|
1002
|
+
# ── Function call: name(arg1, arg2) ──
|
|
1003
|
+
|
|
1004
|
+
# Function call: ``name(arg1, arg2)`` where the context holds a callable
# under "name". Returns :not_function when the expression is not a call
# or the context value does not respond to #call.
def eval_function_call(expr, context)
  m = FUNC_CALL_RE.match(expr)
  return :not_function unless m

  callee = context[m[1]]
  return :not_function unless callee.respond_to?(:call)

  arg_text = m[2].strip
  call_args =
    if arg_text.empty?
      []
    else
      split_args_toplevel(arg_text).map { |a| eval_expr(a.strip, context) }
    end
  callee.call(*call_args)
end
|
|
1013
|
+
|
|
1014
|
+
# Quick substring probe for any comparison/logical operator. This is a
# heuristic — it does not skip quoted strings; eval_comparison decides
# the actual semantics.
def has_comparison?(expr)
  operators = [" not in ", " in ", " is not ", " is ", "!=", "==", ">=", "<=",
               ">", "<", " and ", " or ", " not "]
  operators.any? { |op| expr.include?(op) }
end
|
|
1018
|
+
|
|
1019
|
+
# Split comma-separated args at top level (not inside quotes/parens/brackets).
|
|
1020
|
+
# Split comma-separated args at top level — commas inside quotes or any
# bracket pair ((), [], {}) stay with their argument. Each piece is
# stripped; an empty trailing piece is dropped.
def split_args_toplevel(str)
  out = []
  piece = +""
  quote = nil
  nesting = 0

  str.each_char do |c|
    if quote
      piece << c
      quote = nil if c == quote
      next
    end
    case c
    when '"', "'"
      quote = c
      piece << c
    when "(", "[", "{"
      nesting += 1
      piece << c
    when ")", "]", "}"
      nesting -= 1
      piece << c
    when ","
      if nesting.zero?
        out << piece.strip
        piece = +""
      else
        piece << c
      end
    else
      piece << c
    end
  end
  out << piece.strip unless piece.strip.empty?
  out
end
|
|
1049
|
+
|
|
1050
|
+
# -----------------------------------------------------------------------
|
|
1051
|
+
# Comparison / logical evaluator
|
|
1052
|
+
# -----------------------------------------------------------------------
|
|
1053
|
+
|
|
1054
|
+
# Evaluate a boolean/comparison expression ({% if %} conditions etc.).
#
# Precedence, handled top-down: "not" prefix, "or" (lowest), "and",
# "is not"/"is" tests, "not in"/"in" membership, binary comparisons,
# then plain truthiness of the whole expression as the fallback.
#
# eval_fn lets callers substitute a different leaf evaluator; it
# defaults to eval_expr.
#
# NOTE(review): operator detection below uses include?/partition/split,
# which do not skip quoted strings — an operator sequence inside a
# string literal is treated as the operator. Confirm whether any
# template relies on quoted operators before changing this.
def eval_comparison(expr, context, eval_fn = nil)
  eval_fn ||= method(:eval_expr)
  expr = expr.strip

  # Handle 'not' prefix
  if expr.start_with?("not ")
    return !eval_comparison(expr[4..], context, eval_fn)
  end

  # 'or' (lowest precedence)
  or_parts = expr.split(OR_SPLIT_RE)
  if or_parts.length > 1
    return or_parts.any? { |p| eval_comparison(p, context, eval_fn) }
  end

  # 'and'
  and_parts = expr.split(AND_SPLIT_RE)
  if and_parts.length > 1
    return and_parts.all? { |p| eval_comparison(p, context, eval_fn) }
  end

  # 'is not' test
  if expr =~ IS_NOT_RE
    return !eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                      Regexp.last_match(3).strip, context, eval_fn)
  end

  # 'is' test
  if expr =~ IS_RE
    return eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                     Regexp.last_match(3).strip, context, eval_fn)
  end

  # 'not in'
  if expr =~ NOT_IN_RE
    val = eval_fn.call(Regexp.last_match(1).strip, context)
    collection = eval_fn.call(Regexp.last_match(2).strip, context)
    return !(collection.respond_to?(:include?) && collection.include?(val))
  end

  # 'in' — non-collections (no #include?) are never members.
  if expr =~ IN_RE
    val = eval_fn.call(Regexp.last_match(1).strip, context)
    collection = eval_fn.call(Regexp.last_match(2).strip, context)
    return collection.respond_to?(:include?) ? collection.include?(val) : false
  end

  # Binary comparison operators — first matching operator wins; "!="/"=="
  # are checked before ">="/"<=" so compound operators are not split on
  # their single-character suffixes.
  [["!=", ->(a, b) { a != b }],
   ["==", ->(a, b) { a == b }],
   [">=", ->(a, b) { a.to_f >= b.to_f }],
   ["<=", ->(a, b) { a.to_f <= b.to_f }],
   [">", ->(a, b) { a.to_f > b.to_f }],
   ["<", ->(a, b) { a.to_f < b.to_f }]].each do |op, fn|
    if expr.include?(op)
      left, _, right = expr.partition(op)
      l = eval_fn.call(left.strip, context)
      r = eval_fn.call(right.strip, context)
      begin
        return fn.call(l, r)
      rescue
        # Incomparable operands evaluate to false rather than raising.
        return false
      end
    end
  end

  # Fall through to simple eval
  val = eval_fn.call(expr, context)
  truthy?(val)
end
|
|
1124
|
+
|
|
1125
|
+
# -----------------------------------------------------------------------
|
|
1126
|
+
# Tests ('is' expressions)
|
|
1127
|
+
# -----------------------------------------------------------------------
|
|
1128
|
+
|
|
1129
|
+
# Evaluate an ``is`` test, e.g. ``x is defined`` or ``n is divisible by(3)``.
#
# value_expr - left-hand expression, evaluated via eval_fn
# test_name  - the test identifier after "is"
# args_str   - raw text following the test name ("by(3)" for divisible)
# eval_fn    - optional leaf evaluator; defaults to eval_expr
#
# Unknown tests evaluate to false.
def eval_test(value_expr, test_name, args_str, context, eval_fn = nil)
  evaluator = eval_fn || method(:eval_expr)
  subject = evaluator.call(value_expr, context)

  # "divisible by(n)" is the only built-in test that takes an argument.
  if test_name == "divisible"
    m = DIVISIBLE_BY_RE.match(args_str)
    return false unless m
    divisor = m[1].to_i
    return subject.is_a?(Integer) && (subject % divisor).zero?
  end

  handler = @tests[test_name]
  handler ? handler.call(subject) : false
end
|
|
1148
|
+
|
|
1149
|
+
# Built-in ``is`` tests, keyed by name. Each maps a single value to a
# boolean; "null" and "none" share one nil check.
def default_tests
  nil_test = ->(v) { v.nil? }
  {
    "defined" => ->(v) { !v.nil? },
    "empty" => ->(v) { v.nil? || (v.respond_to?(:empty?) && v.empty?) || v == 0 || v == false },
    "null" => nil_test,
    "none" => nil_test,
    "even" => ->(v) { v.is_a?(Integer) && v.even? },
    "odd" => ->(v) { v.is_a?(Integer) && v.odd? },
    "iterable" => ->(v) { v.respond_to?(:each) && !v.is_a?(String) },
    "string" => ->(v) { v.is_a?(String) },
    "number" => ->(v) { v.is_a?(Numeric) },
    "boolean" => ->(v) { v.is_a?(TrueClass) || v.is_a?(FalseClass) },
  }
end
|
|
1163
|
+
|
|
1164
|
+
# -----------------------------------------------------------------------
|
|
1165
|
+
# Variable resolver
|
|
1166
|
+
# -----------------------------------------------------------------------
|
|
1167
|
+
|
|
1168
|
+
# Resolve a dotted/bracketed variable path ("user.name", "items[0]",
# "items[1:3]") against the context. Path segments are memoized per
# expression in @resolve_cache. Returns nil as soon as any segment is
# missing or resolves to nil.
#
# NOTE(review): Hash lookup uses `value[part] || value[part.to_sym]`, so
# a `false` stored under a String key falls through to the Symbol key —
# confirm false values round-trip as intended.
def resolve(expr, context)
  parts = @resolve_cache[expr]
  unless parts
    parts = expr.split(RESOLVE_SPLIT_RE).reject(&:empty?)
    @resolve_cache[expr] = parts
  end

  value = context

  parts.each do |part|
    part = part.strip.gsub(RESOLVE_STRIP_RE, "") # strip quotes from bracket access
    if value.is_a?(Hash) || value.is_a?(LoopContext)
      value = value[part] || value[part.to_sym]
    elsif value.is_a?(Array)
      # Slice syntax: value[1:5], value[:10], value[start:end]
      if part.include?(":") && !(part.start_with?('"') || part.start_with?("'"))
        slice_parts = part.split(":", 2)
        # Empty bound means open-ended; otherwise bounds are expressions.
        s_start = slice_parts[0].strip.empty? ? nil : eval_expr(slice_parts[0].strip, context).to_i
        s_end = slice_parts[1].strip.empty? ? nil : eval_expr(slice_parts[1].strip, context).to_i
        if s_start && s_end
          value = value[s_start...s_end]
        elsif s_start
          value = value[s_start..]
        elsif s_end
          value = value[0...s_end]
        else
          value = value.dup
        end
        next
      end
      # Plain index: numeric literal fast path, else evaluate expression.
      idx = if part =~ DIGIT_RE
              part.to_i
            else
              eval_expr(part, context)
            end
      idx = idx.to_i if idx.is_a?(Numeric)
      value = idx.is_a?(Integer) ? value[idx] : nil
    elsif value.respond_to?(part.to_sym)
      # Duck-typed attribute access on arbitrary objects.
      # NOTE(review): this sends any path segment name to the object —
      # relies on callers not placing dangerous objects in the context.
      value = value.send(part.to_sym)
    else
      return nil
    end
    return nil if value.nil?
  end

  value
end
|
|
1215
|
+
|
|
1216
|
+
# -----------------------------------------------------------------------
|
|
1217
|
+
# Math
|
|
1218
|
+
# -----------------------------------------------------------------------
|
|
1219
|
+
|
|
1220
|
+
# Apply an arithmetic operator to two operands. nil operands become 0,
# division by zero yields 0, and integral results collapse back to
# Integer (except that "/" always computes float division first).
def apply_math(left, op, right)
  a = (left || 0).to_f
  b = (right || 0).to_f
  # "/" results keep float form unless the quotient is integral anyway.
  integral_inputs = a == a.to_i && b == b.to_i && op != "/"

  value =
    case op
    when "+" then a + b
    when "-" then a - b
    when "*" then a * b
    when "/" then b != 0 ? a / b : 0
    when "//" then b != 0 ? (a / b).floor : 0
    when "%" then b != 0 ? a % b : 0
    when "**" then a**b
    else 0
    end

  if integral_inputs && value == value.to_i
    value.to_i
  elsif value.to_f == value.to_i
    value.to_i
  else
    value
  end
end
|
|
1237
|
+
|
|
1238
|
+
# -----------------------------------------------------------------------
|
|
1239
|
+
# Block handlers
|
|
1240
|
+
# -----------------------------------------------------------------------
|
|
1241
|
+
|
|
1242
|
+
# {% if %}...{% elseif %}...{% else %}...{% endif %}
|
|
1243
|
+
# Render an {% if %} block including optional {% elseif %}/{% elif %} and
# {% else %} branches, up to the matching {% endif %}.
#
# tokens  - the full token stream, an array of [type, raw] pairs
# start   - index of the opening {% if %} BLOCK token
# context - variable context used to evaluate branch conditions
#
# Returns [rendered_string, next_index] where next_index points just past
# the matching {% endif %} (or the end of the stream if unterminated).
def handle_if(tokens, start, context)
  content, _, strip_a_open = strip_tag(tokens[start][1])
  condition_expr = content.sub(/\Aif\s+/, "").strip

  # Each branch is [condition_or_nil, body_tokens]; a nil condition
  # marks the {% else %} branch, which always matches.
  branches = []
  current_tokens = []
  current_cond = condition_expr
  depth = 0 # nesting level of inner {% if %} blocks inside this one
  i = start + 1

  # If the opening {%- if -%} has strip_after, lstrip the first body text
  pending_lstrip = strip_a_open

  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, strip_b_tag, strip_a_tag = strip_tag(raw)
      tag = tag_content.split[0] || ""

      if tag == "if"
        # Nested if: track depth and keep its tokens verbatim in the body.
        depth += 1
        current_tokens << tokens[i]
      elsif tag == "endif" && depth > 0
        # Closes a nested if, not ours.
        depth -= 1
        current_tokens << tokens[i]
      elsif tag == "endif" && depth == 0
        # Apply strip_before from endif to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        i += 1
        break
      elsif (tag == "elseif" || tag == "elif") && depth == 0
        # Apply strip_before from elseif to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        current_cond = tag_content.sub(/\A(?:elseif|elif)\s+/, "").strip
        current_tokens = []
        pending_lstrip = strip_a_tag
      elsif tag == "else" && depth == 0
        # Apply strip_before from else to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        current_cond = nil
        current_tokens = []
        pending_lstrip = strip_a_tag
      else
        # Any other block tag belongs to the branch body.
        current_tokens << tokens[i]
      end
    else
      tok = tokens[i]
      # NOTE(review): pending_lstrip is only consumed by the first TEXT
      # token; a leading BLOCK/VAR token leaves it pending — presumably
      # intentional so the next text still gets stripped. Confirm.
      if pending_lstrip && ttype == TEXT
        tok = [TEXT, tok[1].lstrip]
        pending_lstrip = false
      end
      current_tokens << tok
    end
    i += 1
  end

  # First branch whose condition is nil (else) or evaluates truthy wins.
  branches.each do |cond, branch_tokens|
    if cond.nil? || eval_comparison(cond, context, method(:eval_var_raw))
      return [render_tokens(branch_tokens.dup, context), i]
    end
  end

  ["", i]
end
|
|
1316
|
+
|
|
1317
|
+
# {% for item in items %}...{% else %}...{% endfor %}
|
|
1318
|
+
# Render a {% for item in items %} loop, with optional {% else %} branch
# executed when the iterable is nil or empty, up to {% endfor %}.
#
# tokens  - the full token stream, an array of [type, raw] pairs
# start   - index of the opening {% for %} BLOCK token
# context - variable context; each iteration gets a child LoopContext
#
# Returns [rendered_string, next_index] where next_index points just past
# the matching {% endfor %}.
def handle_for(tokens, start, context)
  content, _, strip_a_open = strip_tag(tokens[start][1])
  m = content.match(FOR_RE)
  return ["", start + 1] unless m

  var1 = m[1]            # loop variable (or key / index when var2 given)
  var2 = m[2]            # optional second variable ("for k, v in ...")
  iterable_expr = m[3].strip

  body_tokens = []
  else_tokens = []
  in_else = false
  for_depth = 0          # nesting of inner {% for %} blocks
  if_depth = 0           # nesting of {% if %} blocks (so their else isn't ours)
  i = start + 1
  pending_lstrip = strip_a_open

  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, strip_b_tag, strip_a_tag = strip_tag(raw)
      tag = tag_content.split[0] || ""

      if tag == "for"
        for_depth += 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endfor" && for_depth > 0
        for_depth -= 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endfor" && for_depth == 0
        # Our terminator: optionally rstrip the last collected text token.
        target = in_else ? else_tokens : body_tokens
        if strip_b_tag && !target.empty? && target[-1][0] == TEXT
          target[-1] = [TEXT, target[-1][1].rstrip]
        end
        i += 1
        break
      elsif tag == "if"
        if_depth += 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endif"
        if_depth -= 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "else" && for_depth == 0 && if_depth == 0
        # This else belongs to the for loop (empty-iterable branch).
        if strip_b_tag && !body_tokens.empty? && body_tokens[-1][0] == TEXT
          body_tokens[-1] = [TEXT, body_tokens[-1][1].rstrip]
        end
        in_else = true
        pending_lstrip = strip_a_tag
      else
        (in_else ? else_tokens : body_tokens) << tokens[i]
      end
    else
      tok = tokens[i]
      if pending_lstrip && ttype == TEXT
        tok = [TEXT, tok[1].lstrip]
        pending_lstrip = false
      end
      (in_else ? else_tokens : body_tokens) << tok
    end
    i += 1
  end

  iterable = eval_expr(iterable_expr, context)

  # nil or empty iterable: render the else branch if present, else nothing.
  if iterable.nil? || (iterable.respond_to?(:empty?) && iterable.empty?)
    if else_tokens.any?
      return [render_tokens(else_tokens.dup, context), i]
    end
    return ["", i]
  end

  output = []
  items = iterable.is_a?(Hash) ? iterable.to_a : Array(iterable)
  total = items.length

  items.each_with_index do |item, idx|
    loop_ctx = LoopContext.new(context)
    # Twig-style `loop` metadata available inside the body.
    loop_ctx["loop"] = {
      "index" => idx + 1,
      "index0" => idx,
      "first" => idx == 0,
      "last" => idx == total - 1,
      "length" => total,
      "revindex" => total - idx,
      "revindex0" => total - idx - 1,
      "even" => ((idx + 1) % 2).zero?,
      "odd" => ((idx + 1) % 2) != 0,
    }

    if iterable.is_a?(Hash)
      # Hash iteration: one var binds the key; two vars bind key, value.
      key, value = item
      if var2
        loop_ctx[var1] = key
        loop_ctx[var2] = value
      else
        loop_ctx[var1] = key
      end
    else
      # Array iteration: one var binds the item; two vars bind index, item.
      if var2
        loop_ctx[var1] = idx
        loop_ctx[var2] = item
      else
        loop_ctx[var1] = item
      end
    end

    output << render_tokens(body_tokens.dup, loop_ctx)
  end

  [output.join, i]
end
|
|
1429
|
+
|
|
1430
|
+
# {% set name = expr %}
|
|
1431
|
+
# Handle a {% set name = expr %} assignment: evaluate the right-hand
# expression and store it under the given name in the current context.
# Malformed set tags are silently ignored.
def handle_set(content, context)
  match = SET_RE.match(content)
  return unless match

  context[match[1]] = eval_var_raw(match[2].strip, context)
end
|
|
1438
|
+
|
|
1439
|
+
# {% include "file.html" %}
|
|
1440
|
+
# Handle {% include "file.html" %} with optional "with {...}" context and
# optional "ignore missing" modifier (which swallows load failures).
#
# Returns the rendered include, or "" when the tag is malformed or the
# template is missing and "ignore missing" was requested.
def handle_include(content, context)
  ignore_missing = content.include?("ignore missing")
  cleaned = content.gsub("ignore missing", "").strip

  match = cleaned.match(INCLUDE_RE)
  return "" unless match

  template_name = match[1]
  with_expr = match[2]

  begin
    source = load_template(template_name)
  rescue
    return "" if ignore_missing
    raise
  end

  # Render the include against a copy of the caller's context, optionally
  # merged with the evaluated "with" hash.
  child_context = context.dup
  if with_expr
    extra = eval_expr(with_expr, context)
    child_context.merge!(stringify_keys(extra)) if extra.is_a?(Hash)
  end

  execute(source, child_context)
end
|
|
1465
|
+
|
|
1466
|
+
# {% macro name(args) %}...{% endmacro %}
|
|
1467
|
+
# Handle {% macro name(args) %}...{% endmacro %}: collect the body tokens
# and register a callable under the macro's name in the current context.
#
# tokens  - the full token stream
# start   - index of the opening {% macro %} token
# context - context the macro is registered into (and captured by)
#
# Returns the index of the token just past {% endmacro %}.
def handle_macro(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  m = content.match(MACRO_RE)
  unless m
    # Malformed macro header: skip everything up to and past endmacro.
    i = start + 1
    while i < tokens.length
      if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
        return i + 1
      end
      i += 1
    end
    return i
  end

  macro_name = m[1]
  param_names = m[2].split(",").map(&:strip).reject(&:empty?)

  # Collect body tokens up to (exclusive) the first endmacro.
  # NOTE(review): nested macros are not depth-tracked here — presumably
  # macros cannot nest; confirm against the tokenizer's grammar.
  body_tokens = []
  i = start + 1
  while i < tokens.length
    if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
      i += 1
      break
    end
    body_tokens << tokens[i]
    i += 1
  end

  engine = self
  captured_body = body_tokens.dup
  # Capture the live context (NOT a dup): the macro dups it at call time,
  # so it sees variables set after the macro definition. This differs
  # deliberately(?) from _make_macro_fn, which snapshots context.dup at
  # import time — confirm before unifying the two.
  captured_context = context

  context[macro_name] = lambda { |*args|
    macro_ctx = captured_context.dup
    # Bind positional arguments to parameter names; missing args are nil.
    param_names.each_with_index do |pname, pi|
      macro_ctx[pname] = pi < args.length ? args[pi] : nil
    end
    # Macro output is pre-rendered and must not be re-escaped.
    Tina4::SafeString.new(engine.send(:render_tokens, captured_body.dup, macro_ctx))
  }

  i
end
|
|
1509
|
+
|
|
1510
|
+
# {% from "file" import macro1, macro2 %}
|
|
1511
|
+
# Handle {% from "file" import macro1, macro2 %}: load and tokenize the
# named template, find each requested {% macro %} definition, and register
# a callable for it in the current context.
#
# content - the inside of the from/import tag
# context - context the imported macros are registered into
#
# Returns nil; registration happens via context mutation.
def handle_from_import(content, context)
  m = content.match(FROM_IMPORT_RE)
  return unless m

  filename = m[1]
  names = m[2].split(",").map(&:strip).reject(&:empty?)

  source = load_template(filename)
  tokens = tokenize(source)

  i = 0
  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, _, _ = strip_tag(raw)
      tag = (tag_content.split[0] || "")
      if tag == "macro"
        macro_m = tag_content.match(MACRO_RE)
        # Only import macros that were explicitly requested by name.
        if macro_m && names.include?(macro_m[1])
          macro_name = macro_m[1]
          param_names = macro_m[2].split(",").map(&:strip).reject(&:empty?)

          # Collect the macro body up to (exclusive) endmacro; the inner
          # loop leaves i pointing just past the endmacro token.
          body_tokens = []
          i += 1
          while i < tokens.length
            if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
              i += 1
              break
            end
            body_tokens << tokens[i]
            i += 1
          end

          # Snapshot context.dup at import time (see _make_macro_fn).
          context[macro_name] = _make_macro_fn(body_tokens.dup, param_names.dup, context.dup)
          # `next` skips the outer i += 1 — the inner loop already
          # advanced i past endmacro.
          next
        end
      end
    end
    i += 1
  end
end
|
|
1552
|
+
|
|
1553
|
+
# Build an isolated lambda for a macro — avoids closure-in-loop variable sharing.
|
|
1554
|
+
# Build an isolated lambda for a macro — avoids closure-in-loop variable
# sharing by receiving its own body/params/context snapshot as arguments.
#
# @param body_tokens [Array] token stream of the macro body
# @param param_names [Array<String>] declared parameter names
# @param ctx [Object] context snapshot the macro renders against
# @return [Proc] callable that renders the macro and returns a SafeString
def _make_macro_fn(body_tokens, param_names, ctx)
  engine = self
  ->(*args) do
    call_ctx = ctx.dup
    # Bind positional arguments to parameter names; missing args are nil.
    param_names.each_with_index do |param, index|
      call_ctx[param] = index < args.length ? args[index] : nil
    end
    Tina4::SafeString.new(engine.send(:render_tokens, body_tokens.dup, call_ctx))
  end
end
|
|
1564
|
+
|
|
1565
|
+
# {% cache "key" ttl %}...{% endcache %}
|
|
1566
|
+
# Handle {% cache "key" ttl %}...{% endcache %}: fragment caching with a
# per-engine in-memory store (@fragment_cache).
#
# On a fresh cache hit the body is skipped entirely and the cached HTML is
# returned; otherwise the body is rendered and stored with the given TTL
# (seconds, default 60).
#
# Returns [rendered_or_cached_string, next_index].
def handle_cache(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  m = content.match(CACHE_RE)
  cache_key = m ? m[1] : "default"
  ttl = m && m[2] ? m[2].to_i : 60

  # Check cache
  cached = @fragment_cache[cache_key]
  if cached
    html_content, expires_at = cached
    if Time.now.to_f < expires_at
      # Fresh hit: skip to the matching endcache without rendering,
      # tracking nested cache blocks via depth.
      i = start + 1
      depth = 0
      while i < tokens.length
        if tokens[i][0] == BLOCK
          tc, _, _ = strip_tag(tokens[i][1])
          tag = tc.split[0] || ""
          if tag == "cache"
            depth += 1
          elsif tag == "endcache"
            return [html_content, i + 1] if depth == 0
            depth -= 1
          end
        end
        i += 1
      end
      # Unterminated block: still serve the cached fragment.
      return [html_content, i]
    end
  end

  # Miss or stale entry: collect body tokens up to the matching endcache.
  body_tokens = []
  i = start + 1
  depth = 0
  while i < tokens.length
    if tokens[i][0] == BLOCK
      tc, _, _ = strip_tag(tokens[i][1])
      tag = tc.split[0] || ""
      if tag == "cache"
        depth += 1
        body_tokens << tokens[i]
      elsif tag == "endcache"
        if depth == 0
          i += 1
          break
        end
        depth -= 1
        body_tokens << tokens[i]
      else
        body_tokens << tokens[i]
      end
    else
      body_tokens << tokens[i]
    end
    i += 1
  end

  rendered = render_tokens(body_tokens.dup, context)
  @fragment_cache[cache_key] = [rendered, Time.now.to_f + ttl]
  [rendered, i]
end
|
|
1627
|
+
|
|
1628
|
+
# Handle {% spaceless %}...{% endspaceless %}: render the body, then
# collapse whitespace between adjacent HTML tags (">   <" becomes "><").
#
# Returns [rendered_string, next_index] with next_index just past the
# matching {% endspaceless %}.
def handle_spaceless(tokens, start, context)
  collected = []
  i = start + 1
  nesting = 0
  while i < tokens.length
    token = tokens[i]
    if token[0] == BLOCK
      inner, _, _ = strip_tag(token[1])
      name = inner.split[0] || ""
      case name
      when "spaceless"
        nesting += 1
        collected << token
      when "endspaceless"
        if nesting == 0
          # Our terminator: step past it and stop collecting.
          i += 1
          break
        end
        nesting -= 1
        collected << token
      else
        collected << token
      end
    else
      collected << token
    end
    i += 1
  end

  rendered = render_tokens(collected.dup, context).gsub(SPACELESS_RE, "><")
  [rendered, i]
end
|
|
1659
|
+
|
|
1660
|
+
# Handle {% autoescape %} / {% autoescape false %} ... {% endautoescape %}.
#
# When the mode is "false", auto-escaping is temporarily disabled while the
# body renders; otherwise the body renders under the current escaping mode.
#
# Returns [rendered_string, next_index] with next_index just past the
# matching {% endautoescape %}.
def handle_autoescape(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  mode_match = content.match(AUTOESCAPE_RE)
  auto_escape_on = !(mode_match && mode_match[1] == "false")

  # Collect body tokens up to the matching endautoescape (depth-tracked
  # so nested autoescape blocks stay inside the body).
  body_tokens = []
  i = start + 1
  depth = 0
  while i < tokens.length
    if tokens[i][0] == BLOCK
      tc, _, _ = strip_tag(tokens[i][1])
      tag = tc.split[0] || ""
      if tag == "autoescape"
        depth += 1
        body_tokens << tokens[i]
      elsif tag == "endautoescape"
        if depth == 0
          i += 1
          break
        end
        depth -= 1
        body_tokens << tokens[i]
      else
        body_tokens << tokens[i]
      end
    else
      body_tokens << tokens[i]
    end
    i += 1
  end

  if auto_escape_on
    rendered = render_tokens(body_tokens.dup, context)
  else
    old_auto_escape = @auto_escape
    @auto_escape = false
    begin
      rendered = render_tokens(body_tokens.dup, context)
    ensure
      # Restore the flag even if rendering raises; previously an exception
      # would leave auto-escaping disabled for the rest of the render.
      @auto_escape = old_auto_escape
    end
  end

  [rendered, i]
end
|
|
1702
|
+
|
|
1703
|
+
# -----------------------------------------------------------------------
|
|
1704
|
+
# Helpers
|
|
1705
|
+
# -----------------------------------------------------------------------
|
|
1706
|
+
|
|
1707
|
+
# Template truthiness: nil, false, 0, "" and any empty collection are
# falsy; everything else is truthy.
def truthy?(val)
  definitely_falsy = val.nil? || val == false || val == 0 || val == ""
  return false if definitely_falsy

  !(val.respond_to?(:empty?) && val.empty?)
end
|
|
1712
|
+
|
|
1713
|
+
# Return a shallow copy of the hash with every key converted to a String.
# Non-Hash input yields an empty hash.
def stringify_keys(hash)
  return {} unless hash.is_a?(Hash)

  hash.map { |key, value| [key.to_s, value] }.to_h
end
|
|
1717
|
+
|
|
1718
|
+
# -----------------------------------------------------------------------
|
|
1719
|
+
# Built-in filters (53 total)
|
|
1720
|
+
# -----------------------------------------------------------------------
|
|
1721
|
+
|
|
1722
|
+
# Build the hash of built-in template filters. Every filter is a lambda
# taking (value, *args) so unused arguments are harmless.
#
# Fix: js_escape previously used plain-string gsub replacements where
# "\\\\" collapses to a single backslash (a no-op replacement) and "\\'"
# means "post-match" in String#gsub, silently corrupting output containing
# single quotes. Block-form gsub is used so replacements are literal.
def default_filters
  {
    # -- Text --
    "upper" => ->(v, *_a) { v.to_s.upcase },
    "lower" => ->(v, *_a) { v.to_s.downcase },
    "capitalize" => ->(v, *_a) { v.to_s.capitalize },
    "title" => ->(v, *_a) { v.to_s.split.map(&:capitalize).join(" ") },
    "trim" => ->(v, *_a) { v.to_s.strip },
    "ltrim" => ->(v, *_a) { v.to_s.lstrip },
    "rtrim" => ->(v, *_a) { v.to_s.rstrip },
    "replace" => ->(v, *a) {
      # Accepts either a replacement Hash or (search, replace) pair.
      # NOTE(review): gsub with a string replacement interprets \&, \1 …
      # in user-supplied replacements — confirm whether that is intended.
      if a.length == 1 && a[0].is_a?(Hash)
        result = v.to_s
        a[0].each { |old, new_val| result = result.gsub(old.to_s, new_val.to_s) }
        result
      elsif a.length >= 2
        v.to_s.gsub(a[0].to_s, a[1].to_s)
      else
        v.to_s
      end
    },
    "striptags" => ->(v, *_a) { v.to_s.gsub(STRIPTAGS_RE, "") },

    # -- Encoding --
    "escape" => ->(v, *_a) { Frond.escape_html(v.to_s) },
    "e" => ->(v, *_a) { Frond.escape_html(v.to_s) },
    "raw" => ->(v, *_a) { v },
    "safe" => ->(v, *_a) { v },
    "json_encode" => ->(v, *_a) { JSON.generate(v) rescue v.to_s },
    "json_decode" => ->(v, *_a) { v.is_a?(String) ? (JSON.parse(v) rescue v) : v },
    "base64_encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
    "base64encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
    "base64_decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
    "base64decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
    "data_uri" => ->(v, *_a) {
      # Expects {type:, content:}; content may be an IO-like object.
      if v.is_a?(Hash)
        ct = v[:type] || v["type"] || "application/octet-stream"
        raw = v[:content] || v["content"] || ""
        raw = raw.respond_to?(:read) ? raw.read : raw
        "data:#{ct};base64,#{Base64.strict_encode64(raw.to_s)}"
      else
        v.to_s
      end
    },
    "url_encode" => ->(v, *_a) { CGI.escape(v.to_s) },

    # -- JSON / JS --
    "to_json" => ->(v, *a) {
      indent = a[0] ? a[0].to_i : nil
      json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
      # Escape <, >, & for safe HTML embedding (literal \uXXXX sequences
      # are valid JSON escapes that browsers won't treat as markup).
      Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
    },
    "tojson" => ->(v, *a) {
      indent = a[0] ? a[0].to_i : nil
      json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
      Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
    },
    "js_escape" => ->(v, *_a) {
      # Block form is required: a string replacement like "\\'" means
      # "post-match" and "\\\\" collapses to one backslash in gsub.
      Tina4::SafeString.new(
        v.to_s.gsub("\\") { "\\\\" }
              .gsub("'") { "\\'" }
              .gsub('"') { '\\"' }
              .gsub("\n", "\\n").gsub("\r", "\\r").gsub("\t", "\\t")
      )
    },

    # -- Hashing --
    "md5" => ->(v, *_a) { Digest::MD5.hexdigest(v.to_s) },
    "sha256" => ->(v, *_a) { Digest::SHA256.hexdigest(v.to_s) },

    # -- Numbers --
    "abs" => ->(v, *_a) { v.is_a?(Numeric) ? v.abs : v.to_f.abs },
    "round" => ->(v, *a) { v.to_f.round(a[0] ? a[0].to_i : 0) },
    "int" => ->(v, *_a) { v.to_i },
    "float" => ->(v, *_a) { v.to_f },
    "number_format" => ->(v, *a) {
      decimals = a[0] ? a[0].to_i : 0
      formatted = format("%.#{decimals}f", v.to_f)
      # Add comma thousands separator
      parts = formatted.split(".")
      parts[0] = parts[0].gsub(THOUSANDS_RE, '\\1,')
      parts.join(".")
    },

    # -- Date --
    "date" => ->(v, *a) {
      fmt = a[0] || "%Y-%m-%d"
      begin
        if v.is_a?(String)
          dt = DateTime.parse(v)
          dt.strftime(fmt)
        elsif v.respond_to?(:strftime)
          v.strftime(fmt)
        else
          v.to_s
        end
      rescue
        # Unparseable input falls back to the raw string.
        v.to_s
      end
    },

    # -- Arrays --
    "length" => ->(v, *_a) { v.respond_to?(:length) ? v.length : v.to_s.length },
    "first" => ->(v, *_a) { v.respond_to?(:first) ? v.first : (v.to_s[0] rescue nil) },
    "last" => ->(v, *_a) { v.respond_to?(:last) ? v.last : (v.to_s[-1] rescue nil) },
    "reverse" => ->(v, *_a) { v.respond_to?(:reverse) ? v.reverse : v.to_s.reverse },
    "sort" => ->(v, *_a) { v.respond_to?(:sort) ? v.sort : v },
    "shuffle" => ->(v, *_a) { v.respond_to?(:shuffle) ? v.shuffle : v },
    "unique" => ->(v, *_a) { v.is_a?(Array) ? v.uniq : v },
    "join" => ->(v, *a) { v.respond_to?(:join) ? v.join(a[0] || ", ") : v.to_s },
    "split" => ->(v, *a) { v.to_s.split(a[0] || " ") },
    "slice" => ->(v, *a) {
      if a.length >= 2
        s = a[0].to_i
        e = a[1].to_i
        if v.is_a?(Array)
          v[s...e]
        else
          v.to_s[s...e]
        end
      else
        v
      end
    },
    "batch" => ->(v, *a) {
      if a[0] && v.respond_to?(:each_slice)
        v.each_slice(a[0].to_i).to_a
      else
        [v]
      end
    },
    "map" => ->(v, *a) {
      if a[0] && v.is_a?(Array)
        v.map { |item| item.is_a?(Hash) ? (item[a[0]] || item[a[0].to_sym]) : nil }
      else
        v
      end
    },
    "filter" => ->(v, *_a) { v.is_a?(Array) ? v.select { |item| item } : v },
    "column" => ->(v, *a) {
      if a[0] && v.is_a?(Array)
        v.map { |row| row.is_a?(Hash) ? (row[a[0]] || row[a[0].to_sym]) : nil }
      else
        v
      end
    },

    # -- Dict --
    "keys" => ->(v, *_a) { v.respond_to?(:keys) ? v.keys : [] },
    "values" => ->(v, *_a) { v.respond_to?(:values) ? v.values : [v] },
    "merge" => ->(v, *a) {
      if v.respond_to?(:merge) && a[0].is_a?(Hash)
        v.merge(a[0])
      elsif v.is_a?(Array) && a[0].is_a?(Array)
        v + a[0]
      else
        v
      end
    },

    # -- Utility --
    "default" => ->(v, *a) { (v.nil? || v.to_s.empty?) ? (a[0] || "") : v },
    # dump filter — gated on TINA4_DEBUG=true via Frond.render_dump.
    # Both the |dump filter and the dump() global delegate to the same
    # helper so they produce identical output and obey the same gating.
    "dump" => ->(v, *_a) { Frond.render_dump(v) },
    "string" => ->(v, *_a) { v.to_s },
    "truncate" => ->(v, *a) {
      len = a[0] ? a[0].to_i : 50
      str = v.to_s
      str.length > len ? str[0...len] + "..." : str
    },
    "wordwrap" => ->(v, *a) {
      width = a[0] ? a[0].to_i : 75
      words = v.to_s.split
      lines = []
      current = +""
      words.each do |word|
        if !current.empty? && current.length + 1 + word.length > width
          lines << current
          current = word
        else
          current = current.empty? ? word : "#{current} #{word}"
        end
      end
      lines << current unless current.empty?
      lines.join("\n")
    },
    "slug" => ->(v, *_a) { v.to_s.downcase.gsub(SLUG_CLEAN_RE, "-").gsub(SLUG_TRIM_RE, "") },
    "nl2br" => ->(v, *_a) { v.to_s.gsub("\n", "<br>\n") },
    "format" => ->(v, *a) {
      if a.any?
        v.to_s % a
      else
        v.to_s
      end
    },
    "form_token" => ->(_v, *_a) { Frond.generate_form_token(_v.to_s) },
  }
end
|
|
1921
|
+
|
|
1922
|
+
# -----------------------------------------------------------------------
|
|
1923
|
+
# Built-in globals
|
|
1924
|
+
# -----------------------------------------------------------------------
|
|
1925
|
+
|
|
1926
|
+
# Register the built-in template globals (CSRF form-token helpers and the
# debug dump() function) into @globals.
def register_builtin_globals
  @globals["form_token"] = ->(descriptor = "") { Frond.generate_form_token(descriptor.to_s) }

  # Both spellings resolve to the same raw-token helper.
  token_value = ->(descriptor = "") { Frond.generate_form_token_value(descriptor.to_s) }
  @globals["formTokenValue"] = token_value
  @globals["form_token_value"] = token_value

  # Debug helper: {{ dump(x) }} — gated on TINA4_DEBUG=true.
  # Both this global and the |dump filter call Frond.render_dump which
  # returns an empty SafeString in production so dump never leaks state.
  @globals["dump"] = ->(value = nil) { Frond.render_dump(value) }
end
|
|
1936
|
+
|
|
1937
|
+
# Render a value as a pre-formatted inspect() wrapped in <pre> tags.
|
|
1938
|
+
#
|
|
1939
|
+
# Gated on TINA4_DEBUG=true. In production (TINA4_DEBUG unset or false)
|
|
1940
|
+
# this returns an empty SafeString to avoid leaking internal state,
|
|
1941
|
+
# object shapes, or sensitive values into rendered HTML.
|
|
1942
|
+
#
|
|
1943
|
+
# Shared by the {{ value|dump }} filter and the {{ dump(value) }}
|
|
1944
|
+
# global function so both produce identical output and obey the same
|
|
1945
|
+
# gating.
|
|
1946
|
+
# See the documentation block above: debug-only inspect dump, gated on
# TINA4_DEBUG=true; returns an empty SafeString in production.
#
# Fix: the escaping gsubs replaced each character with itself (the HTML
# entities had been lost), so <, >, & and " passed through unescaped and
# dump output could inject markup. Proper entities are restored.
def self.render_dump(value)
  return SafeString.new("") unless ENV.fetch("TINA4_DEBUG", "").downcase == "true"

  escaped = value.inspect
                 .gsub("&", "&amp;")
                 .gsub("<", "&lt;")
                 .gsub(">", "&gt;")
                 .gsub('"', "&quot;")
  SafeString.new("<pre>#{escaped}</pre>")
end
|
|
1957
|
+
|
|
1958
|
+
# Generate a JWT form token and return a hidden input element.
|
|
1959
|
+
#
|
|
1960
|
+
# @param descriptor [String] Optional string to enrich the token payload.
|
|
1961
|
+
# - Empty: payload is {"type" => "form"}
|
|
1962
|
+
# - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
|
|
1963
|
+
# - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
|
|
1964
|
+
#
|
|
1965
|
+
# @return [String] <input type="hidden" name="formToken" value="TOKEN">
|
|
1966
|
+
# Session ID used by generate_form_token for CSRF session binding.
# Set this before rendering templates to bind tokens to the current session.
# Defaults to "" (no session binding) — see generate_form_jwt, which only
# embeds "session_id" in the token payload when this is non-empty.
@form_token_session_id = ""

class << self
  # Read/write access to the session ID used for token binding.
  attr_accessor :form_token_session_id

  # Set the session ID used for CSRF form token binding.
  # Parity with Python/PHP/Node: Frond.set_form_token_session_id(id)
  #
  # @param session_id [String] The session ID to bind form tokens to
  def set_form_token_session_id(session_id)
    self.form_token_session_id = session_id
  end
end
|
|
1981
|
+
|
|
1982
|
+
# Generate a raw JWT form token string.
|
|
1983
|
+
#
|
|
1984
|
+
# @param descriptor [String] Optional string to enrich the token payload.
|
|
1985
|
+
# - Empty: payload is {"type" => "form"}
|
|
1986
|
+
# - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
|
|
1987
|
+
# - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
|
|
1988
|
+
#
|
|
1989
|
+
# @return [String] The raw JWT token string.
|
|
1990
|
+
# Generate a raw JWT form token string (see the documentation block above
# for the descriptor format: "", "context", or "context|ref").
#
# @param descriptor [String, nil] optional payload enrichment
# @return [String] the signed JWT produced by Tina4::Auth.create_token
def self.generate_form_jwt(descriptor = "")
  # Deliberate lazy loading to avoid a require cycle at file load time.
  require_relative "log"
  require_relative "auth"

  payload = { "type" => "form", "nonce" => SecureRandom.hex(8) }

  if descriptor && !descriptor.empty?
    # "context|ref" splits into two payload fields; a bare descriptor is
    # just the context.
    context_part, ref_part = descriptor.split("|", 2)
    payload["context"] = context_part
    payload["ref"] = ref_part if ref_part
  end

  # Include session_id for CSRF session binding (only when one is set).
  sid = form_token_session_id.to_s
  payload["session_id"] = sid unless sid.empty?

  # Token lifetime in minutes, configurable via TINA4_TOKEN_LIMIT.
  ttl_minutes = (ENV["TINA4_TOKEN_LIMIT"] || "60").to_i
  Tina4::Auth.create_token(payload, expires_in: ttl_minutes * 60)
end
|
|
2013
|
+
|
|
2014
|
+
# Generate a JWT form token and wrap it in a hidden form input.
#
# @param descriptor [String] optional payload enrichment (see generate_form_jwt)
# @return [Tina4::SafeString] <input type="hidden" name="formToken" value="TOKEN">
def self.generate_form_token(descriptor = "")
  escaped_token = CGI.escapeHTML(generate_form_jwt(descriptor))
  Tina4::SafeString.new(%(<input type="hidden" name="formToken" value="#{escaped_token}">))
end
|
|
2018
|
+
|
|
2019
|
+
# Return just the raw JWT form token string (no <input> wrapper).
|
|
2020
|
+
# Registered as both formTokenValue and form_token_value template globals.
|
|
2021
|
+
# Return just the raw JWT form token string (no <input> wrapper).
# Registered as both formTokenValue and form_token_value template globals.
#
# @param descriptor [String] optional payload enrichment (see generate_form_jwt)
# @return [Tina4::SafeString] the raw token, marked safe for output
def self.generate_form_token_value(descriptor = "")
  raw_token = generate_form_jwt(descriptor)
  Tina4::SafeString.new(raw_token)
end
|
|
2024
|
+
end
|
|
2025
|
+
end
|