tina4ruby 3.11.15 → 3.11.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +80 -80
  3. data/LICENSE.txt +21 -21
  4. data/README.md +137 -137
  5. data/exe/tina4ruby +5 -5
  6. data/lib/tina4/ai.rb +696 -696
  7. data/lib/tina4/api.rb +189 -189
  8. data/lib/tina4/auth.rb +305 -305
  9. data/lib/tina4/auto_crud.rb +244 -244
  10. data/lib/tina4/cache.rb +154 -154
  11. data/lib/tina4/cli.rb +1449 -1449
  12. data/lib/tina4/constants.rb +46 -46
  13. data/lib/tina4/container.rb +74 -74
  14. data/lib/tina4/cors.rb +74 -74
  15. data/lib/tina4/crud.rb +692 -692
  16. data/lib/tina4/database/sqlite3_adapter.rb +165 -165
  17. data/lib/tina4/database.rb +625 -625
  18. data/lib/tina4/database_result.rb +208 -208
  19. data/lib/tina4/debug.rb +8 -8
  20. data/lib/tina4/dev.rb +14 -14
  21. data/lib/tina4/dev_admin.rb +1291 -935
  22. data/lib/tina4/dev_mailbox.rb +191 -191
  23. data/lib/tina4/drivers/firebird_driver.rb +124 -124
  24. data/lib/tina4/drivers/mongodb_driver.rb +561 -561
  25. data/lib/tina4/drivers/mssql_driver.rb +112 -112
  26. data/lib/tina4/drivers/mysql_driver.rb +90 -90
  27. data/lib/tina4/drivers/odbc_driver.rb +191 -191
  28. data/lib/tina4/drivers/postgres_driver.rb +116 -116
  29. data/lib/tina4/drivers/sqlite_driver.rb +122 -122
  30. data/lib/tina4/env.rb +95 -95
  31. data/lib/tina4/error_overlay.rb +252 -252
  32. data/lib/tina4/events.rb +109 -109
  33. data/lib/tina4/field_types.rb +154 -154
  34. data/lib/tina4/frond.rb +2087 -2025
  35. data/lib/tina4/gallery/auth/meta.json +1 -1
  36. data/lib/tina4/gallery/auth/src/routes/api/gallery_auth.rb +114 -114
  37. data/lib/tina4/gallery/database/meta.json +1 -1
  38. data/lib/tina4/gallery/database/src/routes/api/gallery_db.rb +43 -43
  39. data/lib/tina4/gallery/error-overlay/meta.json +1 -1
  40. data/lib/tina4/gallery/error-overlay/src/routes/api/gallery_crash.rb +17 -17
  41. data/lib/tina4/gallery/orm/meta.json +1 -1
  42. data/lib/tina4/gallery/orm/src/routes/api/gallery_products.rb +16 -16
  43. data/lib/tina4/gallery/queue/meta.json +1 -1
  44. data/lib/tina4/gallery/queue/src/routes/api/gallery_queue.rb +325 -325
  45. data/lib/tina4/gallery/rest-api/meta.json +1 -1
  46. data/lib/tina4/gallery/rest-api/src/routes/api/gallery_hello.rb +14 -14
  47. data/lib/tina4/gallery/templates/meta.json +1 -1
  48. data/lib/tina4/gallery/templates/src/routes/gallery_page.rb +12 -12
  49. data/lib/tina4/gallery/templates/src/templates/gallery_page.twig +257 -257
  50. data/lib/tina4/graphql.rb +966 -966
  51. data/lib/tina4/health.rb +39 -39
  52. data/lib/tina4/html_element.rb +170 -170
  53. data/lib/tina4/job.rb +80 -80
  54. data/lib/tina4/localization.rb +168 -168
  55. data/lib/tina4/log.rb +203 -203
  56. data/lib/tina4/mcp.rb +871 -696
  57. data/lib/tina4/messenger.rb +587 -587
  58. data/lib/tina4/metrics.rb +793 -793
  59. data/lib/tina4/middleware.rb +445 -445
  60. data/lib/tina4/migration.rb +451 -451
  61. data/lib/tina4/orm.rb +790 -790
  62. data/lib/tina4/plan.rb +471 -0
  63. data/lib/tina4/project_index.rb +366 -0
  64. data/lib/tina4/public/css/tina4.css +2463 -2463
  65. data/lib/tina4/public/css/tina4.min.css +1 -1
  66. data/lib/tina4/public/images/logo.svg +5 -5
  67. data/lib/tina4/public/js/frond.min.js +2 -2
  68. data/lib/tina4/public/js/tina4-dev-admin.js +1264 -565
  69. data/lib/tina4/public/js/tina4-dev-admin.min.js +1264 -480
  70. data/lib/tina4/public/js/tina4.min.js +92 -92
  71. data/lib/tina4/public/js/tina4js.min.js +48 -48
  72. data/lib/tina4/public/swagger/index.html +90 -90
  73. data/lib/tina4/public/swagger/oauth2-redirect.html +63 -63
  74. data/lib/tina4/query_builder.rb +380 -380
  75. data/lib/tina4/queue.rb +366 -366
  76. data/lib/tina4/queue_backends/kafka_backend.rb +80 -80
  77. data/lib/tina4/queue_backends/lite_backend.rb +298 -298
  78. data/lib/tina4/queue_backends/mongo_backend.rb +126 -126
  79. data/lib/tina4/queue_backends/rabbitmq_backend.rb +73 -73
  80. data/lib/tina4/rack_app.rb +817 -817
  81. data/lib/tina4/rate_limiter.rb +130 -130
  82. data/lib/tina4/request.rb +268 -268
  83. data/lib/tina4/response.rb +346 -346
  84. data/lib/tina4/response_cache.rb +551 -551
  85. data/lib/tina4/router.rb +406 -406
  86. data/lib/tina4/scss/tina4css/_alerts.scss +34 -34
  87. data/lib/tina4/scss/tina4css/_badges.scss +22 -22
  88. data/lib/tina4/scss/tina4css/_buttons.scss +69 -69
  89. data/lib/tina4/scss/tina4css/_cards.scss +49 -49
  90. data/lib/tina4/scss/tina4css/_forms.scss +156 -156
  91. data/lib/tina4/scss/tina4css/_grid.scss +81 -81
  92. data/lib/tina4/scss/tina4css/_modals.scss +84 -84
  93. data/lib/tina4/scss/tina4css/_nav.scss +149 -149
  94. data/lib/tina4/scss/tina4css/_reset.scss +94 -94
  95. data/lib/tina4/scss/tina4css/_tables.scss +54 -54
  96. data/lib/tina4/scss/tina4css/_typography.scss +55 -55
  97. data/lib/tina4/scss/tina4css/_utilities.scss +197 -197
  98. data/lib/tina4/scss/tina4css/_variables.scss +117 -117
  99. data/lib/tina4/scss/tina4css/base.scss +1 -1
  100. data/lib/tina4/scss/tina4css/colors.scss +48 -48
  101. data/lib/tina4/scss/tina4css/tina4.scss +17 -17
  102. data/lib/tina4/scss_compiler.rb +178 -178
  103. data/lib/tina4/seeder.rb +567 -567
  104. data/lib/tina4/service_runner.rb +303 -303
  105. data/lib/tina4/session.rb +297 -297
  106. data/lib/tina4/session_handlers/database_handler.rb +72 -72
  107. data/lib/tina4/session_handlers/file_handler.rb +67 -67
  108. data/lib/tina4/session_handlers/mongo_handler.rb +49 -49
  109. data/lib/tina4/session_handlers/redis_handler.rb +43 -43
  110. data/lib/tina4/session_handlers/valkey_handler.rb +43 -43
  111. data/lib/tina4/shutdown.rb +84 -84
  112. data/lib/tina4/sql_translation.rb +158 -158
  113. data/lib/tina4/swagger.rb +124 -124
  114. data/lib/tina4/template.rb +894 -894
  115. data/lib/tina4/templates/base.twig +26 -26
  116. data/lib/tina4/templates/errors/302.twig +14 -14
  117. data/lib/tina4/templates/errors/401.twig +9 -9
  118. data/lib/tina4/templates/errors/403.twig +29 -29
  119. data/lib/tina4/templates/errors/404.twig +29 -29
  120. data/lib/tina4/templates/errors/500.twig +38 -38
  121. data/lib/tina4/templates/errors/502.twig +9 -9
  122. data/lib/tina4/templates/errors/503.twig +12 -12
  123. data/lib/tina4/templates/errors/base.twig +37 -37
  124. data/lib/tina4/test_client.rb +159 -159
  125. data/lib/tina4/testing.rb +340 -340
  126. data/lib/tina4/validator.rb +174 -174
  127. data/lib/tina4/version.rb +1 -1
  128. data/lib/tina4/webserver.rb +312 -312
  129. data/lib/tina4/websocket.rb +343 -343
  130. data/lib/tina4/websocket_backplane.rb +190 -190
  131. data/lib/tina4/wsdl.rb +564 -564
  132. data/lib/tina4.rb +460 -458
  133. data/lib/tina4ruby.rb +4 -4
  134. metadata +5 -3
data/lib/tina4/frond.rb CHANGED
@@ -1,2025 +1,2087 @@
1
- # frozen_string_literal: true
2
-
3
- # Tina4 Frond Engine -- Lexer, parser, and runtime.
4
- # Zero-dependency twig-like template engine.
5
- # Supports: variables, filters, if/elseif/else/endif, for/else/endfor,
6
- # extends/block, include, macro, set, comments, whitespace control, tests,
7
- # fragment caching, sandboxing, auto-escaping, custom filters/tests/globals.
8
-
9
- require "json"
10
- require "digest"
11
- require "base64"
12
- require "cgi"
13
- require "uri"
14
- require "date"
15
- require "time"
16
- require "securerandom"
17
-
18
- module Tina4
19
- # Marker class for strings that should not be auto-escaped in Frond.
20
- class SafeString < String
21
- end
22
-
23
- class Frond
24
- # -- Token types ----------------------------------------------------------
25
- TEXT = :text
26
- VAR = :var # {{ ... }}
27
- BLOCK = :block # {% ... %}
28
- COMMENT = :comment # {# ... #}
29
-
30
- # Regex to split template source into tokens
31
- TOKEN_RE = /(\{%-?\s*.*?\s*-?%\})|(\{\{-?\s*.*?\s*-?\}\})|(\{#.*?#\})/m
32
-
33
- # HTML escape table
34
- HTML_ESCAPE_MAP = { "&" => "&amp;", "<" => "&lt;", ">" => "&gt;",
35
- '"' => "&quot;", "'" => "&#39;" }.freeze
36
- HTML_ESCAPE_RE = /[&<>"']/
37
-
38
- # -- Compiled regex constants (optimization: avoid re-compiling in methods) --
39
- EXTENDS_RE = /\{%-?\s*extends\s+["'](.+?)["']\s*-?%\}/
40
- BLOCK_RE = /\{%-?\s*block\s+(\w+)\s*-?%\}(.*?)\{%-?\s*endblock\s*-?%\}/m
41
- STRING_LIT_RE = /\A["'](.*)["']\z/
42
- INTEGER_RE = /\A-?\d+\z/
43
- FLOAT_RE = /\A-?\d+\.\d+\z/
44
- ARRAY_LIT_RE = /\A\[(.+)\]\z/m
45
- HASH_LIT_RE = /\A\{(.+)\}\z/m
46
- HASH_PAIR_RE = /\A\s*(?:["']([^"']+)["']|(\w+))\s*:\s*(.+)\z/
47
- RANGE_LIT_RE = /\A(\d+)\.\.(\d+)\z/
48
- ARITHMETIC_OPS = [" + ", " - ", " * ", " // ", " / ", " % ", " ** "].freeze
49
- FUNC_CALL_RE = /\A(\w+)\s*\((.*)\)\z/m
50
- FILTER_WITH_ARGS_RE = /\A(\w+)\s*\((.*)\)\z/m
51
- FILTER_CMP_RE = /\A(\w+)\s*(!=|==|>=|<=|>|<)\s*(.+)\z/
52
- OR_SPLIT_RE = /\s+or\s+/
53
- AND_SPLIT_RE = /\s+and\s+/
54
- IS_NOT_RE = /\A(.+?)\s+is\s+not\s+(\w+)(.*)\z/
55
- IS_RE = /\A(.+?)\s+is\s+(\w+)(.*)\z/
56
- NOT_IN_RE = /\A(.+?)\s+not\s+in\s+(.+)\z/
57
- IN_RE = /\A(.+?)\s+in\s+(.+)\z/
58
- DIVISIBLE_BY_RE = /\s*by\s*\(\s*(\d+)\s*\)/
59
- RESOLVE_SPLIT_RE = /\.|\[([^\]]+)\]/
60
- RESOLVE_STRIP_RE = /\A["']|["']\z/
61
- DIGIT_RE = /\A\d+\z/
62
- FOR_RE = /\Afor\s+(\w+)(?:\s*,\s*(\w+))?\s+in\s+(.+)\z/
63
- SET_RE = /\Aset\s+(\w+)\s*=\s*(.+)\z/m
64
- INCLUDE_RE = /\Ainclude\s+["'](.+?)["'](?:\s+with\s+(.+))?\z/
65
- MACRO_RE = /\Amacro\s+(\w+)\s*\(([^)]*)\)/
66
- FROM_IMPORT_RE = /\Afrom\s+["'](.+?)["']\s+import\s+(.+)/
67
- CACHE_RE = /\Acache\s+["'](.+?)["']\s*(\d+)?/
68
- SPACELESS_RE = />\s+</
69
- AUTOESCAPE_RE = /\Aautoescape\s+(false|true)/
70
- STRIPTAGS_RE = /<[^>]+>/
71
- THOUSANDS_RE = /(\d)(?=(\d{3})+(?!\d))/
72
- SLUG_CLEAN_RE = /[^a-z0-9]+/
73
- SLUG_TRIM_RE = /\A-|-\z/
74
-
75
- # Set of common no-arg filter names that can be inlined for speed
76
- INLINE_FILTERS = %w[upper lower length trim capitalize title string int escape e].each_with_object({}) { |f, h| h[f] = true }.freeze
77
-
78
- # -- Lazy context overlay for for-loops (avoids full Hash#dup) --
79
- class LoopContext
80
- def initialize(parent)
81
- @parent = parent
82
- @local = {}
83
- end
84
-
85
- def [](key)
86
- @local.key?(key) ? @local[key] : @parent[key]
87
- end
88
-
89
- def []=(key, value)
90
- @local[key] = value
91
- end
92
-
93
- def key?(key)
94
- @local.key?(key) || @parent.key?(key)
95
- end
96
- alias include? key?
97
- alias has_key? key?
98
-
99
- def fetch(key, *args, &block)
100
- if @local.key?(key)
101
- @local[key]
102
- elsif @parent.key?(key)
103
- @parent[key]
104
- elsif block
105
- yield key
106
- elsif !args.empty?
107
- args[0]
108
- else
109
- raise KeyError, "key not found: #{key.inspect}"
110
- end
111
- end
112
-
113
- def merge(other)
114
- dup_hash = to_h
115
- dup_hash.merge!(other)
116
- dup_hash
117
- end
118
-
119
- def merge!(other)
120
- other.each { |k, v| @local[k] = v }
121
- self
122
- end
123
-
124
- def dup
125
- copy = LoopContext.new(@parent)
126
- @local.each { |k, v| copy[k] = v }
127
- copy
128
- end
129
-
130
- def to_h
131
- h = @parent.is_a?(LoopContext) ? @parent.to_h : @parent.dup
132
- @local.each { |k, v| h[k] = v }
133
- h
134
- end
135
-
136
- def each(&block)
137
- to_h.each(&block)
138
- end
139
-
140
- def respond_to_missing?(name, include_private = false)
141
- @parent.respond_to?(name, include_private) || super
142
- end
143
-
144
- def is_a?(klass)
145
- klass == Hash || super
146
- end
147
-
148
- def keys
149
- (@parent.is_a?(LoopContext) ? @parent.keys : @parent.keys) | @local.keys
150
- end
151
- end
152
-
153
- # -----------------------------------------------------------------------
154
- # Public API
155
- # -----------------------------------------------------------------------
156
-
157
- attr_reader :template_dir
158
-
159
- def initialize(template_dir: "src/templates")
160
- @template_dir = template_dir
161
- @filters = default_filters
162
- @globals = {}
163
- @tests = default_tests
164
- @auto_escape = true
165
-
166
- # Sandboxing
167
- @sandbox = false
168
- @allowed_filters = nil
169
- @allowed_tags = nil
170
- @allowed_vars = nil
171
-
172
- # Fragment cache: key => [html, expires_at]
173
- @fragment_cache = {}
174
-
175
- # Token pre-compilation cache
176
- @compiled = {} # {template_name => [tokens, mtime]}
177
- @compiled_strings = {} # {md5_hash => tokens}
178
-
179
- # Parsed filter chain cache: expr_string => [variable, filters]
180
- @filter_chain_cache = {}
181
-
182
- # Resolved dotted-path split cache: expr_string => parts_array
183
- @resolve_cache = {}
184
-
185
- # Sandbox root-var split cache: var_name => root_var_string
186
- @dotted_split_cache = {}
187
-
188
- # Built-in global functions
189
- register_builtin_globals
190
- end
191
-
192
- # Render a template file with data. Uses token caching for performance.
193
- def render(template, data = {})
194
- context = @globals.merge(stringify_keys(data))
195
-
196
- path = File.join(@template_dir, template)
197
- raise "Template not found: #{path}" unless File.exist?(path)
198
-
199
- debug_mode = ENV.fetch("TINA4_DEBUG", "").downcase == "true"
200
-
201
- unless debug_mode
202
- # Production: use permanent cache (no filesystem checks)
203
- cached = @compiled[template]
204
- return execute_cached(cached[0], context) if cached
205
- end
206
- # Dev mode: skip cache entirely — always re-read and re-tokenize
207
- # so edits to partials and extended base templates are detected
208
-
209
- # Cache miss — load, tokenize, cache
210
- source = File.read(path, encoding: "utf-8")
211
- mtime = File.mtime(path)
212
- tokens = tokenize(source)
213
- @compiled[template] = [tokens, mtime]
214
- execute_with_tokens(source, tokens, context)
215
- end
216
-
217
- # Render a template string directly. Uses token caching for performance.
218
- def render_string(source, data = {})
219
- context = @globals.merge(stringify_keys(data))
220
-
221
- key = Digest::MD5.hexdigest(source)
222
- cached_tokens = @compiled_strings[key]
223
-
224
- if cached_tokens
225
- return execute_cached(cached_tokens, context)
226
- end
227
-
228
- tokens = tokenize(source)
229
- @compiled_strings[key] = tokens
230
- execute_cached(tokens, context)
231
- end
232
-
233
- # Clear all compiled template caches.
234
- def clear_cache
235
- @compiled.clear
236
- @compiled_strings.clear
237
- @filter_chain_cache.clear
238
- @resolve_cache.clear
239
- @dotted_split_cache.clear
240
- end
241
-
242
- # Register a custom filter.
243
- def add_filter(name, &blk)
244
- @filters[name.to_s] = blk
245
- end
246
-
247
- # Register a custom test.
248
- def add_test(name, &blk)
249
- @tests[name.to_s] = blk
250
- end
251
-
252
- # Register a global variable available in all templates.
253
- def add_global(name, value)
254
- @globals[name.to_s] = value
255
- end
256
-
257
- # Enable sandbox mode.
258
- def sandbox(filters: nil, tags: nil, vars: nil)
259
- @sandbox = true
260
- @allowed_filters = filters ? filters.map(&:to_s) : nil
261
- @allowed_tags = tags ? tags.map(&:to_s) : nil
262
- @allowed_vars = vars ? vars.map(&:to_s) : nil
263
- self
264
- end
265
-
266
- # Disable sandbox mode.
267
- def unsandbox
268
- @sandbox = false
269
- @allowed_filters = nil
270
- @allowed_tags = nil
271
- @allowed_vars = nil
272
- self
273
- end
274
-
275
- # Utility: HTML escape
276
- def self.escape_html(str)
277
- str.to_s.gsub(HTML_ESCAPE_RE, HTML_ESCAPE_MAP)
278
- end
279
-
280
- private
281
-
282
- # -----------------------------------------------------------------------
283
- # Tokenizer
284
- # -----------------------------------------------------------------------
285
-
286
- # Regex to extract {% raw %}...{% endraw %} blocks before tokenizing
287
- RAW_BLOCK_RE = /\{%-?\s*raw\s*-?%\}(.*?)\{%-?\s*endraw\s*-?%\}/m
288
-
289
- def tokenize(source)
290
- # 1. Extract raw blocks and replace with placeholders
291
- raw_blocks = []
292
- source = source.gsub(RAW_BLOCK_RE) do
293
- idx = raw_blocks.length
294
- raw_blocks << Regexp.last_match(1)
295
- "\x00RAW_#{idx}\x00"
296
- end
297
-
298
- # 2. Normal tokenization
299
- tokens = []
300
- pos = 0
301
- source.scan(TOKEN_RE) do
302
- m = Regexp.last_match
303
- start = m.begin(0)
304
- tokens << [TEXT, source[pos...start]] if start > pos
305
-
306
- raw = m[0]
307
- if raw.start_with?("{#")
308
- tokens << [COMMENT, raw]
309
- elsif raw.start_with?("{{")
310
- tokens << [VAR, raw]
311
- elsif raw.start_with?("{%")
312
- tokens << [BLOCK, raw]
313
- end
314
- pos = m.end(0)
315
- end
316
- tokens << [TEXT, source[pos..]] if pos < source.length
317
-
318
- # 3. Restore raw block placeholders as literal TEXT
319
- unless raw_blocks.empty?
320
- tokens = tokens.map do |ttype, value|
321
- if ttype == TEXT && value.include?("\x00RAW_")
322
- raw_blocks.each_with_index do |content, idx|
323
- value = value.gsub("\x00RAW_#{idx}\x00", content)
324
- end
325
- end
326
- [ttype, value]
327
- end
328
- end
329
-
330
- tokens
331
- end
332
-
333
- # Strip delimiters from a tag and detect whitespace control markers.
334
- # Returns [content, strip_before, strip_after].
335
- def strip_tag(raw)
336
- inner = raw[2..-3] # remove {{ }} or {% %} or {# #}
337
- strip_before = false
338
- strip_after = false
339
-
340
- if inner.start_with?("-")
341
- strip_before = true
342
- inner = inner[1..]
343
- end
344
- if inner.end_with?("-")
345
- strip_after = true
346
- inner = inner[0..-2]
347
- end
348
-
349
- [inner.strip, strip_before, strip_after]
350
- end
351
-
352
- # -----------------------------------------------------------------------
353
- # Template loading
354
- # -----------------------------------------------------------------------
355
-
356
- def load_template(name)
357
- path = File.join(@template_dir, name)
358
- raise "Template not found: #{path}" unless File.exist?(path)
359
-
360
- File.read(path, encoding: "utf-8")
361
- end
362
-
363
- # -----------------------------------------------------------------------
364
- # Execution
365
- # -----------------------------------------------------------------------
366
-
367
- def execute_cached(tokens, context)
368
- # Check if first non-text token is an extends block
369
- tokens.each do |ttype, raw|
370
- next if ttype == TEXT && raw.strip.empty?
371
- if ttype == BLOCK
372
- content, _, _ = strip_tag(raw)
373
- if content.start_with?("extends ")
374
- # Extends requires source-based execution for block extraction
375
- source = tokens.map { |_, v| v }.join
376
- return execute(source, context)
377
- end
378
- end
379
- break
380
- end
381
- render_tokens(tokens, context)
382
- end
383
-
384
- def execute_with_tokens(source, tokens, context)
385
- # Handle extends first
386
- if source =~ EXTENDS_RE
387
- parent_name = Regexp.last_match(1)
388
- parent_source = load_template(parent_name)
389
- child_blocks = extract_blocks(source)
390
- return render_with_blocks(parent_source, context, child_blocks)
391
- end
392
-
393
- render_tokens(tokens, context)
394
- end
395
-
396
- def execute(source, context)
397
- # Handle extends first
398
- if source =~ EXTENDS_RE
399
- parent_name = Regexp.last_match(1)
400
- parent_source = load_template(parent_name)
401
- child_blocks = extract_blocks(source)
402
- return render_with_blocks(parent_source, context, child_blocks)
403
- end
404
-
405
- render_tokens(tokenize(source), context)
406
- end
407
-
408
- def extract_blocks(source)
409
- blocks = {}
410
- source.scan(BLOCK_RE) do
411
- blocks[Regexp.last_match(1)] = Regexp.last_match(2)
412
- end
413
- blocks
414
- end
415
-
416
- def render_with_blocks(parent_source, context, child_blocks)
417
- engine = self
418
- result = parent_source.gsub(BLOCK_RE) do
419
- name = Regexp.last_match(1)
420
- parent_content = Regexp.last_match(2)
421
- block_source = child_blocks.fetch(name, parent_content)
422
-
423
- # Make parent() and super() available inside child blocks
424
- rendered_parent = nil
425
- get_parent = lambda do
426
- rendered_parent ||= Tina4::SafeString.new(
427
- engine.send(:render_tokens, tokenize(parent_content), context)
428
- )
429
- rendered_parent
430
- end
431
-
432
- block_ctx = context.merge("parent" => get_parent, "super" => get_parent)
433
- render_tokens(tokenize(block_source), block_ctx)
434
- end
435
- render_tokens(tokenize(result), context)
436
- end
437
-
438
- # -----------------------------------------------------------------------
439
- # Token renderer
440
- # -----------------------------------------------------------------------
441
-
442
- def render_tokens(tokens, context)
443
- output = []
444
- i = 0
445
-
446
- while i < tokens.length
447
- ttype, raw = tokens[i]
448
-
449
- case ttype
450
- when TEXT
451
- output << raw
452
- i += 1
453
-
454
- when COMMENT
455
- i += 1
456
-
457
- when VAR
458
- content, strip_b, strip_a = strip_tag(raw)
459
- output[-1] = output[-1].rstrip if strip_b && !output.empty?
460
-
461
- result = eval_var(content, context)
462
- output << (result.nil? ? "" : result.to_s)
463
-
464
- if strip_a && i + 1 < tokens.length && tokens[i + 1][0] == TEXT
465
- tokens[i + 1] = [TEXT, tokens[i + 1][1].lstrip]
466
- end
467
- i += 1
468
-
469
- when BLOCK
470
- content, strip_b, strip_a = strip_tag(raw)
471
- output[-1] = output[-1].rstrip if strip_b && !output.empty?
472
-
473
- tag = content.split[0] || ""
474
-
475
- case tag
476
- when "if"
477
- result, i = handle_if(tokens, i, context)
478
- output << result
479
- when "for"
480
- result, i = handle_for(tokens, i, context)
481
- output << result
482
- when "set"
483
- handle_set(content, context)
484
- i += 1
485
- when "include"
486
- if @sandbox && @allowed_tags && !@allowed_tags.include?("include")
487
- i += 1
488
- else
489
- output << handle_include(content, context)
490
- i += 1
491
- end
492
- when "macro"
493
- i = handle_macro(tokens, i, context)
494
- when "from"
495
- handle_from_import(content, context)
496
- i += 1
497
- when "cache"
498
- result, i = handle_cache(tokens, i, context)
499
- output << result
500
- when "spaceless"
501
- result, i = handle_spaceless(tokens, i, context)
502
- output << result
503
- when "autoescape"
504
- result, i = handle_autoescape(tokens, i, context)
505
- output << result
506
- when "block", "endblock", "extends"
507
- i += 1
508
- else
509
- i += 1
510
- end
511
-
512
- if strip_a && i < tokens.length && tokens[i][0] == TEXT
513
- tokens[i] = [TEXT, tokens[i][1].lstrip]
514
- end
515
- else
516
- i += 1
517
- end
518
- end
519
-
520
- output.join
521
- end
522
-
523
- # -----------------------------------------------------------------------
524
- # Variable evaluation
525
- # -----------------------------------------------------------------------
526
-
527
- def eval_var(expr, context)
528
- # Check for top-level ternary BEFORE splitting filters so that
529
- # expressions like ``products|length != 1 ? "s" : ""`` work correctly.
530
- ternary_pos = find_ternary(expr)
531
- if ternary_pos != -1
532
- cond_part = expr[0...ternary_pos].strip
533
- rest = expr[(ternary_pos + 1)..]
534
- colon_pos = find_colon(rest)
535
- if colon_pos != -1
536
- true_part = rest[0...colon_pos].strip
537
- false_part = rest[(colon_pos + 1)..].strip
538
- cond = eval_var_raw(cond_part, context)
539
- return truthy?(cond) ? eval_var(true_part, context) : eval_var(false_part, context)
540
- end
541
- end
542
-
543
- eval_var_inner(expr, context)
544
- end
545
-
546
- def eval_var_raw(expr, context)
547
- var_name, filters = parse_filter_chain(expr)
548
- value = eval_expr(var_name, context)
549
- filters.each do |fname, args|
550
- next if fname == "raw" || fname == "safe"
551
- fn = @filters[fname]
552
- if fn
553
- evaluated_args = args.map { |a| eval_filter_arg(a, context) }
554
- value = fn.call(value, *evaluated_args)
555
- else
556
- # The filter name may include a trailing comparison operator,
557
- # e.g. "length != 1". Extract the real filter name and the
558
- # comparison suffix, apply the filter, then evaluate the comparison.
559
- m = fname.match(FILTER_CMP_RE)
560
- if m
561
- real_filter = m[1]
562
- op = m[2]
563
- right_expr = m[3].strip
564
- fn2 = @filters[real_filter]
565
- if fn2
566
- evaluated_args = args.map { |a| eval_filter_arg(a, context) }
567
- value = fn2.call(value, *evaluated_args)
568
- end
569
- right = eval_expr(right_expr, context)
570
- value = case op
571
- when "!=" then value != right
572
- when "==" then value == right
573
- when ">=" then value >= right
574
- when "<=" then value <= right
575
- when ">" then value > right
576
- when "<" then value < right
577
- else false
578
- end rescue false
579
- else
580
- value = eval_expr(fname, context)
581
- end
582
- end
583
- end
584
- value
585
- end
586
-
587
- def eval_var_inner(expr, context)
588
- var_name, filters = parse_filter_chain(expr)
589
-
590
- # Sandbox: check variable access
591
- if @sandbox && @allowed_vars
592
- root_var = @dotted_split_cache[var_name]
593
- unless root_var
594
- root_var = var_name.split(".")[0].split("[")[0].strip
595
- @dotted_split_cache[var_name] = root_var
596
- end
597
- return "" if !root_var.empty? && !@allowed_vars.include?(root_var) && root_var != "loop"
598
- end
599
-
600
- value = eval_expr(var_name, context)
601
-
602
- is_safe = false
603
- filters.each do |fname, args|
604
- if fname == "raw" || fname == "safe"
605
- is_safe = true
606
- next
607
- end
608
-
609
- # Sandbox: check filter access
610
- if @sandbox && @allowed_filters && !@allowed_filters.include?(fname)
611
- next
612
- end
613
-
614
- # Inline common no-arg filters for speed (skip generic dispatch)
615
- if args.empty? && INLINE_FILTERS.include?(fname)
616
- value = case fname
617
- when "upper" then value.to_s.upcase
618
- when "lower" then value.to_s.downcase
619
- when "length" then value.respond_to?(:length) ? value.length : value.to_s.length
620
- when "trim" then value.to_s.strip
621
- when "capitalize" then value.to_s.capitalize
622
- when "title" then value.to_s.split.map(&:capitalize).join(" ")
623
- when "string" then value.to_s
624
- when "int" then value.to_i
625
- when "escape", "e" then Frond.escape_html(value.to_s)
626
- else value
627
- end
628
- next
629
- end
630
-
631
- fn = @filters[fname]
632
- if fn
633
- evaluated_args = args.map { |a| eval_filter_arg(a, context) }
634
- value = fn.call(value, *evaluated_args)
635
- end
636
- end
637
-
638
- # Auto-escape HTML unless marked safe or SafeString
639
- if @auto_escape && !is_safe && value.is_a?(String) && !value.is_a?(SafeString)
640
- value = Frond.escape_html(value)
641
- end
642
-
643
- value
644
- end
645
-
646
- def eval_filter_arg(arg, context)
647
- return Regexp.last_match(1) if arg =~ STRING_LIT_RE
648
- return arg.to_i if arg =~ INTEGER_RE
649
- return arg.to_f if arg =~ FLOAT_RE
650
- eval_expr(arg, context)
651
- end
652
-
653
- # Find the first occurrence of +needle+ that is not inside quotes or
654
- # parentheses. Returns the index, or -1 if not found.
655
- def find_outside_quotes(expr, needle)
656
- in_q = nil
657
- depth = 0
658
- bracket_depth = 0
659
- i = 0
660
- nlen = needle.length
661
- while i <= expr.length - nlen
662
- ch = expr[i]
663
- if (ch == '"' || ch == "'") && depth == 0
664
- if in_q.nil?
665
- in_q = ch
666
- elsif ch == in_q
667
- in_q = nil
668
- end
669
- i += 1
670
- next
671
- end
672
- if in_q
673
- i += 1
674
- next
675
- end
676
- if ch == "("
677
- depth += 1
678
- elsif ch == ")"
679
- depth -= 1
680
- elsif ch == "["
681
- bracket_depth += 1
682
- elsif ch == "]"
683
- bracket_depth -= 1
684
- end
685
- if depth == 0 && bracket_depth == 0 && expr[i, nlen] == needle
686
- return i
687
- end
688
- i += 1
689
- end
690
- -1
691
- end
692
-
693
- # Find the index of a top-level ``?`` that is part of a ternary operator.
694
- # Respects quoted strings, parentheses, and skips ``??`` (null coalesce).
695
- # Returns -1 if not found.
696
- def find_ternary(expr)
697
- depth = 0
698
- in_quote = nil
699
- i = 0
700
- len = expr.length
701
- while i < len
702
- ch = expr[i]
703
- if in_quote
704
- in_quote = nil if ch == in_quote
705
- i += 1
706
- next
707
- end
708
- if ch == '"' || ch == "'"
709
- in_quote = ch
710
- i += 1
711
- next
712
- end
713
- if ch == "("
714
- depth += 1
715
- elsif ch == ")"
716
- depth -= 1
717
- elsif ch == "?" && depth == 0
718
- # Skip ``??`` (null coalesce)
719
- if i + 1 < len && expr[i + 1] == "?"
720
- i += 2
721
- next
722
- end
723
- return i
724
- end
725
- i += 1
726
- end
727
- -1
728
- end
729
-
730
- # Find the index of the top-level ``:`` that separates the true/false
731
- # branches of a ternary. Respects quotes and parentheses.
732
- def find_colon(expr)
733
- depth = 0
734
- in_quote = nil
735
- expr.each_char.with_index do |ch, i|
736
- if in_quote
737
- in_quote = nil if ch == in_quote
738
- next
739
- end
740
- if ch == '"' || ch == "'"
741
- in_quote = ch
742
- next
743
- end
744
- if ch == "("
745
- depth += 1
746
- elsif ch == ")"
747
- depth -= 1
748
- elsif ch == ":" && depth == 0
749
- return i
750
- end
751
- end
752
- -1
753
- end
754
-
755
- # -----------------------------------------------------------------------
756
- # Filter chain parser
757
- # -----------------------------------------------------------------------
758
-
759
- def parse_filter_chain(expr)
760
- cached = @filter_chain_cache[expr]
761
- return cached if cached
762
-
763
- parts = split_on_pipe(expr)
764
- variable = parts[0].strip
765
- filters = []
766
-
767
- parts[1..].each do |f|
768
- f = f.strip
769
- if f =~ FILTER_WITH_ARGS_RE
770
- name = Regexp.last_match(1)
771
- raw_args = Regexp.last_match(2).strip
772
- args = raw_args.empty? ? [] : parse_args(raw_args)
773
- filters << [name, args]
774
- else
775
- filters << [f.strip, []]
776
- end
777
- end
778
-
779
- result = [variable, filters].freeze
780
- @filter_chain_cache[expr] = result
781
- result
782
- end
783
-
784
- # Split expression on | but not inside quotes or parens.
785
- def split_on_pipe(expr)
786
- parts = []
787
- current = +""
788
- in_quote = nil
789
- depth = 0
790
-
791
- expr.each_char do |ch|
792
- if in_quote
793
- current << ch
794
- in_quote = nil if ch == in_quote
795
- elsif ch == '"' || ch == "'"
796
- in_quote = ch
797
- current << ch
798
- elsif ch == "("
799
- depth += 1
800
- current << ch
801
- elsif ch == ")"
802
- depth -= 1
803
- current << ch
804
- elsif ch == "|" && depth == 0
805
- parts << current
806
- current = +""
807
- else
808
- current << ch
809
- end
810
- end
811
- parts << current unless current.empty?
812
- parts
813
- end
814
-
815
- def parse_args(raw)
816
- args = []
817
- current = +""
818
- in_quote = nil
819
- depth = 0
820
-
821
- raw.each_char do |ch|
822
- if in_quote
823
- if ch == in_quote
824
- in_quote = nil
825
- end
826
- current << ch
827
- elsif ch == '"' || ch == "'"
828
- in_quote = ch
829
- current << ch
830
- elsif ch == "(" || ch == "{" || ch == "["
831
- depth += 1
832
- current << ch
833
- elsif ch == ")" || ch == "}" || ch == "]"
834
- depth -= 1
835
- current << ch
836
- elsif ch == "," && depth == 0
837
- args << current.strip
838
- current = +""
839
- else
840
- current << ch
841
- end
842
- end
843
- args << current.strip unless current.strip.empty?
844
- args
845
- end
846
-
847
# -----------------------------------------------------------------------
# Expression evaluator
# -----------------------------------------------------------------------

# ── Expression evaluator (dispatcher) ──────────────────────────────
# Each expression type is handled by a focused helper method.
# Helpers return a :not_* sentinel (:not_literal, :not_ternary, ...)
# when the expression doesn't match their type, so the dispatcher
# falls through to the next handler in a fixed precedence order:
# literal -> collection literal -> parenthesized group -> ternary ->
# inline if -> null coalesce -> concat (~) -> comparison/logical ->
# arithmetic -> function call -> plain variable resolution.

# Evaluate a template expression and return its Ruby value (nil for an
# empty expression).
def eval_expr(expr, context)
  expr = expr.strip
  return nil if expr.empty?

  result = eval_literal(expr)
  return result unless result == :not_literal

  result = eval_collection_literal(expr, context)
  return result unless result == :not_collection

  # Fully parenthesized expression: recurse on the inner text.
  return eval_expr(expr[1..-2], context) if matched_parens?(expr)

  result = eval_ternary(expr, context)
  return result unless result == :not_ternary

  result = eval_inline_if(expr, context)
  return result unless result == :not_inline_if

  result = eval_null_coalesce(expr, context)
  return result unless result == :not_coalesce

  result = eval_concat(expr, context)
  return result unless result == :not_concat

  # Comparison/logical operators are detected by a substring scan and
  # delegated to the dedicated comparison evaluator.
  return eval_comparison(expr, context) if has_comparison?(expr)

  result = eval_arithmetic(expr, context)
  return result unless result == :not_arithmetic

  result = eval_function_call(expr, context)
  return result unless result == :not_function

  # Fallback: dotted/bracketed variable lookup in the context.
  resolve(expr, context)
end
890
-
891
# ── Literal values: strings, numbers, booleans, null ──

# Return the Ruby value for a scalar literal, or :not_literal when the
# expression is not one. Quoted strings keep their inner text verbatim.
def eval_literal(expr)
  quoted = (expr.start_with?('"') && expr.end_with?('"')) ||
           (expr.start_with?("'") && expr.end_with?("'"))
  return expr[1..-2] if quoted

  case expr
  when INTEGER_RE then expr.to_i
  when FLOAT_RE then expr.to_f
  when "true" then true
  when "false" then false
  when "null", "none", "nil" then nil
  else :not_literal
  end
end
905
-
906
# ── Collection literals: arrays, hashes, ranges ──

# Parse [..], {..} and n..m literals, evaluating each element/value as a
# sub-expression. Returns :not_collection when expr is none of these.
def eval_collection_literal(expr, context)
  if expr =~ ARRAY_LIT_RE
    inner = Regexp.last_match(1)
    return split_args_toplevel(inner).map { |item| eval_expr(item.strip, context) }
  end
  if expr =~ HASH_LIT_RE
    inner = Regexp.last_match(1)
    hash = {}
    split_args_toplevel(inner).each do |pair|
      if pair =~ HASH_PAIR_RE
        # Key may be quoted (capture 1) or bare (capture 2); the value
        # expression is capture 3. Non-matching pairs are silently dropped.
        key = Regexp.last_match(1) || Regexp.last_match(2)
        hash[key] = eval_expr(Regexp.last_match(3).strip, context)
      end
    end
    return hash
  end
  if expr =~ RANGE_LIT_RE
    # Inclusive integer range, materialized as an Array.
    return (Regexp.last_match(1).to_i..Regexp.last_match(2).to_i).to_a
  end
  :not_collection
end
929
-
930
# ── Parenthesized sub-expression check ──

# True only when the entire expression is wrapped by one matching pair
# of parentheses, e.g. "(a + b)" but not "(a)(b)".
# NOTE: parens inside quoted strings are counted too, same as before.
def matched_parens?(expr)
  return false unless expr.start_with?("(") && expr.end_with?(")")

  level = 0
  last = expr.length - 1
  expr.each_char.with_index do |c, idx|
    level += 1 if c == "("
    level -= 1 if c == ")"
    # Depth reaching zero before the final character means the opening
    # paren closed early, so it does not wrap the whole expression.
    return false if level.zero? && idx < last
  end
  true
end
942
-
943
# ── Ternary: condition ? "yes" : "no" ──

# Evaluate "cond ? a : b". Returns :not_ternary unless a "?" (not at
# position 0) and a following ":" are both found outside quoted
# sections. Only the selected branch is evaluated.
def eval_ternary(expr, context)
  q_pos = find_outside_quotes(expr, "?")
  return :not_ternary unless q_pos && q_pos > 0
  cond_part = expr[0...q_pos].strip
  rest = expr[(q_pos + 1)..]
  c_pos = find_outside_quotes(rest, ":")
  return :not_ternary unless c_pos && c_pos >= 0
  true_part = rest[0...c_pos].strip
  false_part = rest[(c_pos + 1)..].strip
  cond = eval_expr(cond_part, context)
  truthy?(cond) ? eval_expr(true_part, context) : eval_expr(false_part, context)
end
957
-
958
# ── Inline if: value if condition else other_value ──

# Twig/Python-style conditional expression. Both " if " and a later
# " else " must appear outside quotes; only the chosen side is
# evaluated.
def eval_inline_if(expr, context)
  if_pos = find_outside_quotes(expr, " if ")
  return :not_inline_if unless if_pos && if_pos >= 0
  else_pos = find_outside_quotes(expr, " else ")
  return :not_inline_if unless else_pos && else_pos > if_pos
  value_part = expr[0...if_pos].strip
  cond_part = expr[(if_pos + 4)...else_pos].strip # 4 == " if ".length
  else_part = expr[(else_pos + 6)..].strip        # 6 == " else ".length
  cond = eval_expr(cond_part, context)
  truthy?(cond) ? eval_expr(value_part, context) : eval_expr(else_part, context)
end
971
-
972
# ── Null coalescing: value ?? "default" ──

# Evaluate "left ?? right": returns left unless it evaluates to nil,
# otherwise right. Returns :not_coalesce when no "??" operator is
# present outside quoted sections.
#
# Fix: locate the operator with find_outside_quotes instead of a plain
# include?/partition, so a "??" inside a quoted literal (e.g.
# other ?? "a??b") cannot be mistaken for the operator.
def eval_null_coalesce(expr, context)
  pos = find_outside_quotes(expr, "??")
  return :not_coalesce unless pos && pos >= 0
  left = expr[0...pos]
  right = expr[(pos + 2)..]
  val = eval_expr(left.strip, context)
  val.nil? ? eval_expr(right.strip, context) : val
end
980
-
981
# ── String concatenation: a ~ b ──

# Twig-style "~" concatenation: every segment is evaluated and joined
# as a string (nil segments contribute ""). Returns :not_concat when no
# top-level "~" is present.
#
# Fix: split on "~" only outside quotes and parentheses, so literals
# such as name ~ "x~y" or helper("a~b") are not torn apart (the old
# expr.split("~") ignored quoting entirely).
def eval_concat(expr, context)
  parts = []
  current = +""
  in_quote = nil
  depth = 0
  found = false

  expr.each_char do |ch|
    if in_quote
      current << ch
      in_quote = nil if ch == in_quote
    elsif ch == '"' || ch == "'"
      in_quote = ch
      current << ch
    elsif ch == "("
      depth += 1
      current << ch
    elsif ch == ")"
      depth -= 1
      current << ch
    elsif ch == "~" && depth == 0
      found = true
      parts << current
      current = +""
    else
      current << ch
    end
  end
  parts << current

  return :not_concat unless found
  parts.map { |p| (eval_expr(p.strip, context) || "").to_s }.join
end
988
-
989
# ── Arithmetic: +, -, *, //, /, %, ** ──

# Try each operator in ARITHMETIC_OPS order (defined elsewhere in this
# class) and split at the first occurrence found outside quotes; left
# and right are evaluated recursively and combined by apply_math.
# Returns :not_arithmetic when no operator matches.
#
# NOTE(review): splitting at the first matching operator means mixed
# expressions associate according to ARITHMETIC_OPS ordering rather
# than standard mathematical precedence — confirm against the constant.
def eval_arithmetic(expr, context)
  ARITHMETIC_OPS.each do |op|
    pos = find_outside_quotes(expr, op)
    next unless pos && pos >= 0
    l_val = eval_expr(expr[0...pos].strip, context)
    r_val = eval_expr(expr[(pos + op.length)..].strip, context)
    # Operator entries may carry surrounding spaces; strip for apply_math.
    return apply_math(l_val, op.strip, r_val)
  end
  :not_arithmetic
end
1001
-
1002
# ── Function call: name(arg1, arg2) ──

# Invoke a callable stored in the context (macros, registered lambdas).
# Returns :not_function when the expression does not look like a call
# or the resolved value is not callable.
def eval_function_call(expr, context)
  return :not_function unless expr =~ FUNC_CALL_RE
  fn_name = Regexp.last_match(1)
  raw_args = Regexp.last_match(2).strip
  fn = context[fn_name]
  return :not_function unless fn.respond_to?(:call)
  # Arguments are split at top-level commas, each evaluated as a full
  # expression before the call.
  args = raw_args.empty? ? [] : split_args_toplevel(raw_args).map { |a| eval_expr(a.strip, context) }
  fn.call(*args)
end
1013
-
1014
# True when the expression contains any comparison/logical operator
# token. Plain substring scan — quoted occurrences also count, which
# callers tolerate (eval_comparison falls through safely).
def has_comparison?(expr)
  [" not in ", " in ", " is not ", " is ",
   "!=", "==", ">=", "<=", ">", "<",
   " and ", " or ", " not "].any? { |token| expr.include?(token) }
end
1018
-
1019
# Split comma-separated args at top level (not inside quotes/parens/brackets).
def split_args_toplevel(str)
  pieces = []
  buf = +""
  quote = nil
  nest = 0

  str.each_char do |c|
    if quote
      buf << c
      quote = nil if c == quote
      next
    end

    case c
    when '"', "'"
      quote = c
      buf << c
    when "(", "[", "{"
      nest += 1
      buf << c
    when ")", "]", "}"
      nest -= 1
      buf << c
    when ","
      if nest.zero?
        pieces << buf.strip
        buf = +""
      else
        buf << c
      end
    else
      buf << c
    end
  end

  pieces << buf.strip unless buf.strip.empty?
  pieces
end
1049
-
1050
# -----------------------------------------------------------------------
# Comparison / logical evaluator
# -----------------------------------------------------------------------

# Evaluate a boolean/comparison expression. Precedence, loosest first:
# "not" prefix, "or", "and", "is not"/"is" tests, "not in"/"in"
# membership, binary comparison operators; anything left falls through
# to plain evaluation + truthiness.
#
# eval_fn lets callers substitute a different sub-expression evaluator
# (handle_if passes eval_var_raw); defaults to eval_expr.
def eval_comparison(expr, context, eval_fn = nil)
  eval_fn ||= method(:eval_expr)
  expr = expr.strip

  # Handle 'not' prefix
  if expr.start_with?("not ")
    return !eval_comparison(expr[4..], context, eval_fn)
  end

  # 'or' (lowest precedence)
  or_parts = expr.split(OR_SPLIT_RE)
  if or_parts.length > 1
    return or_parts.any? { |p| eval_comparison(p, context, eval_fn) }
  end

  # 'and'
  and_parts = expr.split(AND_SPLIT_RE)
  if and_parts.length > 1
    return and_parts.all? { |p| eval_comparison(p, context, eval_fn) }
  end

  # 'is not' test
  if expr =~ IS_NOT_RE
    return !eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                      Regexp.last_match(3).strip, context, eval_fn)
  end

  # 'is' test
  if expr =~ IS_RE
    return eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                     Regexp.last_match(3).strip, context, eval_fn)
  end

  # 'not in' — membership is only checked when the collection supports include?
  if expr =~ NOT_IN_RE
    val = eval_fn.call(Regexp.last_match(1).strip, context)
    collection = eval_fn.call(Regexp.last_match(2).strip, context)
    return !(collection.respond_to?(:include?) && collection.include?(val))
  end

  # 'in'
  if expr =~ IN_RE
    val = eval_fn.call(Regexp.last_match(1).strip, context)
    collection = eval_fn.call(Regexp.last_match(2).strip, context)
    return collection.respond_to?(:include?) ? collection.include?(val) : false
  end

  # Binary comparison operators. Ordering matters: "!="/"==" and
  # ">="/"<=" are checked before the bare ">"/"<" so a longer operator
  # is never split as its prefix. Ordering comparisons coerce both sides
  # with to_f; any raised error yields false.
  # NOTE(review): include? is not quote-aware here — an operator inside
  # a quoted literal will still split the expression.
  [["!=", ->(a, b) { a != b }],
   ["==", ->(a, b) { a == b }],
   [">=", ->(a, b) { a.to_f >= b.to_f }],
   ["<=", ->(a, b) { a.to_f <= b.to_f }],
   [">", ->(a, b) { a.to_f > b.to_f }],
   ["<", ->(a, b) { a.to_f < b.to_f }]].each do |op, fn|
    if expr.include?(op)
      left, _, right = expr.partition(op)
      l = eval_fn.call(left.strip, context)
      r = eval_fn.call(right.strip, context)
      begin
        return fn.call(l, r)
      rescue
        return false
      end
    end
  end

  # Fall through to simple eval
  val = eval_fn.call(expr, context)
  truthy?(val)
end
1124
-
1125
# -----------------------------------------------------------------------
# Tests ('is' expressions)
# -----------------------------------------------------------------------

# Evaluate "<value> is <test>" expressions. 'divisible by(n)' is
# special-cased; every other test name is looked up in @tests (the
# default tests plus any user-registered ones). Unknown test names
# evaluate to false.
def eval_test(value_expr, test_name, args_str, context, eval_fn = nil)
  eval_fn ||= method(:eval_expr)
  val = eval_fn.call(value_expr, context)

  # 'divisible by(n)'
  # NOTE(review): n == 0 would raise ZeroDivisionError here.
  if test_name == "divisible"
    if args_str =~ DIVISIBLE_BY_RE
      n = Regexp.last_match(1).to_i
      return val.is_a?(Integer) && (val % n).zero?
    end
    return false
  end

  # Registered tests (defaults + custom) share the @tests lookup.
  custom = @tests[test_name]
  return custom.call(val) if custom

  false
end
1148
-
1149
# Built-in "is" tests, keyed by name. Each entry is a one-argument
# lambda returning true/false for the value under test.
def default_tests
  tests = {}
  tests["defined"]  = ->(v) { !v.nil? }
  tests["empty"]    = ->(v) { v.nil? || (v.respond_to?(:empty?) && v.empty?) || v == 0 || v == false }
  tests["null"]     = ->(v) { v.nil? }
  tests["none"]     = ->(v) { v.nil? }
  tests["even"]     = ->(v) { v.is_a?(Integer) && v.even? }
  tests["odd"]      = ->(v) { v.is_a?(Integer) && v.odd? }
  tests["iterable"] = ->(v) { v.respond_to?(:each) && !v.is_a?(String) }
  tests["string"]   = ->(v) { v.is_a?(String) }
  tests["number"]   = ->(v) { v.is_a?(Numeric) }
  tests["boolean"]  = ->(v) { v.is_a?(TrueClass) || v.is_a?(FalseClass) }
  tests
end
1163
-
1164
# -----------------------------------------------------------------------
# Variable resolver
# -----------------------------------------------------------------------

# Resolve a dotted/bracketed path ("user.address[0].city") against the
# context. The split into path parts is memoized per expression in
# @resolve_cache. Returns nil as soon as any step yields nil.
def resolve(expr, context)
  parts = @resolve_cache[expr]
  unless parts
    parts = expr.split(RESOLVE_SPLIT_RE).reject(&:empty?)
    @resolve_cache[expr] = parts
  end

  value = context

  parts.each do |part|
    part = part.strip.gsub(RESOLVE_STRIP_RE, "") # strip quotes from bracket access
    if value.is_a?(Hash) || value.is_a?(LoopContext)
      # String key first, then symbol key.
      # NOTE(review): a stored false value falls through to the symbol
      # lookup (and then to nil), because of the || chain.
      value = value[part] || value[part.to_sym]
    elsif value.is_a?(Array)
      # Slice syntax: value[1:5], value[:10], value[start:end]
      if part.include?(":") && !(part.start_with?('"') || part.start_with?("'"))
        slice_parts = part.split(":", 2)
        s_start = slice_parts[0].strip.empty? ? nil : eval_expr(slice_parts[0].strip, context).to_i
        s_end = slice_parts[1].strip.empty? ? nil : eval_expr(slice_parts[1].strip, context).to_i
        if s_start && s_end
          value = value[s_start...s_end]      # end-exclusive, Python style
        elsif s_start
          value = value[s_start..]
        elsif s_end
          value = value[0...s_end]
        else
          value = value.dup
        end
        next
      end
      # Index access: literal digits take a fast path; anything else is
      # evaluated as an expression and must produce an Integer.
      idx = if part =~ DIGIT_RE
              part.to_i
            else
              eval_expr(part, context)
            end
      idx = idx.to_i if idx.is_a?(Numeric)
      value = idx.is_a?(Integer) ? value[idx] : nil
    elsif value.respond_to?(part.to_sym)
      # Fallback: treat the part as a zero-argument method on the object.
      # NOTE(review): this sends template-controlled method names to
      # objects placed in the context — only expose trusted objects.
      value = value.send(part.to_sym)
    else
      return nil
    end
    return nil if value.nil?
  end

  value
end
1215
-
1216
# -----------------------------------------------------------------------
# Math
# -----------------------------------------------------------------------

# Apply a binary arithmetic operator. Operands are coerced to Float
# (nil becomes 0); division/modulo by zero yields 0 instead of raising;
# an unknown operator yields 0. Whole-number results are returned as
# Integer, otherwise Float — so 10 / 2 => 5 but 10 / 4 => 2.5.
#
# Fix: the previous version tracked a `both_int` flag that was dead
# code — the fallback branch of its nested ternary already converted
# any whole-number result to Integer — and its comment wrongly claimed
# "/" always returns Float.
def apply_math(left, op, right)
  l = (left || 0).to_f
  r = (right || 0).to_f
  result = case op
           when "+" then l + r
           when "-" then l - r
           when "*" then l * r
           when "/" then r != 0 ? l / r : 0
           when "//" then r != 0 ? (l / r).floor : 0
           when "%" then r != 0 ? l % r : 0
           when "**" then l**r
           else 0
           end
  # Collapse whole-number floats to Integer (to_i raises
  # FloatDomainError for NaN/Infinity, matching the previous behavior).
  result == result.to_i ? result.to_i : result
end
1237
-
1238
# -----------------------------------------------------------------------
# Block handlers
# -----------------------------------------------------------------------

# {% if %}...{% elseif %}...{% else %}...{% endif %}
# Collect branch bodies as [condition, tokens] pairs (condition nil for
# the else branch), honoring nested ifs and {%- ... -%} whitespace
# stripping, then render the first branch whose condition is truthy.
# Returns [rendered_html, next_token_index].
def handle_if(tokens, start, context)
  content, _, strip_a_open = strip_tag(tokens[start][1])
  condition_expr = content.sub(/\Aif\s+/, "").strip

  branches = []
  current_tokens = []
  current_cond = condition_expr
  depth = 0
  i = start + 1

  # If the opening {%- if -%} has strip_after, lstrip the first body text
  pending_lstrip = strip_a_open

  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, strip_b_tag, strip_a_tag = strip_tag(raw)
      tag = tag_content.split[0] || ""

      if tag == "if"
        # Nested if: track depth so its elseif/else/endif are ignored here.
        depth += 1
        current_tokens << tokens[i]
      elsif tag == "endif" && depth > 0
        depth -= 1
        current_tokens << tokens[i]
      elsif tag == "endif" && depth == 0
        # Apply strip_before from endif to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        i += 1
        break
      elsif (tag == "elseif" || tag == "elif") && depth == 0
        # Apply strip_before from elseif to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        current_cond = tag_content.sub(/\A(?:elseif|elif)\s+/, "").strip
        current_tokens = []
        pending_lstrip = strip_a_tag
      elsif tag == "else" && depth == 0
        # Apply strip_before from else to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        current_cond = nil
        current_tokens = []
        pending_lstrip = strip_a_tag
      else
        # Unrelated block tag: keep it inside the current branch body.
        current_tokens << tokens[i]
      end
    else
      tok = tokens[i]
      if pending_lstrip && ttype == TEXT
        tok = [TEXT, tok[1].lstrip]
        pending_lstrip = false
      end
      current_tokens << tok
    end
    i += 1
  end

  # Conditions are evaluated via eval_var_raw; nil marks the else branch.
  branches.each do |cond, branch_tokens|
    if cond.nil? || eval_comparison(cond, context, method(:eval_var_raw))
      return [render_tokens(branch_tokens.dup, context), i]
    end
  end

  ["", i]
end
1316
-
1317
# {% for item in items %}...{% else %}...{% endfor %}
# Supports "for k, v in hash", "for i, item in array" (index, value), a
# loop.* metadata hash, and an {% else %} branch rendered when the
# iterable is nil or empty. Returns [rendered_html, next_token_index].
def handle_for(tokens, start, context)
  content, _, strip_a_open = strip_tag(tokens[start][1])
  m = content.match(FOR_RE)
  return ["", start + 1] unless m

  var1 = m[1]
  var2 = m[2]
  iterable_expr = m[3].strip

  body_tokens = []
  else_tokens = []
  in_else = false
  for_depth = 0
  if_depth = 0
  i = start + 1
  pending_lstrip = strip_a_open

  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, strip_b_tag, strip_a_tag = strip_tag(raw)
      tag = tag_content.split[0] || ""

      if tag == "for"
        for_depth += 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endfor" && for_depth > 0
        for_depth -= 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endfor" && for_depth == 0
        target = in_else ? else_tokens : body_tokens
        if strip_b_tag && !target.empty? && target[-1][0] == TEXT
          target[-1] = [TEXT, target[-1][1].rstrip]
        end
        i += 1
        break
      elsif tag == "if"
        # Track if-nesting so an {% else %} belonging to an inner if is
        # not mistaken for this loop's else branch.
        if_depth += 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endif"
        if_depth -= 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "else" && for_depth == 0 && if_depth == 0
        if strip_b_tag && !body_tokens.empty? && body_tokens[-1][0] == TEXT
          body_tokens[-1] = [TEXT, body_tokens[-1][1].rstrip]
        end
        in_else = true
        pending_lstrip = strip_a_tag
      else
        (in_else ? else_tokens : body_tokens) << tokens[i]
      end
    else
      tok = tokens[i]
      if pending_lstrip && ttype == TEXT
        tok = [TEXT, tok[1].lstrip]
        pending_lstrip = false
      end
      (in_else ? else_tokens : body_tokens) << tok
    end
    i += 1
  end

  iterable = eval_expr(iterable_expr, context)

  # nil/empty iterable: render the else branch (if any), otherwise "".
  if iterable.nil? || (iterable.respond_to?(:empty?) && iterable.empty?)
    if else_tokens.any?
      return [render_tokens(else_tokens.dup, context), i]
    end
    return ["", i]
  end

  output = []
  items = iterable.is_a?(Hash) ? iterable.to_a : Array(iterable)
  total = items.length

  items.each_with_index do |item, idx|
    # Each iteration gets a LoopContext layered over the outer context,
    # exposing the standard loop.* metadata (1-based index, first/last,
    # reverse indices, even/odd parity).
    loop_ctx = LoopContext.new(context)
    loop_ctx["loop"] = {
      "index" => idx + 1,
      "index0" => idx,
      "first" => idx == 0,
      "last" => idx == total - 1,
      "length" => total,
      "revindex" => total - idx,
      "revindex0" => total - idx - 1,
      "even" => ((idx + 1) % 2).zero?,
      "odd" => ((idx + 1) % 2) != 0,
    }

    if iterable.is_a?(Hash)
      # Hash: one var binds the key; two vars bind key and value.
      key, value = item
      if var2
        loop_ctx[var1] = key
        loop_ctx[var2] = value
      else
        loop_ctx[var1] = key
      end
    else
      # Array: one var binds the item; two vars bind index and item.
      if var2
        loop_ctx[var1] = idx
        loop_ctx[var2] = item
      else
        loop_ctx[var1] = item
      end
    end

    output << render_tokens(body_tokens.dup, loop_ctx)
  end

  [output.join, i]
end
1429
-
1430
# {% set name = expr %}
# Assign the evaluated expression to a context variable. Tags that do
# not match SET_RE are silently ignored.
def handle_set(content, context)
  return unless content =~ SET_RE

  var_name = Regexp.last_match(1)
  value_expr = Regexp.last_match(2).strip
  context[var_name] = eval_var_raw(value_expr, context)
end
1438
-
1439
# {% include "file.html" [with {...}] [ignore missing] %}
# Render another template inline against a copy of the current context,
# optionally merged with the evaluated "with" hash. With "ignore
# missing", a template that fails to load renders as "".
def handle_include(content, context)
  ignore_missing = content.include?("ignore missing")
  # NOTE(review): this also strips the phrase from a filename that
  # happens to contain "ignore missing" — accepted edge case.
  content = content.gsub("ignore missing", "").strip

  m = content.match(INCLUDE_RE)
  return "" unless m

  filename = m[1]
  with_expr = m[2]

  begin
    source = load_template(filename)
  rescue
    return "" if ignore_missing
    raise
  end

  # dup so assignments inside the included template do not leak out.
  inc_context = context.dup
  if with_expr
    extra = eval_expr(with_expr, context)
    inc_context.merge!(stringify_keys(extra)) if extra.is_a?(Hash)
  end

  execute(source, inc_context)
end
1465
-
1466
# {% macro name(args) %}...{% endmacro %}
# Define a macro as a lambda stored in the context under its name.
# Returns the index of the token just past {% endmacro %}.
#
# NOTE(review): unlike _make_macro_fn (used by {% from %} imports,
# which receives a dup'd snapshot of the context), this captures the
# live context object — variables set after the definition are visible
# inside the macro at call time.
def handle_macro(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  m = content.match(MACRO_RE)
  unless m
    # Malformed header: skip everything up to and including endmacro.
    i = start + 1
    while i < tokens.length
      if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
        return i + 1
      end
      i += 1
    end
    return i
  end

  macro_name = m[1]
  param_names = m[2].split(",").map(&:strip).reject(&:empty?)

  # Collect the macro body up to (and consuming) endmacro.
  # NOTE(review): endmacro detection is a substring test, so nested
  # macro definitions are not supported here.
  body_tokens = []
  i = start + 1
  while i < tokens.length
    if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
      i += 1
      break
    end
    body_tokens << tokens[i]
    i += 1
  end

  engine = self
  captured_body = body_tokens.dup
  captured_context = context

  # Calling the macro renders the captured body against a dup of the
  # captured context with positional params bound (missing args => nil).
  context[macro_name] = lambda { |*args|
    macro_ctx = captured_context.dup
    param_names.each_with_index do |pname, pi|
      macro_ctx[pname] = pi < args.length ? args[pi] : nil
    end
    Tina4::SafeString.new(engine.send(:render_tokens, captured_body.dup, macro_ctx))
  }

  i
end
1509
-
1510
# {% from "file" import macro1, macro2 %}
# Load another template and register only the named macros into the
# current context. Macro bodies are wrapped with _make_macro_fn so each
# closure gets its own dup'd context snapshot.
def handle_from_import(content, context)
  m = content.match(FROM_IMPORT_RE)
  return unless m

  filename = m[1]
  names = m[2].split(",").map(&:strip).reject(&:empty?)

  source = load_template(filename)
  tokens = tokenize(source)

  i = 0
  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, _, _ = strip_tag(raw)
      tag = (tag_content.split[0] || "")
      if tag == "macro"
        macro_m = tag_content.match(MACRO_RE)
        if macro_m && names.include?(macro_m[1])
          macro_name = macro_m[1]
          param_names = macro_m[2].split(",").map(&:strip).reject(&:empty?)

          # Collect the macro body up to (and consuming) endmacro.
          body_tokens = []
          i += 1
          while i < tokens.length
            if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
              i += 1
              break
            end
            body_tokens << tokens[i]
            i += 1
          end

          context[macro_name] = _make_macro_fn(body_tokens.dup, param_names.dup, context.dup)
          # i already points past endmacro; skip the loop's increment.
          next
        end
      end
    end
    i += 1
  end
end
1552
-
1553
# Build an isolated lambda for a macro — avoids closure-in-loop variable
# sharing. The caller supplies a context snapshot; each invocation dups
# it again, binds positional params (missing args => nil), and renders
# the captured body as a SafeString.
def _make_macro_fn(body_tokens, param_names, ctx)
  engine = self
  lambda do |*args|
    local_ctx = ctx.dup
    param_names.each_with_index do |pname, idx|
      local_ctx[pname] = idx < args.length ? args[idx] : nil
    end
    Tina4::SafeString.new(engine.send(:render_tokens, body_tokens.dup, local_ctx))
  end
end
1564
-
1565
# {% cache "key" ttl %}...{% endcache %}
# Fragment caching: on a hit (entry present and not expired) the body
# tokens are skipped entirely and the cached HTML returned; otherwise
# the body is rendered and stored in @fragment_cache as
# [html, expires_at]. ttl defaults to 60 seconds, key to "default".
def handle_cache(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  m = content.match(CACHE_RE)
  cache_key = m ? m[1] : "default"
  ttl = m && m[2] ? m[2].to_i : 60

  # Check cache
  cached = @fragment_cache[cache_key]
  if cached
    html_content, expires_at = cached
    if Time.now.to_f < expires_at
      # Skip to endcache (tracking nested cache blocks) without rendering.
      i = start + 1
      depth = 0
      while i < tokens.length
        if tokens[i][0] == BLOCK
          tc, _, _ = strip_tag(tokens[i][1])
          tag = tc.split[0] || ""
          if tag == "cache"
            depth += 1
          elsif tag == "endcache"
            return [html_content, i + 1] if depth == 0
            depth -= 1
          end
        end
        i += 1
      end
      return [html_content, i]
    end
    # Expired entry: fall through and re-render (it will be overwritten).
  end

  # Collect the body up to the matching endcache.
  body_tokens = []
  i = start + 1
  depth = 0
  while i < tokens.length
    if tokens[i][0] == BLOCK
      tc, _, _ = strip_tag(tokens[i][1])
      tag = tc.split[0] || ""
      if tag == "cache"
        depth += 1
        body_tokens << tokens[i]
      elsif tag == "endcache"
        if depth == 0
          i += 1
          break
        end
        depth -= 1
        body_tokens << tokens[i]
      else
        body_tokens << tokens[i]
      end
    else
      body_tokens << tokens[i]
    end
    i += 1
  end

  rendered = render_tokens(body_tokens.dup, context)
  @fragment_cache[cache_key] = [rendered, Time.now.to_f + ttl]
  [rendered, i]
end
1627
-
1628
# {% spaceless %}...{% endspaceless %}
# Render the body (tracking nested spaceless blocks), then collapse
# whitespace between adjacent HTML tags via SPACELESS_RE.
def handle_spaceless(tokens, start, context)
  body = []
  idx = start + 1
  nesting = 0

  while idx < tokens.length
    ttype, raw = tokens[idx]
    if ttype == BLOCK
      tag_content, _, _ = strip_tag(raw)
      case tag_content.split[0] || ""
      when "spaceless"
        nesting += 1
        body << tokens[idx]
      when "endspaceless"
        if nesting.zero?
          idx += 1
          break
        end
        nesting -= 1
        body << tokens[idx]
      else
        body << tokens[idx]
      end
    else
      body << tokens[idx]
    end
    idx += 1
  end

  squeezed = render_tokens(body.dup, context).gsub(SPACELESS_RE, "><")
  [squeezed, idx]
end
1659
-
1660
# {% autoescape false %}...{% endautoescape %}
# Toggle HTML auto-escaping for the body; any mode other than "false"
# leaves escaping on. Returns [rendered_html, next_token_index].
def handle_autoescape(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  mode_match = content.match(AUTOESCAPE_RE)
  auto_escape_on = !(mode_match && mode_match[1] == "false")

  # Collect the body up to the matching endautoescape.
  body_tokens = []
  i = start + 1
  depth = 0
  while i < tokens.length
    if tokens[i][0] == BLOCK
      tc, _, _ = strip_tag(tokens[i][1])
      tag = tc.split[0] || ""
      if tag == "autoescape"
        depth += 1
        body_tokens << tokens[i]
      elsif tag == "endautoescape"
        if depth == 0
          i += 1
          break
        end
        depth -= 1
        body_tokens << tokens[i]
      else
        body_tokens << tokens[i]
      end
    else
      body_tokens << tokens[i]
    end
    i += 1
  end

  if auto_escape_on
    rendered = render_tokens(body_tokens.dup, context)
  else
    # Fix: restore @auto_escape in an ensure block so an exception
    # raised while rendering the body cannot leave escaping disabled
    # for the rest of the template.
    old_auto_escape = @auto_escape
    @auto_escape = false
    begin
      rendered = render_tokens(body_tokens.dup, context)
    ensure
      @auto_escape = old_auto_escape
    end
  end

  [rendered, i]
end
1702
-
1703
# -----------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------

# Template truthiness: nil, false, 0, "" and empty collections are
# falsy; everything else is truthy.
def truthy?(val)
  falsy = val.nil? || val == false || val == 0 || val == "" ||
          (val.respond_to?(:empty?) && val.empty?)
  !falsy
end
1712
-
1713
# Return a copy of +hash+ with every key converted to a String; any
# non-Hash input yields {}.
def stringify_keys(hash)
  return {} unless hash.is_a?(Hash)

  hash.transform_keys(&:to_s)
end
1717
-
1718
# -----------------------------------------------------------------------
# Built-in filters (53 total)
# -----------------------------------------------------------------------

# Hash of filter name => lambda(value, *args). Every lambda tolerates
# missing args and coerces the value as needed.
#
# Fix (js_escape): the old replacements gsub("'", "\\'") and
# gsub('"', '\\"') were broken — in a String#gsub replacement string
# `\'` means the post-match ($'), so a single quote was replaced by the
# remainder of the string, and the backslash before `"` was dropped.
# Block-form gsub inserts the replacement text literally.
def default_filters
  {
    # -- Text --
    "upper" => ->(v, *_a) { v.to_s.upcase },
    "lower" => ->(v, *_a) { v.to_s.downcase },
    "capitalize" => ->(v, *_a) { v.to_s.capitalize },
    "title" => ->(v, *_a) { v.to_s.split.map(&:capitalize).join(" ") },
    "trim" => ->(v, *_a) { v.to_s.strip },
    "ltrim" => ->(v, *_a) { v.to_s.lstrip },
    "rtrim" => ->(v, *_a) { v.to_s.rstrip },
    "replace" => ->(v, *a) {
      # Hash form: replace({old => new, ...}); positional: replace(old, new).
      if a.length == 1 && a[0].is_a?(Hash)
        result = v.to_s
        a[0].each { |old, new_val| result = result.gsub(old.to_s, new_val.to_s) }
        result
      elsif a.length >= 2
        v.to_s.gsub(a[0].to_s, a[1].to_s)
      else
        v.to_s
      end
    },
    "striptags" => ->(v, *_a) { v.to_s.gsub(STRIPTAGS_RE, "") },

    # -- Encoding --
    "escape" => ->(v, *_a) { Frond.escape_html(v.to_s) },
    "e" => ->(v, *_a) { Frond.escape_html(v.to_s) },
    "raw" => ->(v, *_a) { v },
    "safe" => ->(v, *_a) { v },
    "json_encode" => ->(v, *_a) { JSON.generate(v) rescue v.to_s },
    "json_decode" => ->(v, *_a) { v.is_a?(String) ? (JSON.parse(v) rescue v) : v },
    "base64_encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
    "base64encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
    "base64_decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
    "base64decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
    "data_uri" => ->(v, *_a) {
      # Expects {type:, content:} (symbol or string keys); content may be an IO.
      if v.is_a?(Hash)
        ct = v[:type] || v["type"] || "application/octet-stream"
        raw = v[:content] || v["content"] || ""
        raw = raw.respond_to?(:read) ? raw.read : raw
        "data:#{ct};base64,#{Base64.strict_encode64(raw.to_s)}"
      else
        v.to_s
      end
    },
    "url_encode" => ->(v, *_a) { CGI.escape(v.to_s) },

    # -- JSON / JS --
    "to_json" => ->(v, *a) {
      indent = a[0] ? a[0].to_i : nil
      json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
      # Escape <, >, & for safe HTML embedding
      # (single-quoted '\u003c' etc. are literal JSON unicode escapes).
      Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
    },
    "tojson" => ->(v, *a) {
      indent = a[0] ? a[0].to_i : nil
      json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
      Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
    },
    "js_escape" => ->(v, *_a) {
      # Block form so replacements are inserted verbatim (no \' / \&
      # special handling inside replacement strings).
      Tina4::SafeString.new(
        v.to_s.gsub("\\") { "\\\\" }.gsub("'") { "\\'" }.gsub('"') { "\\\"" }
         .gsub("\n", "\\n").gsub("\r", "\\r").gsub("\t", "\\t")
      )
    },

    # -- Hashing --
    "md5" => ->(v, *_a) { Digest::MD5.hexdigest(v.to_s) },
    "sha256" => ->(v, *_a) { Digest::SHA256.hexdigest(v.to_s) },

    # -- Numbers --
    "abs" => ->(v, *_a) { v.is_a?(Numeric) ? v.abs : v.to_f.abs },
    "round" => ->(v, *a) { v.to_f.round(a[0] ? a[0].to_i : 0) },
    "int" => ->(v, *_a) { v.to_i },
    "float" => ->(v, *_a) { v.to_f },
    "number_format" => ->(v, *a) {
      decimals = a[0] ? a[0].to_i : 0
      formatted = format("%.#{decimals}f", v.to_f)
      # Add comma thousands separator
      parts = formatted.split(".")
      parts[0] = parts[0].gsub(THOUSANDS_RE, '\\1,')
      parts.join(".")
    },

    # -- Date --
    "date" => ->(v, *a) {
      fmt = a[0] || "%Y-%m-%d"
      begin
        if v.is_a?(String)
          dt = DateTime.parse(v)
          dt.strftime(fmt)
        elsif v.respond_to?(:strftime)
          v.strftime(fmt)
        else
          v.to_s
        end
      rescue
        # Unparseable input falls back to its string form.
        v.to_s
      end
    },

    # -- Arrays --
    "length" => ->(v, *_a) { v.respond_to?(:length) ? v.length : v.to_s.length },
    "first" => ->(v, *_a) { v.respond_to?(:first) ? v.first : (v.to_s[0] rescue nil) },
    "last" => ->(v, *_a) { v.respond_to?(:last) ? v.last : (v.to_s[-1] rescue nil) },
    "reverse" => ->(v, *_a) { v.respond_to?(:reverse) ? v.reverse : v.to_s.reverse },
    "sort" => ->(v, *_a) { v.respond_to?(:sort) ? v.sort : v },
    "shuffle" => ->(v, *_a) { v.respond_to?(:shuffle) ? v.shuffle : v },
    "unique" => ->(v, *_a) { v.is_a?(Array) ? v.uniq : v },
    "join" => ->(v, *a) { v.respond_to?(:join) ? v.join(a[0] || ", ") : v.to_s },
    "split" => ->(v, *a) { v.to_s.split(a[0] || " ") },
    "slice" => ->(v, *a) {
      # slice(start, end) — end-exclusive, works on arrays and strings.
      if a.length >= 2
        s = a[0].to_i
        e = a[1].to_i
        if v.is_a?(Array)
          v[s...e]
        else
          v.to_s[s...e]
        end
      else
        v
      end
    },
    "batch" => ->(v, *a) {
      if a[0] && v.respond_to?(:each_slice)
        v.each_slice(a[0].to_i).to_a
      else
        [v]
      end
    },
    "map" => ->(v, *a) {
      # Project a key out of each hash element (string or symbol key).
      if a[0] && v.is_a?(Array)
        v.map { |item| item.is_a?(Hash) ? (item[a[0]] || item[a[0].to_sym]) : nil }
      else
        v
      end
    },
    "filter" => ->(v, *_a) { v.is_a?(Array) ? v.select { |item| item } : v },
    "column" => ->(v, *a) {
      if a[0] && v.is_a?(Array)
        v.map { |row| row.is_a?(Hash) ? (row[a[0]] || row[a[0].to_sym]) : nil }
      else
        v
      end
    },

    # -- Dict --
    "keys" => ->(v, *_a) { v.respond_to?(:keys) ? v.keys : [] },
    "values" => ->(v, *_a) { v.respond_to?(:values) ? v.values : [v] },
    "merge" => ->(v, *a) {
      if v.respond_to?(:merge) && a[0].is_a?(Hash)
        v.merge(a[0])
      elsif v.is_a?(Array) && a[0].is_a?(Array)
        v + a[0]
      else
        v
      end
    },

    # -- Utility --
    "default" => ->(v, *a) { (v.nil? || v.to_s.empty?) ? (a[0] || "") : v },
    # dump filter — gated on TINA4_DEBUG=true via Frond.render_dump.
    # Both the |dump filter and the dump() global delegate to the same
    # helper so they produce identical output and obey the same gating.
    "dump" => ->(v, *_a) { Frond.render_dump(v) },
    "string" => ->(v, *_a) { v.to_s },
    "truncate" => ->(v, *a) {
      len = a[0] ? a[0].to_i : 50
      str = v.to_s
      str.length > len ? str[0...len] + "..." : str
    },
    "wordwrap" => ->(v, *a) {
      # Greedy word wrap at the given width (default 75 columns).
      width = a[0] ? a[0].to_i : 75
      words = v.to_s.split
      lines = []
      current = +""
      words.each do |word|
        if !current.empty? && current.length + 1 + word.length > width
          lines << current
          current = word
        else
          current = current.empty? ? word : "#{current} #{word}"
        end
      end
      lines << current unless current.empty?
      lines.join("\n")
    },
    "slug" => ->(v, *_a) { v.to_s.downcase.gsub(SLUG_CLEAN_RE, "-").gsub(SLUG_TRIM_RE, "") },
    "nl2br" => ->(v, *_a) { v.to_s.gsub("\n", "<br>\n") },
    "format" => ->(v, *a) {
      # sprintf-style: "%s-%s"|format(a, b)
      if a.any?
        v.to_s % a
      else
        v.to_s
      end
    },
    "form_token" => ->(_v, *_a) { Frond.generate_form_token(_v.to_s) },
  }
end
1921
-
1922
- # -----------------------------------------------------------------------
1923
- # Built-in globals
1924
- # -----------------------------------------------------------------------
1925
-
1926
- def register_builtin_globals
1927
- @globals["form_token"] = ->(descriptor = "") { Frond.generate_form_token(descriptor.to_s) }
1928
- @globals["formTokenValue"] = ->(descriptor = "") { Frond.generate_form_token_value(descriptor.to_s) }
1929
- @globals["form_token_value"] = ->(descriptor = "") { Frond.generate_form_token_value(descriptor.to_s) }
1930
-
1931
- # Debug helper: {{ dump(x) }} gated on TINA4_DEBUG=true.
1932
- # Both this global and the |dump filter call Frond.render_dump which
1933
- # returns an empty SafeString in production so dump never leaks state.
1934
- @globals["dump"] = ->(value = nil) { Frond.render_dump(value) }
1935
- end
1936
-
1937
- # Render a value as a pre-formatted inspect() wrapped in <pre> tags.
1938
- #
1939
- # Gated on TINA4_DEBUG=true. In production (TINA4_DEBUG unset or false)
1940
- # this returns an empty SafeString to avoid leaking internal state,
1941
- # object shapes, or sensitive values into rendered HTML.
1942
- #
1943
- # Shared by the {{ value|dump }} filter and the {{ dump(value) }}
1944
- # global function so both produce identical output and obey the same
1945
- # gating.
1946
- def self.render_dump(value)
1947
- return SafeString.new("") unless ENV.fetch("TINA4_DEBUG", "").downcase == "true"
1948
-
1949
- dumped = value.inspect
1950
- escaped = dumped
1951
- .gsub("&", "&amp;")
1952
- .gsub("<", "&lt;")
1953
- .gsub(">", "&gt;")
1954
- .gsub('"', "&quot;")
1955
- SafeString.new("<pre>#{escaped}</pre>")
1956
- end
1957
-
1958
    # NOTE(review): the @param/@return block below documents
    # generate_form_token (defined further down); it was left stranded
    # above this state declaration — kept verbatim for reference.
    #
    # Generate a JWT form token and return a hidden input element.
    #
    # @param descriptor [String] Optional string to enrich the token payload.
    #   - Empty: payload is {"type" => "form"}
    #   - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
    #   - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
    #
    # @return [String] <input type="hidden" name="formToken" value="TOKEN">

    # Session ID used by generate_form_token for CSRF session binding.
    # Set this before rendering templates to bind tokens to the current session.
    @form_token_session_id = ""

    class << self
      attr_accessor :form_token_session_id

      # Set the session ID used for CSRF form token binding.
      # Parity with Python/PHP/Node: Frond.set_form_token_session_id(id)
      #
      # @param session_id [String] The session ID to bind form tokens to
      def set_form_token_session_id(session_id)
        self.form_token_session_id = session_id
      end
    end
1981
-
1982
- # Generate a raw JWT form token string.
1983
- #
1984
- # @param descriptor [String] Optional string to enrich the token payload.
1985
- # - Empty: payload is {"type" => "form"}
1986
- # - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
1987
- # - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
1988
- #
1989
- # @return [String] The raw JWT token string.
1990
- def self.generate_form_jwt(descriptor = "")
1991
- require_relative "log"
1992
- require_relative "auth"
1993
-
1994
- payload = { "type" => "form", "nonce" => SecureRandom.hex(8) }
1995
- if descriptor && !descriptor.empty?
1996
- if descriptor.include?("|")
1997
- parts = descriptor.split("|", 2)
1998
- payload["context"] = parts[0]
1999
- payload["ref"] = parts[1]
2000
- else
2001
- payload["context"] = descriptor
2002
- end
2003
- end
2004
-
2005
- # Include session_id for CSRF session binding
2006
- sid = form_token_session_id.to_s
2007
- payload["session_id"] = sid unless sid.empty?
2008
-
2009
- ttl_minutes = (ENV["TINA4_TOKEN_LIMIT"] || "60").to_i
2010
- expires_in = ttl_minutes * 60
2011
- Tina4::Auth.create_token(payload, expires_in: expires_in)
2012
- end
2013
-
2014
- def self.generate_form_token(descriptor = "")
2015
- token = generate_form_jwt(descriptor)
2016
- Tina4::SafeString.new(%(<input type="hidden" name="formToken" value="#{CGI.escapeHTML(token)}">))
2017
- end
2018
-
2019
- # Return just the raw JWT form token string (no <input> wrapper).
2020
- # Registered as both formTokenValue and form_token_value template globals.
2021
- def self.generate_form_token_value(descriptor = "")
2022
- Tina4::SafeString.new(generate_form_jwt(descriptor))
2023
- end
2024
- end
2025
- end
1
+ # frozen_string_literal: true
2
+
3
+ # Tina4 Frond Engine -- Lexer, parser, and runtime.
4
+ # Zero-dependency twig-like template engine.
5
+ # Supports: variables, filters, if/elseif/else/endif, for/else/endfor,
6
+ # extends/block, include, macro, set, comments, whitespace control, tests,
7
+ # fragment caching, sandboxing, auto-escaping, custom filters/tests/globals.
8
+
9
+ require "json"
10
+ require "digest"
11
+ require "base64"
12
+ require "cgi"
13
+ require "uri"
14
+ require "date"
15
+ require "time"
16
+ require "securerandom"
17
+
18
+ module Tina4
19
  # Marker class for strings that should not be auto-escaped in Frond.
  # The renderer's auto-escape pass skips SafeString instances, so filters
  # like |raw / |safe and HTML-emitting helpers wrap their output in it.
  class SafeString < String
  end
22
+
23
+ class Frond
24
    # -- Token types ----------------------------------------------------------
    TEXT = :text
    VAR = :var # {{ ... }}
    BLOCK = :block # {% ... %}
    COMMENT = :comment # {# ... #}

    # Regex to split template source into tokens
    TOKEN_RE = /(\{%-?\s*.*?\s*-?%\})|(\{\{-?\s*.*?\s*-?\}\})|(\{#.*?#\})/m

    # HTML escape table
    HTML_ESCAPE_MAP = { "&" => "&amp;", "<" => "&lt;", ">" => "&gt;",
                        '"' => "&quot;", "'" => "&#39;" }.freeze
    HTML_ESCAPE_RE = /[&<>"']/

    # -- Compiled regex constants (optimization: avoid re-compiling in methods) --
    EXTENDS_RE = /\{%-?\s*extends\s+["'](.+?)["']\s*-?%\}/
    BLOCK_RE = /\{%-?\s*block\s+(\w+)\s*-?%\}(.*?)\{%-?\s*endblock\s*-?%\}/m
    STRING_LIT_RE = /\A["'](.*)["']\z/
    INTEGER_RE = /\A-?\d+\z/
    FLOAT_RE = /\A-?\d+\.\d+\z/
    ARRAY_LIT_RE = /\A\[(.+)\]\z/m
    HASH_LIT_RE = /\A\{(.+)\}\z/m
    HASH_PAIR_RE = /\A\s*(?:["']([^"']+)["']|(\w+))\s*:\s*(.+)\z/
    RANGE_LIT_RE = /\A(\d+)\.\.(\d+)\z/
    ARITHMETIC_OPS = [" + ", " - ", " * ", " // ", " / ", " % ", " ** "].freeze
    FUNC_CALL_RE = /\A(\w+)\s*\((.*)\)\z/m
    FILTER_WITH_ARGS_RE = /\A(\w+)\s*\((.*)\)\z/m
    # Filter segment carrying a trailing comparison, e.g. "length != 1"
    FILTER_CMP_RE = /\A(\w+)\s*(!=|==|>=|<=|>|<)\s*(.+)\z/
    OR_SPLIT_RE = /\s+or\s+/
    AND_SPLIT_RE = /\s+and\s+/
    IS_NOT_RE = /\A(.+?)\s+is\s+not\s+(\w+)(.*)\z/
    IS_RE = /\A(.+?)\s+is\s+(\w+)(.*)\z/
    NOT_IN_RE = /\A(.+?)\s+not\s+in\s+(.+)\z/
    IN_RE = /\A(.+?)\s+in\s+(.+)\z/
    DIVISIBLE_BY_RE = /\s*by\s*\(\s*(\d+)\s*\)/
    RESOLVE_SPLIT_RE = /\.|\[([^\]]+)\]/
    RESOLVE_STRIP_RE = /\A["']|["']\z/
    DIGIT_RE = /\A\d+\z/
    FOR_RE = /\Afor\s+(\w+)(?:\s*,\s*(\w+))?\s+in\s+(.+)\z/
    SET_RE = /\Aset\s+(\w+)\s*=\s*(.+)\z/m
    INCLUDE_RE = /\Ainclude\s+["'](.+?)["'](?:\s+with\s+(.+))?\z/
    MACRO_RE = /\Amacro\s+(\w+)\s*\(([^)]*)\)/
    FROM_IMPORT_RE = /\Afrom\s+["'](.+?)["']\s+import\s+(.+)/
    CACHE_RE = /\Acache\s+["'](.+?)["']\s*(\d+)?/
    SPACELESS_RE = />\s+</
    AUTOESCAPE_RE = /\Aautoescape\s+(false|true)/
    STRIPTAGS_RE = /<[^>]+>/
    # Inserts a comma before every group of three trailing digits
    THOUSANDS_RE = /(\d)(?=(\d{3})+(?!\d))/
    SLUG_CLEAN_RE = /[^a-z0-9]+/
    SLUG_TRIM_RE = /\A-|-\z/

    # Set of common no-arg filter names that can be inlined for speed
    INLINE_FILTERS = %w[upper lower length trim capitalize title string int escape e].each_with_object({}) { |f, h| h[f] = true }.freeze
77
+
78
+ # -- Lazy context overlay for for-loops (avoids full Hash#dup) --
79
+ class LoopContext
80
+ def initialize(parent)
81
+ @parent = parent
82
+ @local = {}
83
+ end
84
+
85
+ def [](key)
86
+ @local.key?(key) ? @local[key] : @parent[key]
87
+ end
88
+
89
+ def []=(key, value)
90
+ @local[key] = value
91
+ end
92
+
93
+ def key?(key)
94
+ @local.key?(key) || @parent.key?(key)
95
+ end
96
+ alias include? key?
97
+ alias has_key? key?
98
+
99
+ def fetch(key, *args, &block)
100
+ if @local.key?(key)
101
+ @local[key]
102
+ elsif @parent.key?(key)
103
+ @parent[key]
104
+ elsif block
105
+ yield key
106
+ elsif !args.empty?
107
+ args[0]
108
+ else
109
+ raise KeyError, "key not found: #{key.inspect}"
110
+ end
111
+ end
112
+
113
+ def merge(other)
114
+ dup_hash = to_h
115
+ dup_hash.merge!(other)
116
+ dup_hash
117
+ end
118
+
119
+ def merge!(other)
120
+ other.each { |k, v| @local[k] = v }
121
+ self
122
+ end
123
+
124
+ def dup
125
+ copy = LoopContext.new(@parent)
126
+ @local.each { |k, v| copy[k] = v }
127
+ copy
128
+ end
129
+
130
+ def to_h
131
+ h = @parent.is_a?(LoopContext) ? @parent.to_h : @parent.dup
132
+ @local.each { |k, v| h[k] = v }
133
+ h
134
+ end
135
+
136
+ def each(&block)
137
+ to_h.each(&block)
138
+ end
139
+
140
+ def respond_to_missing?(name, include_private = false)
141
+ @parent.respond_to?(name, include_private) || super
142
+ end
143
+
144
+ def is_a?(klass)
145
+ klass == Hash || super
146
+ end
147
+
148
+ def keys
149
+ (@parent.is_a?(LoopContext) ? @parent.keys : @parent.keys) | @local.keys
150
+ end
151
+ end
152
+
153
+ # -----------------------------------------------------------------------
154
+ # Public API
155
+ # -----------------------------------------------------------------------
156
+
157
+ attr_reader :template_dir
158
+
159
+ def initialize(template_dir: "src/templates")
160
+ @template_dir = template_dir
161
+ @filters = default_filters
162
+ @globals = {}
163
+ @tests = default_tests
164
+ @auto_escape = true
165
+
166
+ # Sandboxing
167
+ @sandbox = false
168
+ @allowed_filters = nil
169
+ @allowed_tags = nil
170
+ @allowed_vars = nil
171
+
172
+ # Fragment cache: key => [html, expires_at]
173
+ @fragment_cache = {}
174
+
175
+ # Token pre-compilation cache
176
+ @compiled = {} # {template_name => [tokens, mtime]}
177
+ @compiled_strings = {} # {md5_hash => tokens}
178
+
179
+ # Parsed filter chain cache: expr_string => [variable, filters]
180
+ @filter_chain_cache = {}
181
+
182
+ # Resolved dotted-path split cache: expr_string => parts_array
183
+ @resolve_cache = {}
184
+
185
+ # Sandbox root-var split cache: var_name => root_var_string
186
+ @dotted_split_cache = {}
187
+
188
+ # Built-in global functions
189
+ register_builtin_globals
190
+ end
191
+
192
    # Render a template file with data. Uses token caching for performance.
    #
    # @param template [String] path relative to @template_dir
    # @param data [Hash] template variables (symbol or string keys accepted)
    # @return [String] rendered output
    # @raise [RuntimeError] when the template file does not exist
    def render(template, data = {})
      # Globals first so caller-supplied data can shadow them.
      context = @globals.merge(stringify_keys(data))

      path = File.join(@template_dir, template)
      raise "Template not found: #{path}" unless File.exist?(path)

      debug_mode = ENV.fetch("TINA4_DEBUG", "").downcase == "true"

      unless debug_mode
        # Production: use permanent cache (no filesystem checks)
        cached = @compiled[template]
        return execute_cached(cached[0], context) if cached
      end
      # Dev mode: skip cache entirely — always re-read and re-tokenize
      # so edits to partials and extended base templates are detected

      # Cache miss — load, tokenize, cache
      source = File.read(path, encoding: "utf-8")
      mtime = File.mtime(path)
      tokens = tokenize(source)
      # NOTE(review): mtime is stored but never consulted on the cached
      # path above — the production cache is deliberately permanent.
      @compiled[template] = [tokens, mtime]
      execute_with_tokens(source, tokens, context)
    end
216
+
217
+ # Render a template string directly. Uses token caching for performance.
218
+ def render_string(source, data = {})
219
+ context = @globals.merge(stringify_keys(data))
220
+
221
+ key = Digest::MD5.hexdigest(source)
222
+ cached_tokens = @compiled_strings[key]
223
+
224
+ if cached_tokens
225
+ return execute_cached(cached_tokens, context)
226
+ end
227
+
228
+ tokens = tokenize(source)
229
+ @compiled_strings[key] = tokens
230
+ execute_cached(tokens, context)
231
+ end
232
+
233
    # Clear all compiled template caches: compiled file/string tokens plus
    # the parse-level memoizations (filter chains, dotted-path splits).
    # Call after changing templates at runtime outside of TINA4_DEBUG mode.
    def clear_cache
      @compiled.clear
      @compiled_strings.clear
      @filter_chain_cache.clear
      @resolve_cache.clear
      @dotted_split_cache.clear
    end
241
+
242
+ # Register a custom filter.
243
+ def add_filter(name, &blk)
244
+ @filters[name.to_s] = blk
245
+ end
246
+
247
+ # Register a custom test.
248
+ def add_test(name, &blk)
249
+ @tests[name.to_s] = blk
250
+ end
251
+
252
+ # Register a global variable available in all templates.
253
+ def add_global(name, value)
254
+ @globals[name.to_s] = value
255
+ end
256
+
257
+ # Enable sandbox mode.
258
+ def sandbox(filters: nil, tags: nil, vars: nil)
259
+ @sandbox = true
260
+ @allowed_filters = filters ? filters.map(&:to_s) : nil
261
+ @allowed_tags = tags ? tags.map(&:to_s) : nil
262
+ @allowed_vars = vars ? vars.map(&:to_s) : nil
263
+ self
264
+ end
265
+
266
+ # Disable sandbox mode.
267
+ def unsandbox
268
+ @sandbox = false
269
+ @allowed_filters = nil
270
+ @allowed_tags = nil
271
+ @allowed_vars = nil
272
+ self
273
+ end
274
+
275
+ # Utility: HTML escape
276
+ def self.escape_html(str)
277
+ str.to_s.gsub(HTML_ESCAPE_RE, HTML_ESCAPE_MAP)
278
+ end
279
+
280
+ private
281
+
282
+ # -----------------------------------------------------------------------
283
+ # Tokenizer
284
+ # -----------------------------------------------------------------------
285
+
286
    # Regex to extract {% raw %}...{% endraw %} blocks before tokenizing
    RAW_BLOCK_RE = /\{%-?\s*raw\s*-?%\}(.*?)\{%-?\s*endraw\s*-?%\}/m

    # Split template source into an array of [type, raw] token pairs,
    # where type is TEXT, VAR, BLOCK, or COMMENT. Content inside
    # {% raw %}...{% endraw %} is protected from tokenization via
    # NUL-delimited placeholders and restored as literal TEXT afterwards.
    def tokenize(source)
      # 1. Extract raw blocks and replace with placeholders
      raw_blocks = []
      source = source.gsub(RAW_BLOCK_RE) do
        idx = raw_blocks.length
        raw_blocks << Regexp.last_match(1)
        "\x00RAW_#{idx}\x00"
      end

      # 2. Normal tokenization: walk every tag match, emitting the plain
      #    text between matches as TEXT tokens.
      tokens = []
      pos = 0
      source.scan(TOKEN_RE) do
        m = Regexp.last_match
        start = m.begin(0)
        tokens << [TEXT, source[pos...start]] if start > pos

        raw = m[0]
        if raw.start_with?("{#")
          tokens << [COMMENT, raw]
        elsif raw.start_with?("{{")
          tokens << [VAR, raw]
        elsif raw.start_with?("{%")
          tokens << [BLOCK, raw]
        end
        pos = m.end(0)
      end
      tokens << [TEXT, source[pos..]] if pos < source.length

      # 3. Restore raw block placeholders as literal TEXT
      unless raw_blocks.empty?
        tokens = tokens.map do |ttype, value|
          if ttype == TEXT && value.include?("\x00RAW_")
            raw_blocks.each_with_index do |content, idx|
              value = value.gsub("\x00RAW_#{idx}\x00", content)
            end
          end
          [ttype, value]
        end
      end

      tokens
    end
332
+
333
+ # Strip delimiters from a tag and detect whitespace control markers.
334
+ # Returns [content, strip_before, strip_after].
335
+ def strip_tag(raw)
336
+ inner = raw[2..-3] # remove {{ }} or {% %} or {# #}
337
+ strip_before = false
338
+ strip_after = false
339
+
340
+ if inner.start_with?("-")
341
+ strip_before = true
342
+ inner = inner[1..]
343
+ end
344
+ if inner.end_with?("-")
345
+ strip_after = true
346
+ inner = inner[0..-2]
347
+ end
348
+
349
+ [inner.strip, strip_before, strip_after]
350
+ end
351
+
352
+ # -----------------------------------------------------------------------
353
+ # Template loading
354
+ # -----------------------------------------------------------------------
355
+
356
+ def load_template(name)
357
+ path = File.join(@template_dir, name)
358
+ raise "Template not found: #{path}" unless File.exist?(path)
359
+
360
+ File.read(path, encoding: "utf-8")
361
+ end
362
+
363
+ # -----------------------------------------------------------------------
364
+ # Execution
365
+ # -----------------------------------------------------------------------
366
+
367
    # Execute a pre-compiled token stream. Templates that start with an
    # {% extends %} tag are re-joined back to source text, because block
    # extraction (extract_blocks) operates on source, not tokens.
    def execute_cached(tokens, context)
      # Check if first non-text token is an extends block
      tokens.each do |ttype, raw|
        next if ttype == TEXT && raw.strip.empty?
        if ttype == BLOCK
          content, _, _ = strip_tag(raw)
          if content.start_with?("extends ")
            # Extends requires source-based execution for block extraction
            source = tokens.map { |_, v| v }.join
            return execute(source, context)
          end
        end
        # Only the first significant token can be extends; stop looking.
        break
      end
      render_tokens(tokens, context)
    end
383
+
384
+ def execute_with_tokens(source, tokens, context)
385
+ # Handle extends first
386
+ if source =~ EXTENDS_RE
387
+ parent_name = Regexp.last_match(1)
388
+ parent_source = load_template(parent_name)
389
+ child_blocks = extract_blocks(source)
390
+ return render_with_blocks(parent_source, context, child_blocks)
391
+ end
392
+
393
+ render_tokens(tokens, context)
394
+ end
395
+
396
+ def execute(source, context)
397
+ # Handle extends first
398
+ if source =~ EXTENDS_RE
399
+ parent_name = Regexp.last_match(1)
400
+ parent_source = load_template(parent_name)
401
+ child_blocks = extract_blocks(source)
402
+ return render_with_blocks(parent_source, context, child_blocks)
403
+ end
404
+
405
+ render_tokens(tokenize(source), context)
406
+ end
407
+
408
+ def extract_blocks(source)
409
+ blocks = {}
410
+ source.scan(BLOCK_RE) do
411
+ blocks[Regexp.last_match(1)] = Regexp.last_match(2)
412
+ end
413
+ blocks
414
+ end
415
+
416
    # Render a parent template, substituting child {% block %} overrides.
    # Inside a child block, parent()/super() render the parent's original
    # block content (memoized per block, wrapped in SafeString so it is
    # not re-escaped).
    def render_with_blocks(parent_source, context, child_blocks)
      engine = self
      result = parent_source.gsub(BLOCK_RE) do
        name = Regexp.last_match(1)
        parent_content = Regexp.last_match(2)
        # Fall back to the parent's own content when the child does not
        # override this block.
        block_source = child_blocks.fetch(name, parent_content)

        # Make parent() and super() available inside child blocks
        rendered_parent = nil
        get_parent = lambda do
          rendered_parent ||= Tina4::SafeString.new(
            engine.send(:render_tokens, tokenize(parent_content), context)
          )
          rendered_parent
        end

        block_ctx = context.merge("parent" => get_parent, "super" => get_parent)
        render_tokens(tokenize(block_source), block_ctx)
      end
      # Second pass renders any template syntax remaining outside blocks.
      render_tokens(tokenize(result), context)
    end
437
+
438
+ # -----------------------------------------------------------------------
439
+ # Token renderer
440
+ # -----------------------------------------------------------------------
441
+
442
    # Core renderer: walk the token stream, evaluating {{ vars }} and
    # dispatching {% tags %} to their handlers. Handlers for paired tags
    # (if/for/cache/...) consume their whole region and return the next
    # token index. Whitespace-control markers trim adjacent TEXT tokens
    # in place (mutates the +tokens+ array).
    def render_tokens(tokens, context)
      output = []
      i = 0

      while i < tokens.length
        ttype, raw = tokens[i]

        case ttype
        when TEXT
          output << raw
          i += 1

        when COMMENT
          # {# ... #} produces no output
          i += 1

        when VAR
          content, strip_b, strip_a = strip_tag(raw)
          # "-" before the tag: trim trailing whitespace of previous text
          output[-1] = output[-1].rstrip if strip_b && !output.empty?

          result = eval_var(content, context)
          output << (result.nil? ? "" : result.to_s)

          # "-" after the tag: trim leading whitespace of following text
          if strip_a && i + 1 < tokens.length && tokens[i + 1][0] == TEXT
            tokens[i + 1] = [TEXT, tokens[i + 1][1].lstrip]
          end
          i += 1

        when BLOCK
          content, strip_b, strip_a = strip_tag(raw)
          output[-1] = output[-1].rstrip if strip_b && !output.empty?

          tag = content.split[0] || ""

          case tag
          when "if"
            result, i = handle_if(tokens, i, context)
            output << result
          when "for"
            result, i = handle_for(tokens, i, context)
            output << result
          when "set"
            handle_set(content, context)
            i += 1
          when "include"
            # Sandbox may forbid the include tag entirely.
            if @sandbox && @allowed_tags && !@allowed_tags.include?("include")
              i += 1
            else
              output << handle_include(content, context)
              i += 1
            end
          when "macro"
            i = handle_macro(tokens, i, context)
          when "from"
            handle_from_import(content, context)
            i += 1
          when "cache"
            result, i = handle_cache(tokens, i, context)
            output << result
          when "spaceless"
            result, i = handle_spaceless(tokens, i, context)
            output << result
          when "autoescape"
            result, i = handle_autoescape(tokens, i, context)
            output << result
          when "block", "endblock", "extends"
            # Inheritance tags are resolved earlier; ignore at this stage.
            i += 1
          else
            # Unknown tag: skip silently
            i += 1
          end

          # Note: i already points past the handled region here.
          if strip_a && i < tokens.length && tokens[i][0] == TEXT
            tokens[i] = [TEXT, tokens[i][1].lstrip]
          end
        else
          i += 1
        end
      end

      output.join
    end
522
+
523
+ # -----------------------------------------------------------------------
524
+ # Variable evaluation
525
+ # -----------------------------------------------------------------------
526
+
527
+ def eval_var(expr, context)
528
+ # Check for top-level ternary BEFORE splitting filters so that
529
+ # expressions like ``products|length != 1 ? "s" : ""`` work correctly.
530
+ ternary_pos = find_ternary(expr)
531
+ if ternary_pos != -1
532
+ cond_part = expr[0...ternary_pos].strip
533
+ rest = expr[(ternary_pos + 1)..]
534
+ colon_pos = find_colon(rest)
535
+ if colon_pos != -1
536
+ true_part = rest[0...colon_pos].strip
537
+ false_part = rest[(colon_pos + 1)..].strip
538
+ cond = eval_var_raw(cond_part, context)
539
+ return truthy?(cond) ? eval_var(true_part, context) : eval_var(false_part, context)
540
+ end
541
+ end
542
+
543
+ eval_var_inner(expr, context)
544
+ end
545
+
546
    # Evaluate an expression plus its filter chain WITHOUT auto-escaping
    # (used for ternary conditions, where the raw truthiness matters).
    # raw/safe markers are no-ops here.
    def eval_var_raw(expr, context)
      var_name, filters = parse_filter_chain(expr)
      value = eval_expr(var_name, context)
      filters.each do |fname, args|
        next if fname == "raw" || fname == "safe"

        # Filter + property-access chain: `first.groupSummary` — apply
        # the filter, then traverse the path on the result using a
        # synthetic context so eval_expr's dotted resolution does the
        # work. Parity with tina4-python + tina4-php.
        real_fname, tail_path = split_filter_name_and_path(fname)
        if !tail_path.empty? && @filters[real_fname]
          evaluated_args = args.map { |a| eval_filter_arg(a, context) }
          value = @filters[real_fname].call(value, *evaluated_args)
          value = eval_expr("__frond_filter_tmp.#{tail_path}",
                            { "__frond_filter_tmp" => value })
          next
        end

        fn = @filters[fname]
        if fn
          evaluated_args = args.map { |a| eval_filter_arg(a, context) }
          value = fn.call(value, *evaluated_args)
        else
          # The filter name may include a trailing comparison operator,
          # e.g. "length != 1". Extract the real filter name and the
          # comparison suffix, apply the filter, then evaluate the comparison.
          m = fname.match(FILTER_CMP_RE)
          if m
            real_filter = m[1]
            op = m[2]
            right_expr = m[3].strip
            fn2 = @filters[real_filter]
            if fn2
              evaluated_args = args.map { |a| eval_filter_arg(a, context) }
              value = fn2.call(value, *evaluated_args)
            end
            right = eval_expr(right_expr, context)
            # NOTE(review): the modifier rescue turns comparison errors
            # (e.g. comparing incompatible types) into false — deliberate
            # template-level lenience.
            value = case op
                    when "!=" then value != right
                    when "==" then value == right
                    when ">=" then value >= right
                    when "<=" then value <= right
                    when ">" then value > right
                    when "<" then value < right
                    else false
                    end rescue false
          else
            # Not a filter at all: treat the segment as a plain expression.
            value = eval_expr(fname, context)
          end
        end
      end
      value
    end
600
+
601
    # Evaluate an expression plus its filter chain, honoring sandbox
    # restrictions and applying HTML auto-escaping unless the value is a
    # SafeString or a raw/safe filter was used.
    def eval_var_inner(expr, context)
      var_name, filters = parse_filter_chain(expr)

      # Sandbox: check variable access
      if @sandbox && @allowed_vars
        root_var = @dotted_split_cache[var_name]
        unless root_var
          root_var = var_name.split(".")[0].split("[")[0].strip
          @dotted_split_cache[var_name] = root_var
        end
        # "loop" is always allowed inside for-loops.
        return "" if !root_var.empty? && !@allowed_vars.include?(root_var) && root_var != "loop"
      end

      value = eval_expr(var_name, context)

      is_safe = false
      filters.each do |fname, args|
        if fname == "raw" || fname == "safe"
          is_safe = true
          next
        end

        # Sandbox: check filter access
        if @sandbox && @allowed_filters && !@allowed_filters.include?(fname)
          next
        end

        # Filter + property-access chain: `first.groupSummary` — apply
        # the filter, then traverse the path on the result. Done BEFORE
        # the inline fast-path so cases like `items|first.name` work
        # regardless of whether `first` is an inline filter too.
        real_fname, tail_path = split_filter_name_and_path(fname)
        if !tail_path.empty? && @filters[real_fname]
          evaluated_args = args.map { |a| eval_filter_arg(a, context) }
          value = @filters[real_fname].call(value, *evaluated_args)
          value = eval_expr("__frond_filter_tmp.#{tail_path}",
                            { "__frond_filter_tmp" => value })
          next
        end

        # Inline common no-arg filters for speed (skip generic dispatch)
        if args.empty? && INLINE_FILTERS.include?(fname)
          value = case fname
                  when "upper" then value.to_s.upcase
                  when "lower" then value.to_s.downcase
                  when "length" then value.respond_to?(:length) ? value.length : value.to_s.length
                  when "trim" then value.to_s.strip
                  when "capitalize" then value.to_s.capitalize
                  when "title" then value.to_s.split.map(&:capitalize).join(" ")
                  when "string" then value.to_s
                  when "int" then value.to_i
                  when "escape", "e" then Frond.escape_html(value.to_s)
                  else value
                  end
          next
        end

        fn = @filters[fname]
        if fn
          evaluated_args = args.map { |a| eval_filter_arg(a, context) }
          value = fn.call(value, *evaluated_args)
        end
      end

      # Auto-escape HTML unless marked safe or SafeString
      if @auto_escape && !is_safe && value.is_a?(String) && !value.is_a?(SafeString)
        value = Frond.escape_html(value)
      end

      value
    end
672
+
673
+ def eval_filter_arg(arg, context)
674
+ return Regexp.last_match(1) if arg =~ STRING_LIT_RE
675
+ return arg.to_i if arg =~ INTEGER_RE
676
+ return arg.to_f if arg =~ FLOAT_RE
677
+ eval_expr(arg, context)
678
+ end
679
+
680
+ # Find the first occurrence of +needle+ that is not inside quotes or
681
+ # parentheses. Returns the index, or -1 if not found.
682
+ def find_outside_quotes(expr, needle)
683
+ in_q = nil
684
+ depth = 0
685
+ bracket_depth = 0
686
+ i = 0
687
+ nlen = needle.length
688
+ while i <= expr.length - nlen
689
+ ch = expr[i]
690
+ if (ch == '"' || ch == "'") && depth == 0
691
+ if in_q.nil?
692
+ in_q = ch
693
+ elsif ch == in_q
694
+ in_q = nil
695
+ end
696
+ i += 1
697
+ next
698
+ end
699
+ if in_q
700
+ i += 1
701
+ next
702
+ end
703
+ if ch == "("
704
+ depth += 1
705
+ elsif ch == ")"
706
+ depth -= 1
707
+ elsif ch == "["
708
+ bracket_depth += 1
709
+ elsif ch == "]"
710
+ bracket_depth -= 1
711
+ end
712
+ if depth == 0 && bracket_depth == 0 && expr[i, nlen] == needle
713
+ return i
714
+ end
715
+ i += 1
716
+ end
717
+ -1
718
+ end
719
+
720
+ # Find the index of a top-level ``?`` that is part of a ternary operator.
721
+ # Respects quoted strings, parentheses, and skips ``??`` (null coalesce).
722
+ # Returns -1 if not found.
723
+ def find_ternary(expr)
724
+ depth = 0
725
+ in_quote = nil
726
+ i = 0
727
+ len = expr.length
728
+ while i < len
729
+ ch = expr[i]
730
+ if in_quote
731
+ in_quote = nil if ch == in_quote
732
+ i += 1
733
+ next
734
+ end
735
+ if ch == '"' || ch == "'"
736
+ in_quote = ch
737
+ i += 1
738
+ next
739
+ end
740
+ if ch == "("
741
+ depth += 1
742
+ elsif ch == ")"
743
+ depth -= 1
744
+ elsif ch == "?" && depth == 0
745
+ # Skip ``??`` (null coalesce)
746
+ if i + 1 < len && expr[i + 1] == "?"
747
+ i += 2
748
+ next
749
+ end
750
+ return i
751
+ end
752
+ i += 1
753
+ end
754
+ -1
755
+ end
756
+
757
+ # Find the index of the top-level ``:`` that separates the true/false
758
+ # branches of a ternary. Respects quotes and parentheses.
759
+ def find_colon(expr)
760
+ depth = 0
761
+ in_quote = nil
762
+ expr.each_char.with_index do |ch, i|
763
+ if in_quote
764
+ in_quote = nil if ch == in_quote
765
+ next
766
+ end
767
+ if ch == '"' || ch == "'"
768
+ in_quote = ch
769
+ next
770
+ end
771
+ if ch == "("
772
+ depth += 1
773
+ elsif ch == ")"
774
+ depth -= 1
775
+ elsif ch == ":" && depth == 0
776
+ return i
777
+ end
778
+ end
779
+ -1
780
+ end
781
+
782
+ # -----------------------------------------------------------------------
783
+ # Filter chain parser
784
+ # -----------------------------------------------------------------------
785
+
786
+ # Split "first.groupSummary" into ["first", "groupSummary"] so a
787
+ # filter segment followed by property access — `{{ x | first.name }}`
788
+ # applies the filter then traverses the path on the result.
789
+ # Returns [fname, ""] when no structural dot is present.
790
+ #
791
+ # The split point must sit outside parens/brackets/braces and quotes
792
+ # so filter args like `round(1.5)` or `date("Y.m.d")` don't false-
793
+ # trigger. Parity with tina4-python and tina4-php.
794
+ def split_filter_name_and_path(fname)
795
+ depth = 0
796
+ in_q = nil
797
+ i = 0
798
+ n = fname.length
799
+ while i < n
800
+ ch = fname[i]
801
+ if in_q
802
+ in_q = nil if ch == in_q && (i.zero? || fname[i - 1] != "\\")
803
+ i += 1
804
+ next
805
+ end
806
+ case ch
807
+ when '"', "'"
808
+ in_q = ch
809
+ when "(", "[", "{"
810
+ depth += 1
811
+ when ")", "]", "}"
812
+ depth -= 1
813
+ when "."
814
+ return [fname[0...i], fname[(i + 1)..]] if depth.zero?
815
+ end
816
+ i += 1
817
+ end
818
+ [fname, ""]
819
+ end
820
+
821
+ def parse_filter_chain(expr)
822
+ cached = @filter_chain_cache[expr]
823
+ return cached if cached
824
+
825
+ parts = split_on_pipe(expr)
826
+ variable = parts[0].strip
827
+ filters = []
828
+
829
+ parts[1..].each do |f|
830
+ f = f.strip
831
+ if f =~ FILTER_WITH_ARGS_RE
832
+ name = Regexp.last_match(1)
833
+ raw_args = Regexp.last_match(2).strip
834
+ args = raw_args.empty? ? [] : parse_args(raw_args)
835
+ filters << [name, args]
836
+ else
837
+ filters << [f.strip, []]
838
+ end
839
+ end
840
+
841
+ result = [variable, filters].freeze
842
+ @filter_chain_cache[expr] = result
843
+ result
844
+ end
845
+
846
+ # Split expression on | but not inside quotes or parens.
847
# Split a filter expression on | while ignoring pipes that appear inside
# quoted strings or parentheses (e.g. join("|")).
def split_on_pipe(expr)
  segments = []
  buffer = +""
  quote = nil
  paren_depth = 0

  expr.each_char do |c|
    if quote
      buffer << c
      quote = nil if c == quote
      next
    end

    case c
    when '"', "'"
      quote = c
      buffer << c
    when "("
      paren_depth += 1
      buffer << c
    when ")"
      paren_depth -= 1
      buffer << c
    when "|"
      if paren_depth.zero?
        segments << buffer
        buffer = +""
      else
        buffer << c
      end
    else
      buffer << c
    end
  end

  segments << buffer unless buffer.empty?
  segments
end
876
+
877
# Split a raw filter-argument string on commas, respecting quoted strings
# and nested (), {}, [] groups. Each argument is returned stripped; a
# trailing all-whitespace remainder is dropped.
def parse_args(raw)
  out = []
  piece = +""
  quote = nil
  level = 0

  raw.each_char do |c|
    if quote
      quote = nil if c == quote
      piece << c
    elsif c == '"' || c == "'"
      quote = c
      piece << c
    elsif c == "(" || c == "{" || c == "["
      level += 1
      piece << c
    elsif c == ")" || c == "}" || c == "]"
      level -= 1
      piece << c
    elsif c == "," && level.zero?
      out << piece.strip
      piece = +""
    else
      piece << c
    end
  end

  trailing = piece.strip
  out << trailing unless trailing.empty?
  out
end
908
+
909
+ # -----------------------------------------------------------------------
910
+ # Expression evaluator
911
+ # -----------------------------------------------------------------------
912
+
913
+ # ── Expression evaluator (dispatcher) ──────────────────────────────
914
+ # Each expression type is handled by a focused helper method.
915
+ # Helpers return :not_matched when the expression doesn't match their
916
+ # type, so the dispatcher falls through to the next handler.
917
+
918
# Evaluate a template expression against the context.
#
# Dispatches through focused helpers in a fixed precedence order; each
# helper returns its own :not_* sentinel when the expression isn't of
# its type, so evaluation falls through to the next handler. The order
# matters: literals and collection literals win over operators, ternary
# beats inline-if, which beats ?? and ~, comparisons beat arithmetic,
# and a plain variable lookup is the final fallback.
def eval_expr(expr, context)
  expr = expr.strip
  return nil if expr.empty?

  result = eval_literal(expr)
  return result unless result == :not_literal

  result = eval_collection_literal(expr, context)
  return result unless result == :not_collection

  # Fully parenthesized expression: unwrap and re-evaluate the inside.
  return eval_expr(expr[1..-2], context) if matched_parens?(expr)

  result = eval_ternary(expr, context)
  return result unless result == :not_ternary

  result = eval_inline_if(expr, context)
  return result unless result == :not_inline_if

  result = eval_null_coalesce(expr, context)
  return result unless result == :not_coalesce

  result = eval_concat(expr, context)
  return result unless result == :not_concat

  return eval_comparison(expr, context) if has_comparison?(expr)

  result = eval_arithmetic(expr, context)
  return result unless result == :not_arithmetic

  result = eval_function_call(expr, context)
  return result unless result == :not_function

  # Plain variable / dotted-path resolution.
  resolve(expr, context)
end
952
+
953
+ # ── Literal values: strings, numbers, booleans, null ──
954
+
955
# Decode literal expressions: quoted strings, integers, floats, booleans
# and the null keywords. Returns :not_literal when expr is none of these.
def eval_literal(expr)
  quoted = (expr.start_with?('"') && expr.end_with?('"')) ||
           (expr.start_with?("'") && expr.end_with?("'"))
  return expr[1..-2] if quoted

  case expr
  when INTEGER_RE then expr.to_i
  when FLOAT_RE then expr.to_f
  when "true" then true
  when "false" then false
  when "null", "none", "nil" then nil
  else :not_literal
  end
end
967
+
968
+ # ── Collection literals: arrays, hashes, ranges ──
969
+
970
# Evaluate collection literals: arrays `[a, b]`, hashes `{k: v}` and
# integer ranges `1..5`. Elements/values are themselves evaluated as
# expressions. Returns :not_collection when expr matches none of the
# literal regexes.
def eval_collection_literal(expr, context)
  if expr =~ ARRAY_LIT_RE
    inner = Regexp.last_match(1)
    return split_args_toplevel(inner).map { |item| eval_expr(item.strip, context) }
  end
  if expr =~ HASH_LIT_RE
    inner = Regexp.last_match(1)
    hash = {}
    split_args_toplevel(inner).each do |pair|
      if pair =~ HASH_PAIR_RE
        # Key may be captured quoted (group 1) or bare (group 2);
        # pairs that don't match the key:value shape are skipped.
        key = Regexp.last_match(1) || Regexp.last_match(2)
        hash[key] = eval_expr(Regexp.last_match(3).strip, context)
      end
    end
    return hash
  end
  if expr =~ RANGE_LIT_RE
    # Ranges are materialized eagerly into an Array.
    return (Regexp.last_match(1).to_i..Regexp.last_match(2).to_i).to_a
  end
  :not_collection
end
991
+
992
+ # ── Parenthesized sub-expression check ──
993
+
994
# True when expr is one single parenthesized group — i.e. the opening
# "(" only closes at the very last character. "(a)(b)" is rejected
# because the depth returns to zero before the end.
def matched_parens?(expr)
  return false unless expr.start_with?("(") && expr.end_with?(")")

  balance = 0
  last = expr.length - 1
  expr.each_char.with_index do |c, idx|
    balance += 1 if c == "("
    balance -= 1 if c == ")"
    return false if balance.zero? && idx < last
  end
  true
end
1004
+
1005
+ # ── Ternary: condition ? "yes" : "no" ──
1006
+
1007
# Ternary expression: `condition ? when_true : when_false`.
# Both ? and : must appear outside quoted strings; otherwise the
# expression is not treated as a ternary.
def eval_ternary(expr, context)
  q_at = find_outside_quotes(expr, "?")
  return :not_ternary unless q_at && q_at > 0

  condition_src = expr[0...q_at].strip
  tail = expr[(q_at + 1)..]
  colon_at = find_outside_quotes(tail, ":")
  return :not_ternary unless colon_at && colon_at >= 0

  when_true = tail[0...colon_at].strip
  when_false = tail[(colon_at + 1)..].strip
  branch = truthy?(eval_expr(condition_src, context)) ? when_true : when_false
  eval_expr(branch, context)
end
1019
+
1020
+ # ── Inline if: value if condition else other_value ──
1021
+
1022
# Jinja-style inline conditional: `value if condition else other`.
# Both keywords must appear (outside quotes) with `else` after `if`.
def eval_inline_if(expr, context)
  if_at = find_outside_quotes(expr, " if ")
  return :not_inline_if unless if_at && if_at >= 0

  else_at = find_outside_quotes(expr, " else ")
  return :not_inline_if unless else_at && else_at > if_at

  value_src = expr[0...if_at].strip
  condition_src = expr[(if_at + 4)...else_at].strip
  fallback_src = expr[(else_at + 6)..].strip

  chosen = truthy?(eval_expr(condition_src, context)) ? value_src : fallback_src
  eval_expr(chosen, context)
end
1033
+
1034
+ # ── Null coalescing: value ?? "default" ──
1035
+
1036
# Null coalescing: `value ?? default` — returns the left side unless it
# evaluates to nil, in which case the right side is evaluated.
#
# Fix: the previous implementation used String#include?/#partition, so a
# "??" inside a quoted literal (e.g. `name ?? "huh??"` evaluated left of
# the wrong split point). Locate the operator with find_outside_quotes,
# consistent with eval_ternary / eval_inline_if.
def eval_null_coalesce(expr, context)
  pos = find_outside_quotes(expr, "??")
  return :not_coalesce unless pos && pos >= 0

  left = expr[0...pos].strip
  right = expr[(pos + 2)..].strip
  val = eval_expr(left, context)
  # Note: only nil triggers the fallback — false is a valid left value.
  val.nil? ? eval_expr(right, context) : val
end
1042
+
1043
+ # ── String concatenation: a ~ b ──
1044
+
1045
# String concatenation: `a ~ b ~ c`. Each segment is evaluated, nil is
# treated as "", and the stringified pieces are joined.
#
# Fix: the previous implementation split on every "~" including those
# inside quoted literals, corrupting expressions like `x ~ "a~b"`. Split
# only on tildes that sit outside quotes; if no top-level tilde exists,
# fall through so the dispatcher can handle the expression another way.
def eval_concat(expr, context)
  return :not_concat unless expr.include?("~")

  segments = []
  current = +""
  quote = nil
  expr.each_char do |ch|
    if quote
      current << ch
      quote = nil if ch == quote
    elsif ch == '"' || ch == "'"
      quote = ch
      current << ch
    elsif ch == "~"
      segments << current
      current = +""
    else
      current << ch
    end
  end
  segments << current

  # Every "~" was inside quotes: this is not a concatenation.
  return :not_concat if segments.length < 2

  segments.map { |part| (eval_expr(part.strip, context) || "").to_s }.join
end
1050
+
1051
+ # ── Arithmetic: +, -, *, //, /, %, ** ──
1052
+
1053
# Evaluate binary arithmetic: +, -, *, //, /, %, **.
# ARITHMETIC_OPS supplies the operator search order (defined outside
# this chunk — presumably multi-char operators precede their single-char
# prefixes; confirm at its definition). The first operator found outside
# quotes splits the expression; both sides are evaluated recursively and
# combined via apply_math.
def eval_arithmetic(expr, context)
  ARITHMETIC_OPS.each do |op|
    pos = find_outside_quotes(expr, op)
    next unless pos && pos >= 0
    l_val = eval_expr(expr[0...pos].strip, context)
    r_val = eval_expr(expr[(pos + op.length)..].strip, context)
    # op.strip: the table entries may carry surrounding spaces.
    return apply_math(l_val, op.strip, r_val)
  end
  :not_arithmetic
end
1063
+
1064
+ # ── Function call: name(arg1, arg2) ──
1065
+
1066
# Evaluate a function/macro call `name(arg1, arg2)` where `name` maps to
# a callable stored in the context (macros are registered as lambdas).
# Returns :not_function when the expression isn't call-shaped or the
# resolved value isn't callable, so resolution can continue elsewhere.
def eval_function_call(expr, context)
  return :not_function unless expr =~ FUNC_CALL_RE
  fn_name = Regexp.last_match(1)
  raw_args = Regexp.last_match(2).strip
  fn = context[fn_name]
  return :not_function unless fn.respond_to?(:call)
  # Each argument is itself a full expression.
  args = raw_args.empty? ? [] : split_args_toplevel(raw_args).map { |a| eval_expr(a.strip, context) }
  fn.call(*args)
end
1075
+
1076
# Quick textual probe: does the expression contain any comparison or
# logical operator token? (Substring check only — quoted occurrences
# also match; eval_comparison does the real parsing.)
def has_comparison?(expr)
  tokens = [
    " not in ", " in ", " is not ", " is ",
    "!=", "==", ">=", "<=", ">", "<",
    " and ", " or ", " not "
  ]
  tokens.any? { |token| expr.include?(token) }
end
1080
+
1081
+ # Split comma-separated args at top level (not inside quotes/parens/brackets).
1082
# Split a comma-separated list at the top level only — commas inside
# quotes or inside (), [], {} groups stay with their argument. A purely
# whitespace trailing piece is dropped.
def split_args_toplevel(str)
  pieces = []
  buf = +""
  quote = nil
  nesting = 0

  str.each_char do |c|
    if quote
      buf << c
      quote = nil if c == quote
      next
    end

    case c
    when '"', "'"
      quote = c
      buf << c
    when "(", "[", "{"
      nesting += 1
      buf << c
    when ")", "]", "}"
      nesting -= 1
      buf << c
    when ","
      if nesting.zero?
        pieces << buf.strip
        buf = +""
      else
        buf << c
      end
    else
      buf << c
    end
  end

  tail = buf.strip
  pieces << tail unless tail.empty?
  pieces
end
1111
+
1112
+ # -----------------------------------------------------------------------
1113
+ # Comparison / logical evaluator
1114
+ # -----------------------------------------------------------------------
1115
+
1116
# Evaluate a comparison/logical expression to a boolean.
#
# Precedence cascade (lowest binds loosest): `not` prefix, then `or`,
# then `and`, then `is not` / `is` tests, `not in` / `in` membership,
# then binary comparisons, and finally truthiness of a plain value.
# eval_fn lets callers substitute a different sub-expression evaluator
# (handle_if passes eval_var_raw); defaults to eval_expr.
def eval_comparison(expr, context, eval_fn = nil)
  eval_fn ||= method(:eval_expr)
  expr = expr.strip

  # Handle 'not' prefix
  if expr.start_with?("not ")
    return !eval_comparison(expr[4..], context, eval_fn)
  end

  # 'or' (lowest precedence) — short-circuits via any?
  or_parts = expr.split(OR_SPLIT_RE)
  if or_parts.length > 1
    return or_parts.any? { |p| eval_comparison(p, context, eval_fn) }
  end

  # 'and' — short-circuits via all?
  and_parts = expr.split(AND_SPLIT_RE)
  if and_parts.length > 1
    return and_parts.all? { |p| eval_comparison(p, context, eval_fn) }
  end

  # 'is not' test
  if expr =~ IS_NOT_RE
    return !eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                      Regexp.last_match(3).strip, context, eval_fn)
  end

  # 'is' test
  if expr =~ IS_RE
    return eval_test(Regexp.last_match(1).strip, Regexp.last_match(2),
                     Regexp.last_match(3).strip, context, eval_fn)
  end

  # 'not in'
  if expr =~ NOT_IN_RE
    val = eval_fn.call(Regexp.last_match(1).strip, context)
    collection = eval_fn.call(Regexp.last_match(2).strip, context)
    return !(collection.respond_to?(:include?) && collection.include?(val))
  end

  # 'in'
  if expr =~ IN_RE
    val = eval_fn.call(Regexp.last_match(1).strip, context)
    collection = eval_fn.call(Regexp.last_match(2).strip, context)
    return collection.respond_to?(:include?) ? collection.include?(val) : false
  end

  # Binary comparison operators. Order matters: != / == / >= / <= are
  # checked before bare > and < so compound operators aren't split in two.
  # Ordering comparisons coerce both sides with to_f.
  [["!=", ->(a, b) { a != b }],
   ["==", ->(a, b) { a == b }],
   [">=", ->(a, b) { a.to_f >= b.to_f }],
   ["<=", ->(a, b) { a.to_f <= b.to_f }],
   [">", ->(a, b) { a.to_f > b.to_f }],
   ["<", ->(a, b) { a.to_f < b.to_f }]].each do |op, fn|
    if expr.include?(op)
      left, _, right = expr.partition(op)
      l = eval_fn.call(left.strip, context)
      r = eval_fn.call(right.strip, context)
      begin
        return fn.call(l, r)
      rescue
        # Incomparable operands (e.g. nil.to_f on odd types) → false.
        return false
      end
    end
  end

  # Fall through to simple eval
  val = eval_fn.call(expr, context)
  truthy?(val)
end
1186
+
1187
+ # -----------------------------------------------------------------------
1188
+ # Tests ('is' expressions)
1189
+ # -----------------------------------------------------------------------
1190
+
1191
# Evaluate a Jinja-style `is` test, e.g. `x is defined`,
# `n is divisible by(3)`. value_expr is evaluated first; test_name picks
# the predicate. Unknown test names return false rather than raising.
def eval_test(value_expr, test_name, args_str, context, eval_fn = nil)
  eval_fn ||= method(:eval_expr)
  val = eval_fn.call(value_expr, context)

  # 'divisible by(n)' — special-cased because it takes an argument.
  if test_name == "divisible"
    if args_str =~ DIVISIBLE_BY_RE
      n = Regexp.last_match(1).to_i
      return val.is_a?(Integer) && (val % n).zero?
    end
    return false
  end

  # Check custom tests first
  # NOTE(review): @tests is initialised outside this chunk — presumably
  # seeded from default_tests plus user registrations; confirm there.
  custom = @tests[test_name]
  return custom.call(val) if custom

  false
end
1210
+
1211
+ def default_tests
1212
+ {
1213
+ "defined" => ->(v) { !v.nil? },
1214
+ "empty" => ->(v) { v.nil? || (v.respond_to?(:empty?) && v.empty?) || v == 0 || v == false },
1215
+ "null" => ->(v) { v.nil? },
1216
+ "none" => ->(v) { v.nil? },
1217
+ "even" => ->(v) { v.is_a?(Integer) && v.even? },
1218
+ "odd" => ->(v) { v.is_a?(Integer) && v.odd? },
1219
+ "iterable" => ->(v) { v.respond_to?(:each) && !v.is_a?(String) },
1220
+ "string" => ->(v) { v.is_a?(String) },
1221
+ "number" => ->(v) { v.is_a?(Numeric) },
1222
+ "boolean" => ->(v) { v.is_a?(TrueClass) || v.is_a?(FalseClass) },
1223
+ }
1224
+ end
1225
+
1226
+ # -----------------------------------------------------------------------
1227
+ # Variable resolver
1228
+ # -----------------------------------------------------------------------
1229
+
1230
# Resolve a dotted/bracketed variable path ("a.b[0]['key']") against the
# context. Path segments are pre-split with RESOLVE_SPLIT_RE and cached
# per expression. Traversal rules per segment, by current value type:
#   Hash/LoopContext — try string key, then symbol key
#   Array            — slice syntax [s:e], else integer index
#   anything else    — send the segment as a method if it responds
# Any nil along the way (or an unresolvable segment) yields nil.
def resolve(expr, context)
  parts = @resolve_cache[expr]
  unless parts
    parts = expr.split(RESOLVE_SPLIT_RE).reject(&:empty?)
    @resolve_cache[expr] = parts
  end

  value = context

  parts.each do |part|
    part = part.strip.gsub(RESOLVE_STRIP_RE, "") # strip quotes from bracket access
    if value.is_a?(Hash) || value.is_a?(LoopContext)
      # NOTE: `||` means a stored false/nil under the string key falls
      # through to the symbol key.
      value = value[part] || value[part.to_sym]
    elsif value.is_a?(Array)
      # Slice syntax: value[1:5], value[:10], value[start:end]
      if part.include?(":") && !(part.start_with?('"') || part.start_with?("'"))
        slice_parts = part.split(":", 2)
        s_start = slice_parts[0].strip.empty? ? nil : eval_expr(slice_parts[0].strip, context).to_i
        s_end = slice_parts[1].strip.empty? ? nil : eval_expr(slice_parts[1].strip, context).to_i
        if s_start && s_end
          value = value[s_start...s_end]
        elsif s_start
          value = value[s_start..]
        elsif s_end
          value = value[0...s_end]
        else
          # "[:]" — shallow copy.
          value = value.dup
        end
        next
      end
      # Plain index: literal digits fast-path, otherwise evaluate the
      # segment as an expression (e.g. items[i]).
      idx = if part =~ DIGIT_RE
              part.to_i
            else
              eval_expr(part, context)
            end
      idx = idx.to_i if idx.is_a?(Numeric)
      value = idx.is_a?(Integer) ? value[idx] : nil
    elsif value.respond_to?(part.to_sym)
      value = value.send(part.to_sym)
    else
      return nil
    end
    return nil if value.nil?
  end

  value
end
1277
+
1278
+ # -----------------------------------------------------------------------
1279
+ # Math
1280
+ # -----------------------------------------------------------------------
1281
+
1282
# Apply a binary arithmetic operator. nil operands become 0; all math is
# performed in floats, and division by zero yields 0 instead of raising.
# A whole-number result is returned as an Integer, otherwise as a Float
# (this matches the original: even "/" returns an Integer when the
# quotient is whole).
def apply_math(left, op, right)
  lhs = (left || 0).to_f
  rhs = (right || 0).to_f

  value =
    case op
    when "+" then lhs + rhs
    when "-" then lhs - rhs
    when "*" then lhs * rhs
    when "/" then rhs.zero? ? 0 : lhs / rhs
    when "//" then rhs.zero? ? 0 : (lhs / rhs).floor
    when "%" then rhs.zero? ? 0 : lhs % rhs
    when "**" then lhs**rhs
    else 0
    end

  value == value.to_i ? value.to_i : value
end
1299
+
1300
+ # -----------------------------------------------------------------------
1301
+ # Block handlers
1302
+ # -----------------------------------------------------------------------
1303
+
1304
+ # {% if %}...{% elseif %}...{% else %}...{% endif %}
1305
# {% if %}...{% elseif %}...{% else %}...{% endif %}
#
# Scans forward from the opening if-token, partitioning the body into
# [condition, tokens] branches (condition nil for {% else %}), tracking
# nested ifs via `depth` so inner endif/elseif tags don't terminate this
# block. Whitespace control: a tag's strip_after lstrips the next TEXT
# token, its strip_before rstrips the previous one. The first branch
# whose condition passes (via eval_comparison with eval_var_raw) is
# rendered; returns [rendered_html, index_after_endif].
def handle_if(tokens, start, context)
  content, _, strip_a_open = strip_tag(tokens[start][1])
  condition_expr = content.sub(/\Aif\s+/, "").strip

  branches = []
  current_tokens = []
  current_cond = condition_expr
  depth = 0
  i = start + 1

  # If the opening {%- if -%} has strip_after, lstrip the first body text
  pending_lstrip = strip_a_open

  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, strip_b_tag, strip_a_tag = strip_tag(raw)
      tag = tag_content.split[0] || ""

      if tag == "if"
        depth += 1
        current_tokens << tokens[i]
      elsif tag == "endif" && depth > 0
        depth -= 1
        current_tokens << tokens[i]
      elsif tag == "endif" && depth == 0
        # Apply strip_before from endif to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        i += 1
        break
      elsif (tag == "elseif" || tag == "elif") && depth == 0
        # Apply strip_before from elseif to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        current_cond = tag_content.sub(/\A(?:elseif|elif)\s+/, "").strip
        current_tokens = []
        pending_lstrip = strip_a_tag
      elsif tag == "else" && depth == 0
        # Apply strip_before from else to last body token
        if strip_b_tag && !current_tokens.empty? && current_tokens[-1][0] == TEXT
          current_tokens[-1] = [TEXT, current_tokens[-1][1].rstrip]
        end
        branches << [current_cond, current_tokens]
        current_cond = nil
        current_tokens = []
        pending_lstrip = strip_a_tag
      else
        # Unrelated nested tag — belongs to the current branch body.
        current_tokens << tokens[i]
      end
    else
      tok = tokens[i]
      if pending_lstrip && ttype == TEXT
        tok = [TEXT, tok[1].lstrip]
        pending_lstrip = false
      end
      current_tokens << tok
    end
    i += 1
  end

  branches.each do |cond, branch_tokens|
    # nil condition = the {% else %} branch, which always matches.
    if cond.nil? || eval_comparison(cond, context, method(:eval_var_raw))
      return [render_tokens(branch_tokens.dup, context), i]
    end
  end

  ["", i]
end
1378
+
1379
+ # {% for item in items %}...{% else %}...{% endfor %}
1380
# {% for item in items %}...{% else %}...{% endfor %}
#
# Collects body tokens (and optional {% else %} tokens, rendered only
# when the iterable is nil/empty) while tracking nested for/if depth so
# an {% else %} belonging to a nested if is not mistaken for the loop's
# else. Each iteration renders the body in a LoopContext exposing the
# Jinja-style `loop` hash (index/index0/first/last/length/revindex/
# revindex0/even/odd). Hash iteration yields key (+ value with two loop
# vars); array iteration with two vars yields idx, item.
# Returns [rendered_html, index_after_endfor].
def handle_for(tokens, start, context)
  content, _, strip_a_open = strip_tag(tokens[start][1])
  m = content.match(FOR_RE)
  return ["", start + 1] unless m

  var1 = m[1]
  var2 = m[2]
  iterable_expr = m[3].strip

  body_tokens = []
  else_tokens = []
  in_else = false
  for_depth = 0
  if_depth = 0
  i = start + 1
  pending_lstrip = strip_a_open

  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, strip_b_tag, strip_a_tag = strip_tag(raw)
      tag = tag_content.split[0] || ""

      if tag == "for"
        for_depth += 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endfor" && for_depth > 0
        for_depth -= 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endfor" && for_depth == 0
        # Closing tag of THIS loop: optionally rstrip last body text.
        target = in_else ? else_tokens : body_tokens
        if strip_b_tag && !target.empty? && target[-1][0] == TEXT
          target[-1] = [TEXT, target[-1][1].rstrip]
        end
        i += 1
        break
      elsif tag == "if"
        if_depth += 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "endif"
        if_depth -= 1
        (in_else ? else_tokens : body_tokens) << tokens[i]
      elsif tag == "else" && for_depth == 0 && if_depth == 0
        # The loop's own else branch begins here.
        if strip_b_tag && !body_tokens.empty? && body_tokens[-1][0] == TEXT
          body_tokens[-1] = [TEXT, body_tokens[-1][1].rstrip]
        end
        in_else = true
        pending_lstrip = strip_a_tag
      else
        (in_else ? else_tokens : body_tokens) << tokens[i]
      end
    else
      tok = tokens[i]
      if pending_lstrip && ttype == TEXT
        tok = [TEXT, tok[1].lstrip]
        pending_lstrip = false
      end
      (in_else ? else_tokens : body_tokens) << tok
    end
    i += 1
  end

  iterable = eval_expr(iterable_expr, context)

  if iterable.nil? || (iterable.respond_to?(:empty?) && iterable.empty?)
    if else_tokens.any?
      return [render_tokens(else_tokens.dup, context), i]
    end
    return ["", i]
  end

  output = []
  items = iterable.is_a?(Hash) ? iterable.to_a : Array(iterable)
  total = items.length

  items.each_with_index do |item, idx|
    loop_ctx = LoopContext.new(context)
    loop_ctx["loop"] = {
      "index" => idx + 1,
      "index0" => idx,
      "first" => idx == 0,
      "last" => idx == total - 1,
      "length" => total,
      "revindex" => total - idx,
      "revindex0" => total - idx - 1,
      "even" => ((idx + 1) % 2).zero?,
      "odd" => ((idx + 1) % 2) != 0,
    }

    if iterable.is_a?(Hash)
      key, value = item
      if var2
        loop_ctx[var1] = key
        loop_ctx[var2] = value
      else
        loop_ctx[var1] = key
      end
    else
      if var2
        # Two loop vars over an array: index, item.
        loop_ctx[var1] = idx
        loop_ctx[var2] = item
      else
        loop_ctx[var1] = item
      end
    end

    output << render_tokens(body_tokens.dup, loop_ctx)
  end

  [output.join, i]
end
1491
+
1492
+ # {% set name = expr %}
1493
# {% set name = expr %} — evaluate the right-hand side (raw, without
# auto-escaping) and store it in the context. No-op if the tag doesn't
# match SET_RE.
def handle_set(content, context)
  m = content.match(SET_RE)
  return unless m

  context[m[1]] = eval_var_raw(m[2].strip, context)
end
1500
+
1501
+ # {% include "file.html" %}
1502
# {% include "file.html" %} with optional "ignore missing" flag and an
# optional `with {...}` context extension (captured by INCLUDE_RE group 2).
# The included template renders in a copy of the current context merged
# with the stringified `with` hash; load failures return "" only when
# "ignore missing" was given, otherwise the error propagates.
def handle_include(content, context)
  ignore_missing = content.include?("ignore missing")
  content = content.gsub("ignore missing", "").strip

  m = content.match(INCLUDE_RE)
  return "" unless m

  filename = m[1]
  with_expr = m[2]

  begin
    source = load_template(filename)
  rescue
    return "" if ignore_missing
    raise
  end

  # dup so assignments inside the include don't leak into the caller.
  inc_context = context.dup
  if with_expr
    extra = eval_expr(with_expr, context)
    inc_context.merge!(stringify_keys(extra)) if extra.is_a?(Hash)
  end

  execute(source, inc_context)
end
1527
+
1528
+ # {% macro name(args) %}...{% endmacro %}
1529
# {% macro name(args) %}...{% endmacro %}
#
# Registers the macro as a lambda in the context so eval_function_call
# can invoke it. Missing args are bound to nil; the body renders into a
# SafeString (exempt from auto-escaping). On a malformed header the body
# is skipped up to {% endmacro %}. Returns the token index to resume at.
#
# NOTE(review): unlike _make_macro_fn, this captures `context` itself
# (not a dup), so the macro sees later context mutations at call time —
# presumably intentional for same-template macros; confirm before
# unifying the two code paths.
def handle_macro(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  m = content.match(MACRO_RE)
  unless m
    # Malformed macro tag: skip forward past the matching endmacro.
    i = start + 1
    while i < tokens.length
      if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
        return i + 1
      end
      i += 1
    end
    return i
  end

  macro_name = m[1]
  param_names = m[2].split(",").map(&:strip).reject(&:empty?)

  body_tokens = []
  i = start + 1
  while i < tokens.length
    if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
      i += 1
      break
    end
    body_tokens << tokens[i]
    i += 1
  end

  engine = self
  captured_body = body_tokens.dup
  captured_context = context

  context[macro_name] = lambda { |*args|
    macro_ctx = captured_context.dup
    param_names.each_with_index do |pname, pi|
      macro_ctx[pname] = pi < args.length ? args[pi] : nil
    end
    Tina4::SafeString.new(engine.send(:render_tokens, captured_body.dup, macro_ctx))
  }

  i
end
1571
+
1572
+ # {% from "file" import macro1, macro2 %}
1573
# {% from "file" import macro1, macro2 %}
#
# Loads and tokenizes the named template, then scans it for {% macro %}
# definitions whose names appear in the import list, registering each as
# a callable in the current context via _make_macro_fn. Macros in the
# file that are not listed are skipped (their bodies are left to the
# normal scan; only matched macros have their bodies consumed here).
def handle_from_import(content, context)
  m = content.match(FROM_IMPORT_RE)
  return unless m

  filename = m[1]
  names = m[2].split(",").map(&:strip).reject(&:empty?)

  source = load_template(filename)
  tokens = tokenize(source)

  i = 0
  while i < tokens.length
    ttype, raw = tokens[i]
    if ttype == BLOCK
      tag_content, _, _ = strip_tag(raw)
      tag = (tag_content.split[0] || "")
      if tag == "macro"
        macro_m = tag_content.match(MACRO_RE)
        if macro_m && names.include?(macro_m[1])
          macro_name = macro_m[1]
          param_names = macro_m[2].split(",").map(&:strip).reject(&:empty?)

          # Consume the macro body up to (and including) endmacro.
          body_tokens = []
          i += 1
          while i < tokens.length
            if tokens[i][0] == BLOCK && tokens[i][1].include?("endmacro")
              i += 1
              break
            end
            body_tokens << tokens[i]
            i += 1
          end

          context[macro_name] = _make_macro_fn(body_tokens.dup, param_names.dup, context.dup)
          # i already points past endmacro; don't advance again.
          next
        end
      end
    end
    i += 1
  end
end
1614
+
1615
+ # Build an isolated lambda for a macro — avoids closure-in-loop variable sharing.
1616
# Build an isolated lambda for a macro — avoids closure-in-loop variable
# sharing (each call gets its own body/params/ctx bindings rather than
# sharing the loop variables of handle_from_import).
#
# ctx is received already dup'ed by the caller, so the macro renders
# against a snapshot of the context taken at import time. Missing call
# arguments bind to nil; output is wrapped in SafeString so it is not
# auto-escaped again.
def _make_macro_fn(body_tokens, param_names, ctx)
  engine = self
  lambda { |*args|
    macro_ctx = ctx.dup
    param_names.each_with_index do |pname, pi|
      macro_ctx[pname] = pi < args.length ? args[pi] : nil
    end
    Tina4::SafeString.new(engine.send(:render_tokens, body_tokens.dup, macro_ctx))
  }
end
1626
+
1627
+ # {% cache "key" ttl %}...{% endcache %}
1628
# {% cache "key" ttl %}...{% endcache %}
#
# Fragment caching: if @fragment_cache holds an unexpired entry for the
# key, the body tokens are skipped (depth-tracked past nested cache
# blocks) and the cached HTML returned. Otherwise the body is rendered,
# stored with an absolute expiry (Time.now + ttl seconds, default 60),
# and returned. Key defaults to "default" when the tag doesn't match
# CACHE_RE. Returns [html, index_after_endcache].
def handle_cache(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  m = content.match(CACHE_RE)
  cache_key = m ? m[1] : "default"
  ttl = m && m[2] ? m[2].to_i : 60

  # Check cache
  cached = @fragment_cache[cache_key]
  if cached
    html_content, expires_at = cached
    if Time.now.to_f < expires_at
      # Cache hit: skip to the matching endcache without rendering.
      i = start + 1
      depth = 0
      while i < tokens.length
        if tokens[i][0] == BLOCK
          tc, _, _ = strip_tag(tokens[i][1])
          tag = tc.split[0] || ""
          if tag == "cache"
            depth += 1
          elsif tag == "endcache"
            return [html_content, i + 1] if depth == 0
            depth -= 1
          end
        end
        i += 1
      end
      return [html_content, i]
    end
  end

  # Cache miss (or expired): collect the body tokens.
  body_tokens = []
  i = start + 1
  depth = 0
  while i < tokens.length
    if tokens[i][0] == BLOCK
      tc, _, _ = strip_tag(tokens[i][1])
      tag = tc.split[0] || ""
      if tag == "cache"
        depth += 1
        body_tokens << tokens[i]
      elsif tag == "endcache"
        if depth == 0
          i += 1
          break
        end
        depth -= 1
        body_tokens << tokens[i]
      else
        body_tokens << tokens[i]
      end
    else
      body_tokens << tokens[i]
    end
    i += 1
  end

  rendered = render_tokens(body_tokens.dup, context)
  @fragment_cache[cache_key] = [rendered, Time.now.to_f + ttl]
  [rendered, i]
end
1689
+
1690
# {% spaceless %}...{% endspaceless %}
#
# Renders the body normally, then collapses whitespace between adjacent
# HTML tags (SPACELESS_RE → "><"). Nested spaceless blocks are tracked
# by depth so only this block's endspaceless terminates the scan.
# Returns [html, index_after_endspaceless].
def handle_spaceless(tokens, start, context)
  body_tokens = []
  i = start + 1
  depth = 0
  while i < tokens.length
    if tokens[i][0] == BLOCK
      tc, _, _ = strip_tag(tokens[i][1])
      tag = tc.split[0] || ""
      if tag == "spaceless"
        depth += 1
        body_tokens << tokens[i]
      elsif tag == "endspaceless"
        if depth == 0
          i += 1
          break
        end
        depth -= 1
        body_tokens << tokens[i]
      else
        body_tokens << tokens[i]
      end
    else
      body_tokens << tokens[i]
    end
    i += 1
  end

  rendered = render_tokens(body_tokens.dup, context)
  rendered = rendered.gsub(SPACELESS_RE, "><")
  [rendered, i]
end
1721
+
1722
# {% autoescape %} / {% autoescape false %} ... {% endautoescape %}
#
# Renders the body with auto-escaping toggled per the tag argument (any
# mode other than the literal "false" keeps escaping on). Nested
# autoescape blocks are tracked by depth. Returns
# [html, index_after_endautoescape].
#
# Fix: when escaping is disabled, @auto_escape is now restored in an
# `ensure` block — previously an exception raised inside render_tokens
# left auto-escaping permanently off for the rest of the engine's life.
def handle_autoescape(tokens, start, context)
  content, _, _ = strip_tag(tokens[start][1])
  mode_match = content.match(AUTOESCAPE_RE)
  auto_escape_on = !(mode_match && mode_match[1] == "false")

  # Collect the body tokens up to our matching endautoescape.
  body_tokens = []
  i = start + 1
  depth = 0
  while i < tokens.length
    if tokens[i][0] == BLOCK
      tc, _, _ = strip_tag(tokens[i][1])
      tag = tc.split[0] || ""
      if tag == "autoescape"
        depth += 1
        body_tokens << tokens[i]
      elsif tag == "endautoescape"
        if depth == 0
          i += 1
          break
        end
        depth -= 1
        body_tokens << tokens[i]
      else
        body_tokens << tokens[i]
      end
    else
      body_tokens << tokens[i]
    end
    i += 1
  end

  if auto_escape_on
    rendered = render_tokens(body_tokens.dup, context)
  else
    old_auto_escape = @auto_escape
    @auto_escape = false
    begin
      rendered = render_tokens(body_tokens.dup, context)
    ensure
      # Always restore the engine-wide flag, even if rendering raises.
      @auto_escape = old_auto_escape
    end
  end

  [rendered, i]
end
1764
+
1765
+ # -----------------------------------------------------------------------
1766
+ # Helpers
1767
+ # -----------------------------------------------------------------------
1768
+
1769
# Template truthiness (stricter than Ruby's): nil, false, 0, "" and any
# empty collection are falsy; everything else is truthy.
def truthy?(val)
  case val
  when nil, false, 0, ""
    false
  else
    !(val.respond_to?(:empty?) && val.empty?)
  end
end
1774
+
1775
# Return a copy of hash with every key converted to a String.
# Non-Hash input yields an empty hash.
def stringify_keys(hash)
  return {} unless hash.is_a?(Hash)
  hash.map { |key, val| [key.to_s, val] }.to_h
end
1779
+
1780
+ # -----------------------------------------------------------------------
1781
+ # Built-in filters (53 total)
1782
+ # -----------------------------------------------------------------------
1783
+
1784
+ def default_filters
1785
+ {
1786
+ # -- Text --
1787
+ "upper" => ->(v, *_a) { v.to_s.upcase },
1788
+ "lower" => ->(v, *_a) { v.to_s.downcase },
1789
+ "capitalize" => ->(v, *_a) { v.to_s.capitalize },
1790
+ "title" => ->(v, *_a) { v.to_s.split.map(&:capitalize).join(" ") },
1791
+ "trim" => ->(v, *_a) { v.to_s.strip },
1792
+ "ltrim" => ->(v, *_a) { v.to_s.lstrip },
1793
+ "rtrim" => ->(v, *_a) { v.to_s.rstrip },
1794
+ "replace" => ->(v, *a) {
1795
+ if a.length == 1 && a[0].is_a?(Hash)
1796
+ result = v.to_s
1797
+ a[0].each { |old, new_val| result = result.gsub(old.to_s, new_val.to_s) }
1798
+ result
1799
+ elsif a.length >= 2
1800
+ v.to_s.gsub(a[0].to_s, a[1].to_s)
1801
+ else
1802
+ v.to_s
1803
+ end
1804
+ },
1805
+ "striptags" => ->(v, *_a) { v.to_s.gsub(STRIPTAGS_RE, "") },
1806
+
1807
+ # -- Encoding --
1808
+ "escape" => ->(v, *_a) { Frond.escape_html(v.to_s) },
1809
+ "e" => ->(v, *_a) { Frond.escape_html(v.to_s) },
1810
+ "raw" => ->(v, *_a) { v },
1811
+ "safe" => ->(v, *_a) { v },
1812
+ "json_encode" => ->(v, *_a) { JSON.generate(v) rescue v.to_s },
1813
+ "json_decode" => ->(v, *_a) { v.is_a?(String) ? (JSON.parse(v) rescue v) : v },
1814
+ "base64_encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
1815
+ "base64encode" => ->(v, *_a) { Base64.strict_encode64(v.is_a?(String) ? v : v.to_s) },
1816
+ "base64_decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
1817
+ "base64decode" => ->(v, *_a) { Base64.decode64(v.to_s) },
1818
+ "data_uri" => ->(v, *_a) {
1819
+ if v.is_a?(Hash)
1820
+ ct = v[:type] || v["type"] || "application/octet-stream"
1821
+ raw = v[:content] || v["content"] || ""
1822
+ raw = raw.respond_to?(:read) ? raw.read : raw
1823
+ "data:#{ct};base64,#{Base64.strict_encode64(raw.to_s)}"
1824
+ else
1825
+ v.to_s
1826
+ end
1827
+ },
1828
+ "url_encode" => ->(v, *_a) { CGI.escape(v.to_s) },
1829
+
1830
+ # -- JSON / JS --
1831
+ "to_json" => ->(v, *a) {
1832
+ indent = a[0] ? a[0].to_i : nil
1833
+ json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
1834
+ # Escape <, >, & for safe HTML embedding
1835
+ Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
1836
+ },
1837
+ "tojson" => ->(v, *a) {
1838
+ indent = a[0] ? a[0].to_i : nil
1839
+ json = indent ? JSON.pretty_generate(v) : JSON.generate(v)
1840
+ Tina4::SafeString.new(json.gsub("<", '\u003c').gsub(">", '\u003e').gsub("&", '\u0026'))
1841
+ },
1842
+ "js_escape" => ->(v, *_a) {
1843
+ Tina4::SafeString.new(
1844
+ v.to_s.gsub("\\", "\\\\").gsub("'", "\\'").gsub('"', '\\"')
1845
+ .gsub("\n", "\\n").gsub("\r", "\\r").gsub("\t", "\\t")
1846
+ )
1847
+ },
1848
+
1849
+ # -- Hashing --
1850
+ "md5" => ->(v, *_a) { Digest::MD5.hexdigest(v.to_s) },
1851
+ "sha256" => ->(v, *_a) { Digest::SHA256.hexdigest(v.to_s) },
1852
+
1853
+ # -- Numbers --
1854
+ "abs" => ->(v, *_a) { v.is_a?(Numeric) ? v.abs : v.to_f.abs },
1855
+ "round" => ->(v, *a) { v.to_f.round(a[0] ? a[0].to_i : 0) },
1856
+ "int" => ->(v, *_a) { v.to_i },
1857
+ "float" => ->(v, *_a) { v.to_f },
1858
+ "number_format" => ->(v, *a) {
1859
+ decimals = a[0] ? a[0].to_i : 0
1860
+ formatted = format("%.#{decimals}f", v.to_f)
1861
+ # Add comma thousands separator
1862
+ parts = formatted.split(".")
1863
+ parts[0] = parts[0].gsub(THOUSANDS_RE, '\\1,')
1864
+ parts.join(".")
1865
+ },
1866
+
1867
+ # -- Date --
1868
+ "date" => ->(v, *a) {
1869
+ fmt = a[0] || "%Y-%m-%d"
1870
+ begin
1871
+ if v.is_a?(String)
1872
+ dt = DateTime.parse(v)
1873
+ dt.strftime(fmt)
1874
+ elsif v.respond_to?(:strftime)
1875
+ v.strftime(fmt)
1876
+ else
1877
+ v.to_s
1878
+ end
1879
+ rescue
1880
+ v.to_s
1881
+ end
1882
+ },
1883
+
1884
+ # -- Arrays --
1885
+ "length" => ->(v, *_a) { v.respond_to?(:length) ? v.length : v.to_s.length },
1886
+ "first" => ->(v, *_a) { v.respond_to?(:first) ? v.first : (v.to_s[0] rescue nil) },
1887
+ "last" => ->(v, *_a) { v.respond_to?(:last) ? v.last : (v.to_s[-1] rescue nil) },
1888
+ "reverse" => ->(v, *_a) { v.respond_to?(:reverse) ? v.reverse : v.to_s.reverse },
1889
+ "sort" => ->(v, *_a) { v.respond_to?(:sort) ? v.sort : v },
1890
+ "shuffle" => ->(v, *_a) { v.respond_to?(:shuffle) ? v.shuffle : v },
1891
+ "unique" => ->(v, *_a) { v.is_a?(Array) ? v.uniq : v },
1892
+ "join" => ->(v, *a) { v.respond_to?(:join) ? v.join(a[0] || ", ") : v.to_s },
1893
+ "split" => ->(v, *a) { v.to_s.split(a[0] || " ") },
1894
+ "slice" => ->(v, *a) {
1895
+ if a.length >= 2
1896
+ s = a[0].to_i
1897
+ e = a[1].to_i
1898
+ if v.is_a?(Array)
1899
+ v[s...e]
1900
+ else
1901
+ v.to_s[s...e]
1902
+ end
1903
+ else
1904
+ v
1905
+ end
1906
+ },
1907
+ "batch" => ->(v, *a) {
1908
+ if a[0] && v.respond_to?(:each_slice)
1909
+ v.each_slice(a[0].to_i).to_a
1910
+ else
1911
+ [v]
1912
+ end
1913
+ },
1914
+ "map" => ->(v, *a) {
1915
+ if a[0] && v.is_a?(Array)
1916
+ v.map { |item| item.is_a?(Hash) ? (item[a[0]] || item[a[0].to_sym]) : nil }
1917
+ else
1918
+ v
1919
+ end
1920
+ },
1921
+ "filter" => ->(v, *_a) { v.is_a?(Array) ? v.select { |item| item } : v },
1922
+ "column" => ->(v, *a) {
1923
+ if a[0] && v.is_a?(Array)
1924
+ v.map { |row| row.is_a?(Hash) ? (row[a[0]] || row[a[0].to_sym]) : nil }
1925
+ else
1926
+ v
1927
+ end
1928
+ },
1929
+
1930
+ # -- Dict --
1931
+ "keys" => ->(v, *_a) { v.respond_to?(:keys) ? v.keys : [] },
1932
+ "values" => ->(v, *_a) { v.respond_to?(:values) ? v.values : [v] },
1933
+ "merge" => ->(v, *a) {
1934
+ if v.respond_to?(:merge) && a[0].is_a?(Hash)
1935
+ v.merge(a[0])
1936
+ elsif v.is_a?(Array) && a[0].is_a?(Array)
1937
+ v + a[0]
1938
+ else
1939
+ v
1940
+ end
1941
+ },
1942
+
1943
+ # -- Utility --
1944
+ "default" => ->(v, *a) { (v.nil? || v.to_s.empty?) ? (a[0] || "") : v },
1945
+ # dump filter — gated on TINA4_DEBUG=true via Frond.render_dump.
1946
+ # Both the |dump filter and the dump() global delegate to the same
1947
+ # helper so they produce identical output and obey the same gating.
1948
+ "dump" => ->(v, *_a) { Frond.render_dump(v) },
1949
+ "string" => ->(v, *_a) { v.to_s },
1950
+ "truncate" => ->(v, *a) {
1951
+ len = a[0] ? a[0].to_i : 50
1952
+ str = v.to_s
1953
+ str.length > len ? str[0...len] + "..." : str
1954
+ },
1955
+ "wordwrap" => ->(v, *a) {
1956
+ width = a[0] ? a[0].to_i : 75
1957
+ words = v.to_s.split
1958
+ lines = []
1959
+ current = +""
1960
+ words.each do |word|
1961
+ if !current.empty? && current.length + 1 + word.length > width
1962
+ lines << current
1963
+ current = word
1964
+ else
1965
+ current = current.empty? ? word : "#{current} #{word}"
1966
+ end
1967
+ end
1968
+ lines << current unless current.empty?
1969
+ lines.join("\n")
1970
+ },
1971
+ "slug" => ->(v, *_a) { v.to_s.downcase.gsub(SLUG_CLEAN_RE, "-").gsub(SLUG_TRIM_RE, "") },
1972
+ "nl2br" => ->(v, *_a) { v.to_s.gsub("\n", "<br>\n") },
1973
+ "format" => ->(v, *a) {
1974
+ if a.any?
1975
+ v.to_s % a
1976
+ else
1977
+ v.to_s
1978
+ end
1979
+ },
1980
+ "form_token" => ->(_v, *_a) { Frond.generate_form_token(_v.to_s) },
1981
+ }
1982
+ end
1983
+
1984
+ # -----------------------------------------------------------------------
1985
+ # Built-in globals
1986
+ # -----------------------------------------------------------------------
1987
+
1988
+ def register_builtin_globals
1989
+ @globals["form_token"] = ->(descriptor = "") { Frond.generate_form_token(descriptor.to_s) }
1990
+ @globals["formTokenValue"] = ->(descriptor = "") { Frond.generate_form_token_value(descriptor.to_s) }
1991
+ @globals["form_token_value"] = ->(descriptor = "") { Frond.generate_form_token_value(descriptor.to_s) }
1992
+
1993
+ # Debug helper: {{ dump(x) }} — gated on TINA4_DEBUG=true.
1994
+ # Both this global and the |dump filter call Frond.render_dump which
1995
+ # returns an empty SafeString in production so dump never leaks state.
1996
+ @globals["dump"] = ->(value = nil) { Frond.render_dump(value) }
1997
+ end
1998
+
1999
+ # Render a value as a pre-formatted inspect() wrapped in <pre> tags.
2000
+ #
2001
+ # Gated on TINA4_DEBUG=true. In production (TINA4_DEBUG unset or false)
2002
+ # this returns an empty SafeString to avoid leaking internal state,
2003
+ # object shapes, or sensitive values into rendered HTML.
2004
+ #
2005
+ # Shared by the {{ value|dump }} filter and the {{ dump(value) }}
2006
+ # global function so both produce identical output and obey the same
2007
+ # gating.
2008
+ def self.render_dump(value)
2009
+ return SafeString.new("") unless ENV.fetch("TINA4_DEBUG", "").downcase == "true"
2010
+
2011
+ dumped = value.inspect
2012
+ escaped = dumped
2013
+ .gsub("&", "&amp;")
2014
+ .gsub("<", "&lt;")
2015
+ .gsub(">", "&gt;")
2016
+ .gsub('"', "&quot;")
2017
+ SafeString.new("<pre>#{escaped}</pre>")
2018
+ end
2019
+
2020
    # NOTE(review): this comment documents Frond.generate_form_token, which is
    # defined further below — the session-id plumbing was inserted between the
    # comment and its method; consider moving it next to the definition.
    #
    # Generate a JWT form token and return a hidden input element.
    #
    # @param descriptor [String] Optional string to enrich the token payload.
    #   - Empty: payload is {"type" => "form"}
    #   - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
    #   - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
    #
    # @return [String] <input type="hidden" name="formToken" value="TOKEN">
2028
+ # Session ID used by generate_form_token for CSRF session binding.
2029
+ # Set this before rendering templates to bind tokens to the current session.
2030
+ @form_token_session_id = ""
2031
+
2032
+ class << self
2033
+ attr_accessor :form_token_session_id
2034
+
2035
+ # Set the session ID used for CSRF form token binding.
2036
+ # Parity with Python/PHP/Node: Frond.set_form_token_session_id(id)
2037
+ #
2038
+ # @param session_id [String] The session ID to bind form tokens to
2039
+ def set_form_token_session_id(session_id)
2040
+ self.form_token_session_id = session_id
2041
+ end
2042
+ end
2043
+
2044
+ # Generate a raw JWT form token string.
2045
+ #
2046
+ # @param descriptor [String] Optional string to enrich the token payload.
2047
+ # - Empty: payload is {"type" => "form"}
2048
+ # - "admin_panel": payload is {"type" => "form", "context" => "admin_panel"}
2049
+ # - "checkout|order_123": payload is {"type" => "form", "context" => "checkout", "ref" => "order_123"}
2050
+ #
2051
+ # @return [String] The raw JWT token string.
2052
+ def self.generate_form_jwt(descriptor = "")
2053
+ require_relative "log"
2054
+ require_relative "auth"
2055
+
2056
+ payload = { "type" => "form", "nonce" => SecureRandom.hex(8) }
2057
+ if descriptor && !descriptor.empty?
2058
+ if descriptor.include?("|")
2059
+ parts = descriptor.split("|", 2)
2060
+ payload["context"] = parts[0]
2061
+ payload["ref"] = parts[1]
2062
+ else
2063
+ payload["context"] = descriptor
2064
+ end
2065
+ end
2066
+
2067
+ # Include session_id for CSRF session binding
2068
+ sid = form_token_session_id.to_s
2069
+ payload["session_id"] = sid unless sid.empty?
2070
+
2071
+ ttl_minutes = (ENV["TINA4_TOKEN_LIMIT"] || "60").to_i
2072
+ expires_in = ttl_minutes * 60
2073
+ Tina4::Auth.create_token(payload, expires_in: expires_in)
2074
+ end
2075
+
2076
+ def self.generate_form_token(descriptor = "")
2077
+ token = generate_form_jwt(descriptor)
2078
+ Tina4::SafeString.new(%(<input type="hidden" name="formToken" value="#{CGI.escapeHTML(token)}">))
2079
+ end
2080
+
2081
+ # Return just the raw JWT form token string (no <input> wrapper).
2082
+ # Registered as both formTokenValue and form_token_value template globals.
2083
+ def self.generate_form_token_value(descriptor = "")
2084
+ Tina4::SafeString.new(generate_form_jwt(descriptor))
2085
+ end
2086
+ end
2087
+ end