zexus 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +0 -0
- package/README.md +2513 -0
- package/bin/zexus +2 -0
- package/bin/zpics +2 -0
- package/bin/zpm +2 -0
- package/bin/zx +2 -0
- package/bin/zx-deploy +2 -0
- package/bin/zx-dev +2 -0
- package/bin/zx-run +2 -0
- package/package.json +66 -0
- package/scripts/README.md +24 -0
- package/scripts/postinstall.js +44 -0
- package/shared_config.json +24 -0
- package/src/README.md +1525 -0
- package/src/tests/run_zexus_tests.py +117 -0
- package/src/tests/test_all_phases.zx +346 -0
- package/src/tests/test_blockchain_features.zx +306 -0
- package/src/tests/test_complexity_features.zx +321 -0
- package/src/tests/test_core_integration.py +185 -0
- package/src/tests/test_phase10_ecosystem.zx +177 -0
- package/src/tests/test_phase1_modifiers.zx +87 -0
- package/src/tests/test_phase2_plugins.zx +80 -0
- package/src/tests/test_phase3_security.zx +97 -0
- package/src/tests/test_phase4_vfs.zx +116 -0
- package/src/tests/test_phase5_types.zx +117 -0
- package/src/tests/test_phase6_metaprogramming.zx +125 -0
- package/src/tests/test_phase7_optimization.zx +132 -0
- package/src/tests/test_phase9_advanced_types.zx +157 -0
- package/src/tests/test_security_features.py +419 -0
- package/src/tests/test_security_features.zx +276 -0
- package/src/tests/test_simple_zx.zx +1 -0
- package/src/tests/test_verification_simple.zx +69 -0
- package/src/zexus/__init__.py +28 -0
- package/src/zexus/__main__.py +5 -0
- package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/advanced_types.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/builtin_modules.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/complexity_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/concurrency_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/config.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/dependency_injection.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/ecosystem.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/hybrid_orchestrator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/metaprogramming.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/optimization.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/plugin_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/policy_engine.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/stdlib_integration.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/strategy_recovery.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/type_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/virtual_filesystem.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
- package/src/zexus/advanced_types.py +401 -0
- package/src/zexus/blockchain/__init__.py +40 -0
- package/src/zexus/blockchain/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/crypto.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/ledger.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/transaction.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/crypto.py +463 -0
- package/src/zexus/blockchain/ledger.py +255 -0
- package/src/zexus/blockchain/transaction.py +267 -0
- package/src/zexus/builtin_modules.py +284 -0
- package/src/zexus/builtin_plugins.py +317 -0
- package/src/zexus/capability_system.py +372 -0
- package/src/zexus/cli/__init__.py +2 -0
- package/src/zexus/cli/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
- package/src/zexus/cli/main.py +707 -0
- package/src/zexus/cli/zpm.py +203 -0
- package/src/zexus/compare_interpreter_compiler.py +146 -0
- package/src/zexus/compiler/__init__.py +169 -0
- package/src/zexus/compiler/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/compiler/bytecode.py +266 -0
- package/src/zexus/compiler/compat_runtime.py +277 -0
- package/src/zexus/compiler/lexer.py +257 -0
- package/src/zexus/compiler/parser.py +779 -0
- package/src/zexus/compiler/semantic.py +118 -0
- package/src/zexus/compiler/zexus_ast.py +454 -0
- package/src/zexus/complexity_system.py +575 -0
- package/src/zexus/concurrency_system.py +493 -0
- package/src/zexus/config.py +201 -0
- package/src/zexus/crypto_bridge.py +19 -0
- package/src/zexus/dependency_injection.py +423 -0
- package/src/zexus/ecosystem.py +434 -0
- package/src/zexus/environment.py +101 -0
- package/src/zexus/environment_manager.py +119 -0
- package/src/zexus/error_reporter.py +314 -0
- package/src/zexus/evaluator/__init__.py +12 -0
- package/src/zexus/evaluator/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/integration.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/bytecode_compiler.py +700 -0
- package/src/zexus/evaluator/core.py +891 -0
- package/src/zexus/evaluator/expressions.py +827 -0
- package/src/zexus/evaluator/functions.py +3989 -0
- package/src/zexus/evaluator/integration.py +396 -0
- package/src/zexus/evaluator/statements.py +4303 -0
- package/src/zexus/evaluator/utils.py +126 -0
- package/src/zexus/evaluator_original.py +2041 -0
- package/src/zexus/external_bridge.py +16 -0
- package/src/zexus/find_affected_imports.sh +155 -0
- package/src/zexus/hybrid_orchestrator.py +152 -0
- package/src/zexus/input_validation.py +259 -0
- package/src/zexus/lexer.py +571 -0
- package/src/zexus/logging.py +89 -0
- package/src/zexus/lsp/__init__.py +9 -0
- package/src/zexus/lsp/completion_provider.py +207 -0
- package/src/zexus/lsp/definition_provider.py +22 -0
- package/src/zexus/lsp/hover_provider.py +71 -0
- package/src/zexus/lsp/server.py +269 -0
- package/src/zexus/lsp/symbol_provider.py +31 -0
- package/src/zexus/metaprogramming.py +321 -0
- package/src/zexus/module_cache.py +89 -0
- package/src/zexus/module_manager.py +107 -0
- package/src/zexus/object.py +973 -0
- package/src/zexus/optimization.py +424 -0
- package/src/zexus/parser/__init__.py +31 -0
- package/src/zexus/parser/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
- package/src/zexus/parser/integration.py +86 -0
- package/src/zexus/parser/parser.py +3977 -0
- package/src/zexus/parser/strategy_context.py +7254 -0
- package/src/zexus/parser/strategy_structural.py +1033 -0
- package/src/zexus/persistence.py +391 -0
- package/src/zexus/plugin_system.py +290 -0
- package/src/zexus/policy_engine.py +365 -0
- package/src/zexus/profiler/__init__.py +5 -0
- package/src/zexus/profiler/profiler.py +233 -0
- package/src/zexus/purity_system.py +398 -0
- package/src/zexus/runtime/__init__.py +20 -0
- package/src/zexus/runtime/async_runtime.py +324 -0
- package/src/zexus/search_old_imports.sh +65 -0
- package/src/zexus/security.py +1407 -0
- package/src/zexus/stack_trace.py +233 -0
- package/src/zexus/stdlib/__init__.py +27 -0
- package/src/zexus/stdlib/blockchain.py +341 -0
- package/src/zexus/stdlib/compression.py +167 -0
- package/src/zexus/stdlib/crypto.py +124 -0
- package/src/zexus/stdlib/datetime.py +163 -0
- package/src/zexus/stdlib/db_mongo.py +199 -0
- package/src/zexus/stdlib/db_mysql.py +162 -0
- package/src/zexus/stdlib/db_postgres.py +163 -0
- package/src/zexus/stdlib/db_sqlite.py +133 -0
- package/src/zexus/stdlib/encoding.py +230 -0
- package/src/zexus/stdlib/fs.py +195 -0
- package/src/zexus/stdlib/http.py +219 -0
- package/src/zexus/stdlib/http_server.py +248 -0
- package/src/zexus/stdlib/json_module.py +61 -0
- package/src/zexus/stdlib/math.py +360 -0
- package/src/zexus/stdlib/os_module.py +265 -0
- package/src/zexus/stdlib/regex.py +148 -0
- package/src/zexus/stdlib/sockets.py +253 -0
- package/src/zexus/stdlib/test_framework.zx +208 -0
- package/src/zexus/stdlib/test_runner.zx +119 -0
- package/src/zexus/stdlib_integration.py +341 -0
- package/src/zexus/strategy_recovery.py +256 -0
- package/src/zexus/syntax_validator.py +356 -0
- package/src/zexus/testing/zpics.py +407 -0
- package/src/zexus/testing/zpics_runtime.py +369 -0
- package/src/zexus/type_system.py +374 -0
- package/src/zexus/validation_system.py +569 -0
- package/src/zexus/virtual_filesystem.py +355 -0
- package/src/zexus/vm/__init__.py +8 -0
- package/src/zexus/vm/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/memory_manager.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/memory_pool.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/peephole_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/profiler.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/register_allocator.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/register_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/ssa_converter.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/async_optimizer.py +420 -0
- package/src/zexus/vm/bytecode.py +428 -0
- package/src/zexus/vm/bytecode_converter.py +297 -0
- package/src/zexus/vm/cache.py +532 -0
- package/src/zexus/vm/jit.py +720 -0
- package/src/zexus/vm/memory_manager.py +520 -0
- package/src/zexus/vm/memory_pool.py +511 -0
- package/src/zexus/vm/optimizer.py +478 -0
- package/src/zexus/vm/parallel_vm.py +899 -0
- package/src/zexus/vm/peephole_optimizer.py +452 -0
- package/src/zexus/vm/profiler.py +527 -0
- package/src/zexus/vm/register_allocator.py +462 -0
- package/src/zexus/vm/register_vm.py +520 -0
- package/src/zexus/vm/ssa_converter.py +757 -0
- package/src/zexus/vm/vm.py +1392 -0
- package/src/zexus/zexus_ast.py +1782 -0
- package/src/zexus/zexus_token.py +253 -0
- package/src/zexus/zpm/__init__.py +15 -0
- package/src/zexus/zpm/installer.py +116 -0
- package/src/zexus/zpm/package_manager.py +208 -0
- package/src/zexus/zpm/publisher.py +98 -0
- package/src/zexus/zpm/registry.py +110 -0
- package/src/zexus.egg-info/PKG-INFO +2235 -0
- package/src/zexus.egg-info/SOURCES.txt +876 -0
- package/src/zexus.egg-info/dependency_links.txt +1 -0
- package/src/zexus.egg-info/entry_points.txt +3 -0
- package/src/zexus.egg-info/not-zip-safe +1 -0
- package/src/zexus.egg-info/requires.txt +14 -0
- package/src/zexus.egg-info/top_level.txt +2 -0
- package/zexus.json +14 -0
|
@@ -0,0 +1,2041 @@
|
|
|
1
|
+
# evaluator.py (FIXED VERSION)
|
|
2
|
+
import sys
|
|
3
|
+
import traceback
|
|
4
|
+
import json
|
|
5
|
+
import os
|
|
6
|
+
import asyncio
|
|
7
|
+
from . import zexus_ast
|
|
8
|
+
from .zexus_ast import (
|
|
9
|
+
Program, ExpressionStatement, BlockStatement, ReturnStatement, LetStatement,
|
|
10
|
+
ActionStatement, IfStatement, WhileStatement, ForEachStatement, MethodCallExpression,
|
|
11
|
+
EmbeddedLiteral, PrintStatement, ScreenStatement, EmbeddedCodeStatement, UseStatement,
|
|
12
|
+
ExactlyStatement, TryCatchStatement, IntegerLiteral, StringLiteral, ListLiteral, MapLiteral, Identifier,
|
|
13
|
+
ActionLiteral, CallExpression, PrefixExpression, InfixExpression, IfExpression,
|
|
14
|
+
Boolean as AST_Boolean, AssignmentExpression, PropertyAccessExpression,
|
|
15
|
+
ExportStatement, LambdaExpression, FromStatement, ComponentStatement, ThemeStatement,
|
|
16
|
+
DebugStatement, ExternalDeclaration, EntityStatement, SealStatement # Add all missing types
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
from .object import (
|
|
20
|
+
Environment, Integer, Float, String, List, Map, Null, Boolean as BooleanObj,
|
|
21
|
+
Builtin, Action, EmbeddedCode, ReturnValue, LambdaFunction, DateTime, Math, File, Debug,
|
|
22
|
+
EvaluationError as ObjectEvaluationError
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
NULL, TRUE, FALSE = Null(), BooleanObj(True), BooleanObj(False)
|
|
26
|
+
|
|
27
|
+
# Registry for builtin functions (populated later)
|
|
28
|
+
builtins = {}
|
|
29
|
+
|
|
30
|
+
# Use the unified EvaluationError from object.py
|
|
31
|
+
EvaluationError = ObjectEvaluationError
|
|
32
|
+
|
|
33
|
+
# Helper to centralize error checks (includes the new FixedEvaluationError)
|
|
34
|
+
def is_error(obj):
|
|
35
|
+
return isinstance(obj, (EvaluationError, ObjectEvaluationError))
|
|
36
|
+
|
|
37
|
+
# Summary counters for lightweight, 5-line summary logging when debug is off
|
|
38
|
+
EVAL_SUMMARY = {
|
|
39
|
+
'parsed_statements': 0,
|
|
40
|
+
'evaluated_statements': 0,
|
|
41
|
+
'errors': 0,
|
|
42
|
+
'async_tasks_run': 0,
|
|
43
|
+
'max_statements_in_block': 0
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
def _is_awaitable(obj):
|
|
47
|
+
try:
|
|
48
|
+
return asyncio.iscoroutine(obj) or isinstance(obj, asyncio.Future)
|
|
49
|
+
except Exception:
|
|
50
|
+
return False
|
|
51
|
+
|
|
52
|
+
def _resolve_awaitable(obj):
|
|
53
|
+
"""If obj is a coroutine/future, run it to completion and return the result.
|
|
54
|
+
If already in an event loop, return the coroutine (caller may handle it).
|
|
55
|
+
"""
|
|
56
|
+
if _is_awaitable(obj):
|
|
57
|
+
try:
|
|
58
|
+
EVAL_SUMMARY['async_tasks_run'] += 1
|
|
59
|
+
return asyncio.run(obj)
|
|
60
|
+
except RuntimeError:
|
|
61
|
+
# Already running event loop (e.g., invoked from async VM). Return as-is.
|
|
62
|
+
return obj
|
|
63
|
+
return obj
|
|
64
|
+
|
|
65
|
+
# === DEBUG FLAGS (controlled by user config) ===
|
|
66
|
+
from .config import config as zexus_config
|
|
67
|
+
|
|
68
|
+
def debug_log(message, data=None, level='debug'):
|
|
69
|
+
"""Conditional debug logging that respects the user's persistent config.
|
|
70
|
+
level: 'debug' (very verbose), 'info', 'warn', 'error'
|
|
71
|
+
"""
|
|
72
|
+
try:
|
|
73
|
+
if not zexus_config.should_log(level):
|
|
74
|
+
return
|
|
75
|
+
except Exception:
|
|
76
|
+
# If config fails for any reason, default to not logging
|
|
77
|
+
return
|
|
78
|
+
|
|
79
|
+
if data is not None:
|
|
80
|
+
print(f"🔍 [EVAL DEBUG] {message}: {data}")
|
|
81
|
+
else:
|
|
82
|
+
print(f"🔍 [EVAL DEBUG] {message}")
|
|
83
|
+
|
|
84
|
+
# === FIXED HELPER FUNCTIONS ===
|
|
85
|
+
|
|
86
|
+
def eval_program(statements, env):
|
|
87
|
+
debug_log("eval_program", f"Processing {len(statements)} statements")
|
|
88
|
+
try:
|
|
89
|
+
EVAL_SUMMARY['parsed_statements'] = max(EVAL_SUMMARY.get('parsed_statements', 0), len(statements))
|
|
90
|
+
except Exception:
|
|
91
|
+
pass
|
|
92
|
+
|
|
93
|
+
result = NULL
|
|
94
|
+
for i, stmt in enumerate(statements):
|
|
95
|
+
debug_log(f" Statement {i+1}", type(stmt).__name__)
|
|
96
|
+
res = eval_node(stmt, env)
|
|
97
|
+
res = _resolve_awaitable(res)
|
|
98
|
+
EVAL_SUMMARY['evaluated_statements'] += 1
|
|
99
|
+
if isinstance(res, ReturnValue):
|
|
100
|
+
debug_log(" ReturnValue encountered", res.value)
|
|
101
|
+
return res.value
|
|
102
|
+
if is_error(res):
|
|
103
|
+
debug_log(" Error encountered", res)
|
|
104
|
+
try:
|
|
105
|
+
EVAL_SUMMARY['errors'] += 1
|
|
106
|
+
except Exception:
|
|
107
|
+
pass
|
|
108
|
+
return res
|
|
109
|
+
result = res
|
|
110
|
+
debug_log("eval_program completed", result)
|
|
111
|
+
return result
|
|
112
|
+
|
|
113
|
+
def eval_assignment_expression(node, env):
|
|
114
|
+
"""Handle assignment expressions like: x = 5"""
|
|
115
|
+
debug_log("eval_assignment_expression", f"Assigning to {node.name.value}")
|
|
116
|
+
|
|
117
|
+
# CRITICAL FIX: Add sealed object check before assignment
|
|
118
|
+
from .security import SealedObject
|
|
119
|
+
target_obj = env.get(node.name.value)
|
|
120
|
+
if isinstance(target_obj, SealedObject):
|
|
121
|
+
return EvaluationError(f"Cannot assign to sealed object: {node.name.value}")
|
|
122
|
+
|
|
123
|
+
value = eval_node(node.value, env)
|
|
124
|
+
# Check using helper
|
|
125
|
+
if is_error(value):
|
|
126
|
+
debug_log(" Assignment error", value)
|
|
127
|
+
return value
|
|
128
|
+
env.set(node.name.value, value)
|
|
129
|
+
debug_log(" Assignment successful", f"{node.name.value} = {value}")
|
|
130
|
+
return value
|
|
131
|
+
|
|
132
|
+
def eval_block_statement(block, env):
|
|
133
|
+
debug_log("eval_block_statement", f"Processing {len(block.statements)} statements in block")
|
|
134
|
+
try:
|
|
135
|
+
EVAL_SUMMARY['max_statements_in_block'] = max(EVAL_SUMMARY.get('max_statements_in_block', 0), len(block.statements))
|
|
136
|
+
except Exception:
|
|
137
|
+
pass
|
|
138
|
+
|
|
139
|
+
result = NULL
|
|
140
|
+
for stmt in block.statements:
|
|
141
|
+
res = eval_node(stmt, env)
|
|
142
|
+
res = _resolve_awaitable(res)
|
|
143
|
+
EVAL_SUMMARY['evaluated_statements'] += 1
|
|
144
|
+
if isinstance(res, (ReturnValue, EvaluationError, ObjectEvaluationError)):
|
|
145
|
+
debug_log(" Block interrupted", res)
|
|
146
|
+
if is_error(res):
|
|
147
|
+
try:
|
|
148
|
+
EVAL_SUMMARY['errors'] += 1
|
|
149
|
+
except Exception:
|
|
150
|
+
pass
|
|
151
|
+
return res
|
|
152
|
+
result = res
|
|
153
|
+
debug_log(" Block completed", result)
|
|
154
|
+
return result
|
|
155
|
+
|
|
156
|
+
def eval_expressions(expressions, env):
|
|
157
|
+
debug_log("eval_expressions", f"Evaluating {len(expressions)} expressions")
|
|
158
|
+
results = []
|
|
159
|
+
for i, expr in enumerate(expressions):
|
|
160
|
+
debug_log(f" Expression {i+1}", type(expr).__name__)
|
|
161
|
+
res = eval_node(expr, env)
|
|
162
|
+
res = _resolve_awaitable(res)
|
|
163
|
+
if is_error(res):
|
|
164
|
+
debug_log(" Expression evaluation interrupted", res)
|
|
165
|
+
try:
|
|
166
|
+
EVAL_SUMMARY['errors'] += 1
|
|
167
|
+
except Exception:
|
|
168
|
+
pass
|
|
169
|
+
return res
|
|
170
|
+
results.append(res)
|
|
171
|
+
EVAL_SUMMARY['evaluated_statements'] += 1
|
|
172
|
+
debug_log(f" Expression {i+1} result", res)
|
|
173
|
+
debug_log(" All expressions evaluated", results)
|
|
174
|
+
return results
|
|
175
|
+
|
|
176
|
+
def eval_identifier(node, env):
|
|
177
|
+
debug_log("eval_identifier", f"Looking up: {node.value}")
|
|
178
|
+
val = env.get(node.value)
|
|
179
|
+
if val:
|
|
180
|
+
debug_log(" Found in environment", f"{node.value} = {val}")
|
|
181
|
+
return val
|
|
182
|
+
# Check builtins
|
|
183
|
+
builtin = builtins.get(node.value)
|
|
184
|
+
if builtin:
|
|
185
|
+
debug_log(" Found builtin", f"{node.value} = {builtin}")
|
|
186
|
+
return builtin
|
|
187
|
+
|
|
188
|
+
debug_log(" Identifier not found", node.value)
|
|
189
|
+
# FIXED: Return the new FixedEvaluationError so downstream code won't crash if len() is used
|
|
190
|
+
return EvaluationError(f"Identifier '{node.value}' not found")
|
|
191
|
+
|
|
192
|
+
def is_truthy(obj):
|
|
193
|
+
# FIXED: Handle all error types
|
|
194
|
+
if is_error(obj):
|
|
195
|
+
return False
|
|
196
|
+
result = not (obj == NULL or obj == FALSE)
|
|
197
|
+
debug_log("is_truthy", f"{obj} -> {result}")
|
|
198
|
+
return result
|
|
199
|
+
|
|
200
|
+
def eval_prefix_expression(operator, right):
|
|
201
|
+
debug_log("eval_prefix_expression", f"{operator} {right}")
|
|
202
|
+
if is_error(right): # Use is_error helper
|
|
203
|
+
return right
|
|
204
|
+
|
|
205
|
+
if operator == "!":
|
|
206
|
+
result = eval_bang_operator_expression(right)
|
|
207
|
+
elif operator == "-":
|
|
208
|
+
result = eval_minus_prefix_operator_expression(right)
|
|
209
|
+
else:
|
|
210
|
+
result = EvaluationError(f"Unknown operator: {operator}{right.type()}")
|
|
211
|
+
|
|
212
|
+
debug_log(" Prefix result", result)
|
|
213
|
+
return result
|
|
214
|
+
|
|
215
|
+
def eval_bang_operator_expression(right):
|
|
216
|
+
if right == TRUE:
|
|
217
|
+
return FALSE
|
|
218
|
+
elif right == FALSE:
|
|
219
|
+
return TRUE
|
|
220
|
+
elif right == NULL:
|
|
221
|
+
return TRUE
|
|
222
|
+
return FALSE
|
|
223
|
+
|
|
224
|
+
def eval_minus_prefix_operator_expression(right):
|
|
225
|
+
if isinstance(right, Integer):
|
|
226
|
+
return Integer(-right.value)
|
|
227
|
+
elif isinstance(right, Float):
|
|
228
|
+
return Float(-right.value)
|
|
229
|
+
return EvaluationError(f"Unknown operator: -{right.type()}")
|
|
230
|
+
|
|
231
|
+
def eval_infix_expression(operator, left, right):
|
|
232
|
+
debug_log("eval_infix_expression", f"{left} {operator} {right}")
|
|
233
|
+
# Handle errors first
|
|
234
|
+
if is_error(left): # Use is_error helper
|
|
235
|
+
return left
|
|
236
|
+
if is_error(right): # Use is_error helper
|
|
237
|
+
return right
|
|
238
|
+
|
|
239
|
+
# Logical operators
|
|
240
|
+
if operator == "&&":
|
|
241
|
+
result = TRUE if is_truthy(left) and is_truthy(right) else FALSE
|
|
242
|
+
elif operator == "||":
|
|
243
|
+
result = TRUE if is_truthy(left) or is_truthy(right) else FALSE
|
|
244
|
+
elif operator == "==":
|
|
245
|
+
# FIXED: Handle different object types properly
|
|
246
|
+
if hasattr(left, 'value') and hasattr(right, 'value'):
|
|
247
|
+
result = TRUE if left.value == right.value else FALSE
|
|
248
|
+
else:
|
|
249
|
+
result = TRUE if left == right else FALSE
|
|
250
|
+
elif operator == "!=":
|
|
251
|
+
if hasattr(left, 'value') and hasattr(right, 'value'):
|
|
252
|
+
result = TRUE if left.value != right.value else FALSE
|
|
253
|
+
else:
|
|
254
|
+
result = TRUE if left != right else FALSE
|
|
255
|
+
elif operator == "<=":
|
|
256
|
+
if hasattr(left, 'value') and hasattr(right, 'value'):
|
|
257
|
+
result = TRUE if left.value <= right.value else FALSE
|
|
258
|
+
else:
|
|
259
|
+
result = EvaluationError(f"Cannot compare: {left.type()} <= {right.type()}")
|
|
260
|
+
elif operator == ">=":
|
|
261
|
+
if hasattr(left, 'value') and hasattr(right, 'value'):
|
|
262
|
+
result = TRUE if left.value >= right.value else FALSE
|
|
263
|
+
else:
|
|
264
|
+
result = EvaluationError(f"Cannot compare: {left.type()} >= {right.type()}")
|
|
265
|
+
# Type-specific operations
|
|
266
|
+
elif isinstance(left, Integer) and isinstance(right, Integer):
|
|
267
|
+
result = eval_integer_infix_expression(operator, left, right)
|
|
268
|
+
elif isinstance(left, Float) and isinstance(right, Float):
|
|
269
|
+
result = eval_float_infix_expression(operator, left, right)
|
|
270
|
+
elif isinstance(left, String) and isinstance(right, String):
|
|
271
|
+
result = eval_string_infix_expression(operator, left, right)
|
|
272
|
+
# NEW: Handle string concatenation with different types
|
|
273
|
+
elif operator == "+":
|
|
274
|
+
if isinstance(left, String):
|
|
275
|
+
# Convert right to string and concatenate
|
|
276
|
+
right_str = right.inspect() if not isinstance(right, String) else right.value
|
|
277
|
+
result = String(left.value + str(right_str))
|
|
278
|
+
elif isinstance(right, String):
|
|
279
|
+
# Convert left to string and concatenate
|
|
280
|
+
left_str = left.inspect() if not isinstance(left, String) else left.value
|
|
281
|
+
result = String(str(left_str) + right.value)
|
|
282
|
+
elif isinstance(left, Integer) and isinstance(right, Integer):
|
|
283
|
+
result = Integer(left.value + right.value)
|
|
284
|
+
elif isinstance(left, Float) and isinstance(right, Float):
|
|
285
|
+
result = Float(left.value + right.value)
|
|
286
|
+
elif isinstance(left, (Integer, Float)) and isinstance(right, (Integer, Float)):
|
|
287
|
+
# Mixed numeric types
|
|
288
|
+
left_val = left.value if isinstance(left, (Integer, Float)) else float(left.value) if hasattr(left, 'value') else 0
|
|
289
|
+
right_val = right.value if isinstance(right, (Integer, Float)) else float(right.value) if hasattr(right, 'value') else 0
|
|
290
|
+
result = Float(left_val + right_val)
|
|
291
|
+
else:
|
|
292
|
+
result = EvaluationError(f"Type mismatch: {left.type()} {operator} {right.type()}")
|
|
293
|
+
else:
|
|
294
|
+
result = EvaluationError(f"Type mismatch: {left.type()} {operator} {right.type()}")
|
|
295
|
+
|
|
296
|
+
debug_log(" Infix result", result)
|
|
297
|
+
return result
|
|
298
|
+
|
|
299
|
+
def eval_integer_infix_expression(operator, left, right):
|
|
300
|
+
left_val = left.value
|
|
301
|
+
right_val = right.value
|
|
302
|
+
|
|
303
|
+
if operator == "+": return Integer(left_val + right_val)
|
|
304
|
+
elif operator == "-": return Integer(left_val - right_val)
|
|
305
|
+
elif operator == "*": return Integer(left_val * right_val)
|
|
306
|
+
elif operator == "/":
|
|
307
|
+
if right_val == 0:
|
|
308
|
+
return EvaluationError("Division by zero")
|
|
309
|
+
return Integer(left_val // right_val)
|
|
310
|
+
elif operator == "%":
|
|
311
|
+
if right_val == 0:
|
|
312
|
+
return EvaluationError("Modulo by zero")
|
|
313
|
+
return Integer(left_val % right_val)
|
|
314
|
+
elif operator == "<": return TRUE if left_val < right_val else FALSE
|
|
315
|
+
elif operator == ">": return TRUE if left_val > right_val else FALSE
|
|
316
|
+
elif operator == "<=": return TRUE if left_val <= right_val else FALSE
|
|
317
|
+
elif operator == ">=": return TRUE if left_val >= right_val else FALSE
|
|
318
|
+
elif operator == "==": return TRUE if left_val == right_val else FALSE
|
|
319
|
+
elif operator == "!=": return TRUE if left_val != right_val else FALSE
|
|
320
|
+
return EvaluationError(f"Unknown integer operator: {operator}")
|
|
321
|
+
|
|
322
|
+
def eval_float_infix_expression(operator, left, right):
|
|
323
|
+
left_val = left.value
|
|
324
|
+
right_val = right.value
|
|
325
|
+
|
|
326
|
+
if operator == "+": return Float(left_val + right_val)
|
|
327
|
+
elif operator == "-": return Float(left_val - right_val)
|
|
328
|
+
elif operator == "*": return Float(left_val * right_val)
|
|
329
|
+
elif operator == "/":
|
|
330
|
+
if right_val == 0:
|
|
331
|
+
return EvaluationError("Division by zero")
|
|
332
|
+
return Float(left_val / right_val)
|
|
333
|
+
elif operator == "%":
|
|
334
|
+
if right_val == 0:
|
|
335
|
+
return EvaluationError("Modulo by zero")
|
|
336
|
+
return Float(left_val % right_val)
|
|
337
|
+
elif operator == "<": return TRUE if left_val < right_val else FALSE
|
|
338
|
+
elif operator == ">": return TRUE if left_val > right_val else FALSE
|
|
339
|
+
elif operator == "<=": return TRUE if left_val <= right_val else FALSE
|
|
340
|
+
elif operator == ">=": return TRUE if left_val >= right_val else FALSE
|
|
341
|
+
elif operator == "==": return TRUE if left_val == right_val else FALSE
|
|
342
|
+
elif operator == "!=": return TRUE if left_val != right_val else FALSE
|
|
343
|
+
return EvaluationError(f"Unknown float operator: {operator}")
|
|
344
|
+
|
|
345
|
+
def eval_string_infix_expression(operator, left, right):
|
|
346
|
+
if operator == "+": return String(left.value + right.value)
|
|
347
|
+
elif operator == "==": return TRUE if left.value == right.value else FALSE
|
|
348
|
+
elif operator == "!=": return TRUE if left.value != right.value else FALSE
|
|
349
|
+
return EvaluationError(f"Unknown string operator: {operator}")
|
|
350
|
+
|
|
351
|
+
def eval_if_expression(ie, env):
|
|
352
|
+
debug_log("eval_if_expression", "Evaluating condition")
|
|
353
|
+
condition = eval_node(ie.condition, env)
|
|
354
|
+
if is_error(condition): # Use is_error helper
|
|
355
|
+
return condition
|
|
356
|
+
|
|
357
|
+
if is_truthy(condition):
|
|
358
|
+
debug_log(" Condition true, evaluating consequence")
|
|
359
|
+
return eval_node(ie.consequence, env)
|
|
360
|
+
elif ie.alternative:
|
|
361
|
+
debug_log(" Condition false, evaluating alternative")
|
|
362
|
+
return eval_node(ie.alternative, env)
|
|
363
|
+
debug_log(" Condition false, no alternative")
|
|
364
|
+
return NULL
|
|
365
|
+
|
|
366
|
+
def apply_function(fn, args, call_site=None):
|
|
367
|
+
# SAFE debug: avoid len() on errors
|
|
368
|
+
arg_count = len(args) if isinstance(args, (list, tuple)) else ("err" if is_error(args) else "unknown")
|
|
369
|
+
debug_log("apply_function", f"Calling {fn} with {arg_count} arguments: {args}")
|
|
370
|
+
|
|
371
|
+
if isinstance(fn, (Action, LambdaFunction)):
|
|
372
|
+
debug_log(" Calling user-defined function")
|
|
373
|
+
extended_env = extend_function_env(fn, args)
|
|
374
|
+
evaluated = eval_node(fn.body, extended_env)
|
|
375
|
+
evaluated = _resolve_awaitable(evaluated)
|
|
376
|
+
return unwrap_return_value(evaluated)
|
|
377
|
+
elif isinstance(fn, Builtin):
|
|
378
|
+
debug_log(" Calling builtin function", f"{fn.name} with args: {args}")
|
|
379
|
+
try:
|
|
380
|
+
# Builtin functions expect Zexus objects as args; ensure args is a list
|
|
381
|
+
if not isinstance(args, (list, tuple)):
|
|
382
|
+
return EvaluationError("Invalid arguments to builtin")
|
|
383
|
+
result = fn.fn(*args)
|
|
384
|
+
# If builtin returned a coroutine/future, resolve it when possible
|
|
385
|
+
if _is_awaitable(result):
|
|
386
|
+
result = _resolve_awaitable(result)
|
|
387
|
+
debug_log(" Builtin result", result)
|
|
388
|
+
return result
|
|
389
|
+
except Exception as e:
|
|
390
|
+
error = EvaluationError(f"Builtin function error: {str(e)}")
|
|
391
|
+
debug_log(" Builtin error", error)
|
|
392
|
+
return error
|
|
393
|
+
error = EvaluationError(f"Not a function: {fn.type()}")
|
|
394
|
+
debug_log(" Not a function error", error)
|
|
395
|
+
return error
|
|
396
|
+
|
|
397
|
+
def extend_function_env(fn, args):
|
|
398
|
+
env = Environment(outer=fn.env)
|
|
399
|
+
for param, arg in zip(fn.parameters, args):
|
|
400
|
+
env.set(param.value, arg)
|
|
401
|
+
return env
|
|
402
|
+
|
|
403
|
+
def unwrap_return_value(obj):
|
|
404
|
+
if isinstance(obj, ReturnValue):
|
|
405
|
+
return obj.value
|
|
406
|
+
return obj
|
|
407
|
+
|
|
408
|
+
# NEW: Lambda function evaluation
|
|
409
|
+
def eval_lambda_expression(node, env):
|
|
410
|
+
debug_log("eval_lambda_expression", f"Creating lambda with {len(node.parameters)} parameters")
|
|
411
|
+
return LambdaFunction(node.parameters, node.body, env)
|
|
412
|
+
|
|
413
|
+
# NEW: Array method implementations
|
|
414
|
+
def array_reduce(array_obj, lambda_fn, initial_value=None, env=None):
|
|
415
|
+
"""Implement array.reduce(lambda, initial_value)"""
|
|
416
|
+
if not isinstance(array_obj, List):
|
|
417
|
+
return EvaluationError("reduce() called on non-array object")
|
|
418
|
+
if not isinstance(lambda_fn, (LambdaFunction, Action)):
|
|
419
|
+
return EvaluationError("reduce() requires a lambda function as first argument")
|
|
420
|
+
|
|
421
|
+
accumulator = initial_value if initial_value is not None else array_obj.elements[0] if array_obj.elements else NULL
|
|
422
|
+
start_index = 0 if initial_value is not None else 1
|
|
423
|
+
|
|
424
|
+
for i in range(start_index, len(array_obj.elements)):
|
|
425
|
+
element = array_obj.elements[i]
|
|
426
|
+
result = apply_function(lambda_fn, [accumulator, element])
|
|
427
|
+
if is_error(result): # Use is_error helper
|
|
428
|
+
return result
|
|
429
|
+
accumulator = result
|
|
430
|
+
|
|
431
|
+
return accumulator
|
|
432
|
+
|
|
433
|
+
def array_map(array_obj, lambda_fn, env=None):
|
|
434
|
+
"""Implement array.map(lambda)"""
|
|
435
|
+
if not isinstance(array_obj, List):
|
|
436
|
+
return EvaluationError("map() called on non-array object")
|
|
437
|
+
if not isinstance(lambda_fn, (LambdaFunction, Action)):
|
|
438
|
+
return EvaluationError("map() requires a lambda function")
|
|
439
|
+
|
|
440
|
+
mapped_elements = []
|
|
441
|
+
for element in array_obj.elements:
|
|
442
|
+
result = apply_function(lambda_fn, [element])
|
|
443
|
+
if is_error(result): # Use is_error helper
|
|
444
|
+
return result
|
|
445
|
+
mapped_elements.append(result)
|
|
446
|
+
|
|
447
|
+
return List(mapped_elements)
|
|
448
|
+
|
|
449
|
+
def array_filter(array_obj, lambda_fn, env=None):
|
|
450
|
+
"""Implement array.filter(lambda)"""
|
|
451
|
+
if not isinstance(array_obj, List):
|
|
452
|
+
return EvaluationError("filter() called on non-array object")
|
|
453
|
+
if not isinstance(lambda_fn, (LambdaFunction, Action)):
|
|
454
|
+
return EvaluationError("filter() requires a lambda function")
|
|
455
|
+
|
|
456
|
+
filtered_elements = []
|
|
457
|
+
for element in array_obj.elements:
|
|
458
|
+
result = apply_function(lambda_fn, [element])
|
|
459
|
+
if is_error(result): # Use is_error helper
|
|
460
|
+
return result
|
|
461
|
+
if is_truthy(result):
|
|
462
|
+
filtered_elements.append(element)
|
|
463
|
+
|
|
464
|
+
return List(filtered_elements)
|
|
465
|
+
|
|
466
|
+
# NEW: Export system
|
|
467
|
+
def eval_export_statement(node, env):
|
|
468
|
+
"""Handle export statements - FIXED VERSION"""
|
|
469
|
+
# Support single-name and multi-name ExportStatement
|
|
470
|
+
names = []
|
|
471
|
+
if hasattr(node, 'names') and node.names:
|
|
472
|
+
names = [n.value if hasattr(n, 'value') else str(n) for n in node.names]
|
|
473
|
+
elif hasattr(node, 'name') and node.name is not None:
|
|
474
|
+
names = [node.name.value if hasattr(node.name, 'value') else str(node.name)]
|
|
475
|
+
|
|
476
|
+
if not names:
|
|
477
|
+
return EvaluationError("export: no identifiers provided to export")
|
|
478
|
+
|
|
479
|
+
debug_log("eval_export_statement", f"Exporting {len(names)} names: {names}")
|
|
480
|
+
|
|
481
|
+
for nm in names:
|
|
482
|
+
value = env.get(nm)
|
|
483
|
+
if not value:
|
|
484
|
+
return EvaluationError(f"Cannot export undefined identifier: {nm}")
|
|
485
|
+
|
|
486
|
+
debug_log(f" Exporting '{nm}'", f"value: {value}")
|
|
487
|
+
|
|
488
|
+
# CRITICAL FIX: Use the Environment's export method
|
|
489
|
+
try:
|
|
490
|
+
env.export(nm, value)
|
|
491
|
+
debug_log(f" Successfully exported via env.export()", "success")
|
|
492
|
+
except Exception as e:
|
|
493
|
+
debug_log(f" env.export() failed", str(e))
|
|
494
|
+
return EvaluationError(f"Failed to export '{nm}': {str(e)}")
|
|
495
|
+
|
|
496
|
+
debug_log("eval_export_statement", f"All exports completed. Total exports: {len(env.exports)}")
|
|
497
|
+
return NULL
|
|
498
|
+
|
|
499
|
+
def check_import_permission(exported_value, importer_file, env):
|
|
500
|
+
"""Check if importer has permission to access exported value"""
|
|
501
|
+
# For now, implement basic file-based permission checking
|
|
502
|
+
allowed_files = getattr(exported_value, '_allowed_files', [])
|
|
503
|
+
permission = getattr(exported_value, '_export_permission', 'read_only')
|
|
504
|
+
|
|
505
|
+
# If no restrictions, allow
|
|
506
|
+
if not allowed_files or allowed_files == []:
|
|
507
|
+
return True
|
|
508
|
+
|
|
509
|
+
# Normalize paths for comparison
|
|
510
|
+
importer_normalized = os.path.normpath(os.path.abspath(importer_file)) if importer_file else None
|
|
511
|
+
|
|
512
|
+
for allowed in allowed_files:
|
|
513
|
+
allowed_normalized = os.path.normpath(os.path.abspath(allowed)) if allowed else None
|
|
514
|
+
if importer_normalized and allowed_normalized and importer_normalized == allowed_normalized:
|
|
515
|
+
return True
|
|
516
|
+
# Also allow if the allowed file is a substring (module path match)
|
|
517
|
+
if importer_file and allowed in importer_file:
|
|
518
|
+
return True
|
|
519
|
+
|
|
520
|
+
return EvaluationError(f"File '{importer_file}' not authorized to import restricted export (permission: {permission})")
|
|
521
|
+
|
|
522
|
+
# === FIXED: JSON CONVERSION FUNCTIONS ===
|
|
523
|
+
def _zexus_to_python(value):
|
|
524
|
+
"""Convert Zexus objects to Python native types for JSON serialization"""
|
|
525
|
+
debug_log("_zexus_to_python", f"Converting {type(value).__name__}: {value}")
|
|
526
|
+
|
|
527
|
+
if isinstance(value, Map):
|
|
528
|
+
python_dict = {}
|
|
529
|
+
for key, val in value.pairs.items():
|
|
530
|
+
python_key = key.inspect() if hasattr(key, 'inspect') else str(key)
|
|
531
|
+
python_dict[python_key] = _zexus_to_python(val)
|
|
532
|
+
debug_log(" Converted Map to dict", python_dict)
|
|
533
|
+
return python_dict
|
|
534
|
+
elif isinstance(value, List):
|
|
535
|
+
python_list = [_zexus_to_python(item) for item in value.elements]
|
|
536
|
+
debug_log(" Converted List to list", python_list)
|
|
537
|
+
return python_list
|
|
538
|
+
elif isinstance(value, String):
|
|
539
|
+
debug_log(" Converted String to str", value.value)
|
|
540
|
+
return value.value
|
|
541
|
+
elif isinstance(value, Integer):
|
|
542
|
+
debug_log(" Converted Integer to int", value.value)
|
|
543
|
+
return value.value
|
|
544
|
+
elif isinstance(value, Float):
|
|
545
|
+
debug_log(" Converted Float to float", value.value)
|
|
546
|
+
return value.value
|
|
547
|
+
elif isinstance(value, BooleanObj):
|
|
548
|
+
debug_log(" Converted Boolean to bool", value.value)
|
|
549
|
+
return value.value
|
|
550
|
+
elif value == NULL:
|
|
551
|
+
debug_log(" Converted NULL to None")
|
|
552
|
+
return None
|
|
553
|
+
elif isinstance(value, Builtin):
|
|
554
|
+
debug_log(" Converted Builtin to string")
|
|
555
|
+
return f"<builtin: {value.name}>"
|
|
556
|
+
elif isinstance(value, DateTime):
|
|
557
|
+
debug_log(" Converted DateTime to float", value.timestamp)
|
|
558
|
+
return value.timestamp
|
|
559
|
+
else:
|
|
560
|
+
debug_log(" Converted unknown to string", str(value))
|
|
561
|
+
return str(value)
|
|
562
|
+
|
|
563
|
+
def _python_to_zexus(value):
|
|
564
|
+
"""Convert Python native types to Zexus objects"""
|
|
565
|
+
debug_log("_python_to_zexus", f"Converting Python type: {type(value)}: {value}")
|
|
566
|
+
|
|
567
|
+
if isinstance(value, dict):
|
|
568
|
+
pairs = {}
|
|
569
|
+
for k, v in value.items():
|
|
570
|
+
pairs[k] = _python_to_zexus(v)
|
|
571
|
+
debug_log(" Converted dict to Map", pairs)
|
|
572
|
+
return Map(pairs)
|
|
573
|
+
elif isinstance(value, list):
|
|
574
|
+
zexus_list = List([_python_to_zexus(item) for item in value])
|
|
575
|
+
debug_log(" Converted list to List", zexus_list)
|
|
576
|
+
return zexus_list
|
|
577
|
+
elif isinstance(value, str):
|
|
578
|
+
debug_log(" Converted str to String", value)
|
|
579
|
+
return String(value)
|
|
580
|
+
elif isinstance(value, int):
|
|
581
|
+
debug_log(" Converted int to Integer", value)
|
|
582
|
+
return Integer(value)
|
|
583
|
+
elif isinstance(value, float):
|
|
584
|
+
debug_log(" Converted float to Float", value)
|
|
585
|
+
return Float(value)
|
|
586
|
+
elif isinstance(value, bool):
|
|
587
|
+
debug_log(" Converted bool to Boolean", value)
|
|
588
|
+
return BooleanObj(value)
|
|
589
|
+
elif value is None:
|
|
590
|
+
debug_log(" Converted None to NULL")
|
|
591
|
+
return NULL
|
|
592
|
+
else:
|
|
593
|
+
debug_log(" Converted unknown to String", str(value))
|
|
594
|
+
return String(str(value))
|
|
595
|
+
|
|
596
|
+
# === FIXED BUILTIN FUNCTIONS FOR PHASE 1 ===
|
|
597
|
+
|
|
598
|
+
def builtin_datetime_now(*args):
|
|
599
|
+
debug_log("builtin_datetime_now", "called")
|
|
600
|
+
return DateTime.now()
|
|
601
|
+
|
|
602
|
+
def builtin_timestamp(*args):
|
|
603
|
+
debug_log("builtin_timestamp", f"called with {len(args)} args")
|
|
604
|
+
if len(args) == 0:
|
|
605
|
+
return DateTime.now().to_timestamp()
|
|
606
|
+
elif len(args) == 1 and isinstance(args[0], DateTime):
|
|
607
|
+
return args[0].to_timestamp()
|
|
608
|
+
return EvaluationError("timestamp() takes 0 or 1 DateTime argument")
|
|
609
|
+
|
|
610
|
+
def builtin_math_random(*args):
|
|
611
|
+
debug_log("builtin_math_random", f"called with {len(args)} args")
|
|
612
|
+
if len(args) == 0:
|
|
613
|
+
return Math.random_int(0, 100)
|
|
614
|
+
elif len(args) == 1 and isinstance(args[0], Integer):
|
|
615
|
+
return Math.random_int(0, args[0].value)
|
|
616
|
+
elif len(args) == 2 and all(isinstance(a, Integer) for a in args):
|
|
617
|
+
return Math.random_int(args[0].value, args[1].value)
|
|
618
|
+
return EvaluationError("random() takes 0, 1, or 2 integer arguments")
|
|
619
|
+
|
|
620
|
+
def builtin_to_hex(*args):
|
|
621
|
+
debug_log("builtin_to_hex", f"called with {args}")
|
|
622
|
+
if len(args) != 1:
|
|
623
|
+
return EvaluationError("to_hex() takes exactly 1 argument")
|
|
624
|
+
return Math.to_hex_string(args[0])
|
|
625
|
+
|
|
626
|
+
def builtin_from_hex(*args):
|
|
627
|
+
debug_log("builtin_from_hex", f"called with {args}")
|
|
628
|
+
if len(args) != 1 or not isinstance(args[0], String):
|
|
629
|
+
return EvaluationError("from_hex() takes exactly 1 string argument")
|
|
630
|
+
return Math.hex_to_int(args[0])
|
|
631
|
+
|
|
632
|
+
def builtin_sqrt(*args):
|
|
633
|
+
debug_log("builtin_sqrt", f"called with {args}")
|
|
634
|
+
if len(args) != 1:
|
|
635
|
+
return EvaluationError("sqrt() takes exactly 1 argument")
|
|
636
|
+
return Math.sqrt(args[0])
|
|
637
|
+
|
|
638
|
+
# File I/O builtins - FIXED VERSIONS
|
|
639
|
+
def builtin_file_read_text(*args):
|
|
640
|
+
debug_log("builtin_file_read_text", f"called with {args}")
|
|
641
|
+
if len(args) != 1 or not isinstance(args[0], String):
|
|
642
|
+
return EvaluationError("file_read_text() takes exactly 1 string argument")
|
|
643
|
+
return File.read_text(args[0])
|
|
644
|
+
|
|
645
|
+
def builtin_file_write_text(*args):
|
|
646
|
+
debug_log("builtin_file_write_text", f"called with {args}")
|
|
647
|
+
if len(args) != 2 or not all(isinstance(a, String) for a in args):
|
|
648
|
+
return EvaluationError("file_write_text() takes exactly 2 string arguments")
|
|
649
|
+
return File.write_text(args[0], args[1])
|
|
650
|
+
|
|
651
|
+
def builtin_file_exists(*args):
|
|
652
|
+
debug_log("builtin_file_exists", f"called with {args}")
|
|
653
|
+
if len(args) != 1 or not isinstance(args[0], String):
|
|
654
|
+
return EvaluationError("file_exists() takes exactly 1 string argument")
|
|
655
|
+
return File.exists(args[0])
|
|
656
|
+
|
|
657
|
+
def builtin_file_read_json(*args):
|
|
658
|
+
debug_log("builtin_file_read_json", f"called with {args}")
|
|
659
|
+
if len(args) != 1 or not isinstance(args[0], String):
|
|
660
|
+
return EvaluationError("file_read_json() takes exactly 1 string argument")
|
|
661
|
+
return File.read_json(args[0])
|
|
662
|
+
|
|
663
|
+
# FIXED: JSON write function - CRITICAL FIX
|
|
664
|
+
def builtin_file_write_json(*args):
|
|
665
|
+
debug_log("builtin_file_write_json", f"called with {args}")
|
|
666
|
+
if len(args) != 2 or not isinstance(args[0], String):
|
|
667
|
+
return EvaluationError("file_write_json() takes path string and data")
|
|
668
|
+
|
|
669
|
+
path = args[0]
|
|
670
|
+
data = args[1]
|
|
671
|
+
|
|
672
|
+
debug_log(" JSON write - path", path.value if isinstance(path, String) else path)
|
|
673
|
+
debug_log(" JSON write - data type", type(data).__name__)
|
|
674
|
+
debug_log(" JSON write - data value", data)
|
|
675
|
+
|
|
676
|
+
try:
|
|
677
|
+
# FIX: Use the File.write_json method which properly handles conversion
|
|
678
|
+
return File.write_json(path, data)
|
|
679
|
+
except Exception as e:
|
|
680
|
+
return EvaluationError(f"JSON write error: {str(e)}")
|
|
681
|
+
|
|
682
|
+
def builtin_file_append(*args):
|
|
683
|
+
debug_log("builtin_file_append", f"called with {args}")
|
|
684
|
+
if len(args) != 2 or not all(isinstance(a, String) for a in args):
|
|
685
|
+
return EvaluationError("file_append() takes exactly 2 string arguments")
|
|
686
|
+
return File.append_text(args[0], args[1])
|
|
687
|
+
|
|
688
|
+
def builtin_file_list_dir(*args):
|
|
689
|
+
debug_log("builtin_file_list_dir", f"called with {args}")
|
|
690
|
+
if len(args) != 1 or not isinstance(args[0], String):
|
|
691
|
+
return EvaluationError("file_list_dir() takes exactly 1 string argument")
|
|
692
|
+
return File.list_directory(args[0])
|
|
693
|
+
|
|
694
|
+
# Debug builtins
|
|
695
|
+
def builtin_debug_log(*args):
|
|
696
|
+
debug_log("builtin_debug_log", f"called with {len(args)} args")
|
|
697
|
+
if len(args) == 0:
|
|
698
|
+
return EvaluationError("debug_log() requires at least a message")
|
|
699
|
+
message = args[0]
|
|
700
|
+
value = args[1] if len(args) > 1 else None
|
|
701
|
+
return Debug.log(message, value)
|
|
702
|
+
|
|
703
|
+
def builtin_debug_trace(*args):
|
|
704
|
+
debug_log("builtin_debug_trace", f"called with {args}")
|
|
705
|
+
if len(args) != 1 or not isinstance(args[0], String):
|
|
706
|
+
return EvaluationError("debug_trace() takes exactly 1 string argument")
|
|
707
|
+
return Debug.trace(args[0])
|
|
708
|
+
|
|
709
|
+
# FIXED: String function to handle all Zexus types
|
|
710
|
+
def builtin_string(*args):
|
|
711
|
+
debug_log("builtin_string", f"called with {args}")
|
|
712
|
+
if len(args) != 1:
|
|
713
|
+
return EvaluationError(f"string() takes exactly 1 argument ({len(args)} given)")
|
|
714
|
+
arg = args[0]
|
|
715
|
+
|
|
716
|
+
if isinstance(arg, Integer):
|
|
717
|
+
result = String(str(arg.value))
|
|
718
|
+
elif isinstance(arg, Float):
|
|
719
|
+
result = String(str(arg.value))
|
|
720
|
+
elif isinstance(arg, String):
|
|
721
|
+
result = arg
|
|
722
|
+
elif isinstance(arg, BooleanObj):
|
|
723
|
+
result = String("true" if arg.value else "false")
|
|
724
|
+
elif isinstance(arg, (List, Map)):
|
|
725
|
+
result = String(arg.inspect())
|
|
726
|
+
elif isinstance(arg, Builtin):
|
|
727
|
+
result = String(f"<built-in function: {arg.name}>")
|
|
728
|
+
elif isinstance(arg, DateTime):
|
|
729
|
+
result = String(f"<DateTime: {arg.timestamp}>")
|
|
730
|
+
elif is_error(arg): # Use is_error helper
|
|
731
|
+
result = String(str(arg))
|
|
732
|
+
elif arg == NULL:
|
|
733
|
+
result = String("null")
|
|
734
|
+
else:
|
|
735
|
+
result = String("unknown")
|
|
736
|
+
|
|
737
|
+
debug_log(" builtin_string result", result)
|
|
738
|
+
return result
|
|
739
|
+
|
|
740
|
+
# Other existing builtin functions
|
|
741
|
+
def builtin_len(*args):
|
|
742
|
+
debug_log("builtin_len", f"called with {args}")
|
|
743
|
+
if len(args) != 1:
|
|
744
|
+
return EvaluationError(f"len() takes exactly 1 argument ({len(args)} given)")
|
|
745
|
+
arg = args[0]
|
|
746
|
+
if isinstance(arg, String):
|
|
747
|
+
return Integer(len(arg.value))
|
|
748
|
+
elif isinstance(arg, List):
|
|
749
|
+
return Integer(len(arg.elements))
|
|
750
|
+
return EvaluationError(f"len() not supported for type {arg.type()}")
|
|
751
|
+
|
|
752
|
+
def builtin_first(*args):
|
|
753
|
+
debug_log("builtin_first", f"called with {args}")
|
|
754
|
+
if len(args) != 1:
|
|
755
|
+
return EvaluationError(f"first() takes exactly 1 argument ({len(args)} given)")
|
|
756
|
+
if not isinstance(args[0], List):
|
|
757
|
+
return EvaluationError("first() expects a list")
|
|
758
|
+
list_obj = args[0]
|
|
759
|
+
return list_obj.elements[0] if list_obj.elements else NULL
|
|
760
|
+
|
|
761
|
+
def builtin_rest(*args):
|
|
762
|
+
debug_log("builtin_rest", f"called with {args}")
|
|
763
|
+
if len(args) != 1:
|
|
764
|
+
return EvaluationError(f"rest() takes exactly 1 argument ({len(args)} given)")
|
|
765
|
+
if not isinstance(args[0], List):
|
|
766
|
+
return EvaluationError("rest() expects a list")
|
|
767
|
+
list_obj = args[0]
|
|
768
|
+
return List(list_obj.elements[1:]) if len(list_obj.elements) > 0 else List([])
|
|
769
|
+
|
|
770
|
+
def builtin_push(*args):
|
|
771
|
+
debug_log("builtin_push", f"called with {args}")
|
|
772
|
+
if len(args) != 2:
|
|
773
|
+
return EvaluationError(f"push() takes exactly 2 arguments ({len(args)} given)")
|
|
774
|
+
if not isinstance(args[0], List):
|
|
775
|
+
return EvaluationError("push() expects a list as first argument")
|
|
776
|
+
list_obj = args[0]
|
|
777
|
+
new_elements = list_obj.elements + [args[1]]
|
|
778
|
+
return List(new_elements)
|
|
779
|
+
|
|
780
|
+
def builtin_reduce(*args):
|
|
781
|
+
"""Built-in reduce function for arrays"""
|
|
782
|
+
debug_log("builtin_reduce", f"called with {args}")
|
|
783
|
+
if len(args) < 2 or len(args) > 3:
|
|
784
|
+
return EvaluationError("reduce() takes 2 or 3 arguments (array, lambda[, initial])")
|
|
785
|
+
array_obj, lambda_fn = args[0], args[1]
|
|
786
|
+
initial = args[2] if len(args) == 3 else None
|
|
787
|
+
return array_reduce(array_obj, lambda_fn, initial)
|
|
788
|
+
|
|
789
|
+
def builtin_map(*args):
|
|
790
|
+
"""Built-in map function for arrays"""
|
|
791
|
+
debug_log("builtin_map", f"called with {args}")
|
|
792
|
+
if len(args) != 2:
|
|
793
|
+
return EvaluationError("map() takes 2 arguments (array, lambda)")
|
|
794
|
+
return array_map(args[0], args[1])
|
|
795
|
+
|
|
796
|
+
def builtin_filter(*args):
|
|
797
|
+
"""Built-in filter function for arrays"""
|
|
798
|
+
debug_log("builtin_filter", f"called with {args}")
|
|
799
|
+
if len(args) != 2:
|
|
800
|
+
return EvaluationError("filter() takes 2 arguments (array, lambda)")
|
|
801
|
+
return array_filter(args[0], args[1])
|
|
802
|
+
|
|
803
|
+
# Register core builtins
|
|
804
|
+
try:
|
|
805
|
+
builtins.update({
|
|
806
|
+
"now": Builtin(builtin_datetime_now, "now"),
|
|
807
|
+
"timestamp": Builtin(builtin_timestamp, "timestamp"),
|
|
808
|
+
"random": Builtin(builtin_math_random, "random"),
|
|
809
|
+
"to_hex": Builtin(builtin_to_hex, "to_hex"),
|
|
810
|
+
"from_hex": Builtin(builtin_from_hex, "from_hex"),
|
|
811
|
+
"sqrt": Builtin(builtin_sqrt, "sqrt"),
|
|
812
|
+
|
|
813
|
+
"file_read_text": Builtin(builtin_file_read_text, "file_read_text"),
|
|
814
|
+
"file_write_text": Builtin(builtin_file_write_text, "file_write_text"),
|
|
815
|
+
"file_exists": Builtin(builtin_file_exists, "file_exists"),
|
|
816
|
+
"file_read_json": Builtin(builtin_file_read_json, "file_read_json"),
|
|
817
|
+
"file_write_json": Builtin(builtin_file_write_json, "file_write_json"),
|
|
818
|
+
"file_append": Builtin(builtin_file_append, "file_append"),
|
|
819
|
+
"file_list_dir": Builtin(builtin_file_list_dir, "file_list_dir"),
|
|
820
|
+
|
|
821
|
+
"debug_log": Builtin(builtin_debug_log, "debug_log"),
|
|
822
|
+
"debug_trace": Builtin(builtin_debug_trace, "debug_trace"),
|
|
823
|
+
|
|
824
|
+
"string": Builtin(builtin_string, "string"),
|
|
825
|
+
"len": Builtin(builtin_len, "len"),
|
|
826
|
+
"first": Builtin(builtin_first, "first"),
|
|
827
|
+
"rest": Builtin(builtin_rest, "rest"),
|
|
828
|
+
"push": Builtin(builtin_push, "push"),
|
|
829
|
+
"reduce": Builtin(builtin_reduce, "reduce"),
|
|
830
|
+
"map": Builtin(builtin_map, "map"),
|
|
831
|
+
"filter": Builtin(builtin_filter, "filter"),
|
|
832
|
+
})
|
|
833
|
+
except NameError:
|
|
834
|
+
# If Builtin class is not available at import time, keep pending mapping
|
|
835
|
+
try:
|
|
836
|
+
__CORE_BUILTINS_PENDING = {
|
|
837
|
+
"now": builtin_datetime_now,
|
|
838
|
+
"timestamp": builtin_timestamp,
|
|
839
|
+
"random": builtin_math_random,
|
|
840
|
+
"to_hex": builtin_to_hex,
|
|
841
|
+
"from_hex": builtin_from_hex,
|
|
842
|
+
"sqrt": builtin_sqrt,
|
|
843
|
+
"file_read_text": builtin_file_read_text,
|
|
844
|
+
"file_write_text": builtin_file_write_text,
|
|
845
|
+
"file_exists": builtin_file_exists,
|
|
846
|
+
"file_read_json": builtin_file_read_json,
|
|
847
|
+
"file_write_json": builtin_file_write_json,
|
|
848
|
+
"file_append": builtin_file_append,
|
|
849
|
+
"file_list_dir": builtin_file_list_dir,
|
|
850
|
+
"debug_log": builtin_debug_log,
|
|
851
|
+
"debug_trace": builtin_debug_trace,
|
|
852
|
+
"string": builtin_string,
|
|
853
|
+
"len": builtin_len,
|
|
854
|
+
"first": builtin_first,
|
|
855
|
+
"rest": builtin_rest,
|
|
856
|
+
"push": builtin_push,
|
|
857
|
+
"reduce": builtin_reduce,
|
|
858
|
+
"map": builtin_map,
|
|
859
|
+
"filter": builtin_filter,
|
|
860
|
+
}
|
|
861
|
+
except Exception:
|
|
862
|
+
pass
|
|
863
|
+
|
|
864
|
+
# --- RENDERER REGISTRY & HELPERS ---------------------------------------
|
|
865
|
+
# Try to use the real renderer backend if available, otherwise keep a safe registry.
|
|
866
|
+
try:
|
|
867
|
+
from renderer import backend as _BACKEND
|
|
868
|
+
_BACKEND_AVAILABLE = True
|
|
869
|
+
except Exception:
|
|
870
|
+
_BACKEND_AVAILABLE = False
|
|
871
|
+
_BACKEND = None
|
|
872
|
+
|
|
873
|
+
# Local fallback registry and palette (used only if backend unavailable)
|
|
874
|
+
RENDER_REGISTRY = {
|
|
875
|
+
'screens': {},
|
|
876
|
+
'components': {},
|
|
877
|
+
'themes': {},
|
|
878
|
+
'canvases': {},
|
|
879
|
+
'current_theme': None
|
|
880
|
+
}
|
|
881
|
+
|
|
882
|
+
# Helper converters: Zexus object -> Python native/simple printable
|
|
883
|
+
def _to_str(obj):
|
|
884
|
+
if isinstance(obj, String):
|
|
885
|
+
return obj.value
|
|
886
|
+
if isinstance(obj, (Integer, Float)):
|
|
887
|
+
return str(obj.value)
|
|
888
|
+
return getattr(obj, 'inspect', lambda: str(obj))()
|
|
889
|
+
|
|
890
|
+
def _to_python(obj):
|
|
891
|
+
"""Convert Map/List/Zexus primitives into Python primitives for registry storage."""
|
|
892
|
+
if obj is None:
|
|
893
|
+
return None
|
|
894
|
+
if isinstance(obj, Map):
|
|
895
|
+
py = {}
|
|
896
|
+
for k, v in obj.pairs.items():
|
|
897
|
+
key = k.inspect() if hasattr(k, 'inspect') else str(k)
|
|
898
|
+
py[key] = _to_python(v)
|
|
899
|
+
return py
|
|
900
|
+
if isinstance(obj, List):
|
|
901
|
+
return [_to_python(e) for e in obj.elements]
|
|
902
|
+
if isinstance(obj, String):
|
|
903
|
+
return obj.value
|
|
904
|
+
if isinstance(obj, Integer):
|
|
905
|
+
return obj.value
|
|
906
|
+
if isinstance(obj, Float):
|
|
907
|
+
return obj.value
|
|
908
|
+
if obj == NULL:
|
|
909
|
+
return None
|
|
910
|
+
return getattr(obj, 'inspect', lambda: str(obj))()

# --- RENDERER BUILTIN IMPLEMENTATIONS (delegating to backend if present) ---

def builtin_mix(*args):
    """mix(colorA, colorB, ratio) -> String"""
    if len(args) != 3:
        return EvaluationError("mix() expects 3 arguments (colorA, colorB, ratio)")
    a, b, ratio = args
    a_name = _to_str(a); b_name = _to_str(b)
    try:
        ratio_val = float(ratio.value) if isinstance(ratio, (Integer, Float)) else float(str(ratio))
    except Exception:
        ratio_val = 0.5

    if _BACKEND_AVAILABLE:
        try:
            res = _BACKEND.mix(a_name, b_name, ratio_val)
            return String(str(res))
        except Exception as e:
            return String(f"mix({a_name},{b_name},{ratio_val})")
    # fallback: store mix representation locally
    return String(f"mix({a_name},{b_name},{ratio_val})")

def builtin_define_screen(*args):
    if len(args) < 1:
        return EvaluationError("define_screen() requires at least a name")
    name = _to_str(args[0])
    props = _to_python(args[1]) if len(args) > 1 else {}
    if _BACKEND_AVAILABLE:
        try:
            _BACKEND.define_screen(name, props)
            return NULL
        except Exception as e:
            return EvaluationError(str(e))
    # fallback
    RENDER_REGISTRY['screens'].setdefault(name, {'properties': props, 'components': [], 'theme': None})
    return NULL

def builtin_define_component(*args):
    if len(args) < 1:
        return EvaluationError("define_component() requires at least a name")
    name = _to_str(args[0]); props = _to_python(args[1]) if len(args) > 1 else {}
    if _BACKEND_AVAILABLE:
        try:
            _BACKEND.define_component(name, props)
            return NULL
        except Exception as e:
            return EvaluationError(str(e))
    RENDER_REGISTRY['components'][name] = props
    return NULL

def builtin_add_to_screen(*args):
    if len(args) != 2:
        return EvaluationError("add_to_screen() requires (screen_name, component_name)")
    screen = _to_str(args[0]); comp = _to_str(args[1])
    if _BACKEND_AVAILABLE:
        try:
            _BACKEND.add_to_screen(screen, comp)
            return NULL
        except Exception as e:
            return EvaluationError(str(e))
    if screen not in RENDER_REGISTRY['screens']:
        return EvaluationError(f"Screen '{screen}' not found")
    RENDER_REGISTRY['screens'][screen]['components'].append(comp)
    return NULL

def builtin_render_screen(*args):
    if len(args) != 1:
        return EvaluationError("render_screen() requires exactly 1 argument")
    name = _to_str(args[0])
    if _BACKEND_AVAILABLE:
        try:
            out = _BACKEND.render_screen(name)
            return String(str(out))
        except Exception as e:
            return String(f"<render error: {str(e)}>")
    screen = RENDER_REGISTRY['screens'].get(name)
    if not screen:
        return String(f"<missing screen: {name}>")
    props = screen.get('properties', {}); comps = screen.get('components', [])
    theme = screen.get('theme') or RENDER_REGISTRY.get('current_theme')
    return String(f"Screen:{name} props={props} components={comps} theme={theme}")

def builtin_set_theme(*args):
    if len(args) == 1:
        theme_name = _to_str(args[0])
        if _BACKEND_AVAILABLE:
            try:
                _BACKEND.set_theme(theme_name)
                return NULL
            except Exception as e:
                return EvaluationError(str(e))
        RENDER_REGISTRY['current_theme'] = theme_name
        return NULL
    if len(args) == 2:
        target = _to_str(args[0]); theme_name = _to_str(args[1])
        if _BACKEND_AVAILABLE:
            try:
                _BACKEND.set_theme(target, theme_name)
                return NULL
            except Exception as e:
                return EvaluationError(str(e))
        if target in RENDER_REGISTRY['screens']:
            RENDER_REGISTRY['screens'][target]['theme'] = theme_name
            return NULL
        RENDER_REGISTRY['themes'].setdefault(theme_name, {})
        return NULL
    return EvaluationError("set_theme() requires 1 (theme) or 2 (target,theme) args")

def builtin_create_canvas(*args):
    if len(args) != 2:
        return EvaluationError("create_canvas(width, height)")
    try:
        wid = int(args[0].value) if isinstance(args[0], Integer) else int(str(args[0]))
        hei = int(args[1].value) if isinstance(args[1], Integer) else int(str(args[1]))
    except Exception:
        return EvaluationError("Invalid numeric arguments to create_canvas()")
    if _BACKEND_AVAILABLE:
        try:
            cid = _BACKEND.create_canvas(wid, hei)
            return String(str(cid))
        except Exception as e:
            return EvaluationError(str(e))
    cid = f"canvas_{len(RENDER_REGISTRY['canvases'])+1}"
    RENDER_REGISTRY['canvases'][cid] = {'width': wid, 'height': hei, 'draw_ops': []}
    return String(cid)

def builtin_draw_line(*args):
    if len(args) != 5:
        return EvaluationError("draw_line(canvas_id,x1,y1,x2,y2)")
    cid = _to_str(args[0])
    try:
        coords = [int(a.value) if isinstance(a, Integer) else int(str(a)) for a in args[1:]]
    except Exception:
        return EvaluationError("Invalid coordinates in draw_line()")
    if _BACKEND_AVAILABLE:
        try:
            _BACKEND.draw_line(cid, *coords)
            return NULL
        except Exception as e:
            return EvaluationError(str(e))
    canvas = RENDER_REGISTRY['canvases'].get(cid)
    if not canvas:
        return EvaluationError(f"Canvas {cid} not found")
    canvas['draw_ops'].append(('line', coords))
    return NULL

def builtin_draw_text(*args):
    if len(args) != 4:
        return EvaluationError("draw_text(canvas_id,x,y,text)")
    cid = _to_str(args[0])
    try:
        x = int(args[1].value) if isinstance(args[1], Integer) else int(str(args[1]))
        y = int(args[2].value) if isinstance(args[2], Integer) else int(str(args[2]))
    except Exception:
        return EvaluationError("Invalid coordinates in draw_text()")
    text = _to_str(args[3])
    if _BACKEND_AVAILABLE:
        try:
            _BACKEND.draw_text(cid, x, y, text)
            return NULL
        except Exception as e:
            return EvaluationError(str(e))
    canvas = RENDER_REGISTRY['canvases'].get(cid)
    if not canvas:
        return EvaluationError(f"Canvas {cid} not found")
    canvas['draw_ops'].append(('text', (x, y, text)))
    return NULL
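The canvas builtins follow the same pattern: in fallback mode they only record drawing operations, which keeps scripts runnable (and testable) without a graphical backend. A small sketch, again assuming the fallback path:

# Illustrative sketch of the canvas fallback.
cid = builtin_create_canvas(Integer(80), Integer(24))        # e.g. String("canvas_1")
builtin_draw_line(cid, Integer(0), Integer(0), Integer(79), Integer(23))
builtin_draw_text(cid, Integer(2), Integer(1), String("hello"))
print(RENDER_REGISTRY['canvases'][cid.value]['draw_ops'])
# -> [('line', [0, 0, 79, 23]), ('text', (2, 1, 'hello'))]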

# --- REGISTER RENDERER BUILTINS INTO builtins DICTIONARY ---------------------
# (leave the existing update logic in place; this code will add entries if `builtins` exists)
try:
    builtins.update({
        "mix": Builtin(builtin_mix, "mix"),
        "define_screen": Builtin(builtin_define_screen, "define_screen"),
        "define_component": Builtin(builtin_define_component, "define_component"),
        "add_to_screen": Builtin(builtin_add_to_screen, "add_to_screen"),
        "render_screen": Builtin(builtin_render_screen, "render_screen"),
        "set_theme": Builtin(builtin_set_theme, "set_theme"),
        "create_canvas": Builtin(builtin_create_canvas, "create_canvas"),
        "draw_line": Builtin(builtin_draw_line, "draw_line"),
        "draw_text": Builtin(builtin_draw_text, "draw_text"),
    })
except NameError:
    # keep the pending dict as before; other code will merge later
    try:
        __RENDERER_BUILTINS_PENDING = {
            "mix": ("builtin", builtin_mix),
            "define_screen": ("builtin", builtin_define_screen),
            "define_component": ("builtin", builtin_define_component),
            "add_to_screen": ("builtin", builtin_add_to_screen),
            "render_screen": ("builtin", builtin_render_screen),
            "set_theme": ("builtin", builtin_set_theme),
            "create_canvas": ("builtin", builtin_create_canvas),
            "draw_line": ("builtin", builtin_draw_line),
            "draw_text": ("builtin", builtin_draw_text),
        }
    except Exception:
        pass
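If the `builtins` dictionary does not exist yet at import time, the renderer entries are parked in `__RENDERER_BUILTINS_PENDING`; the merge itself happens elsewhere in the interpreter. A minimal sketch of what such a merge could look like, assuming it wraps each callable in `Builtin` the same way the direct path does:

# Hypothetical merge helper; the real merge logic lives elsewhere in the interpreter.
def merge_pending_renderer_builtins(builtins_table):
    pending = globals().get('__RENDERER_BUILTINS_PENDING', {})
    for name, (_kind, fn) in pending.items():
        builtins_table.setdefault(name, Builtin(fn, name))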

# === CRITICAL FIX: Enhanced LetStatement Evaluation ===
def eval_let_statement_fixed(node, env, stack_trace):
    """FIXED: Evaluate let statement without circular dependencies"""
    debug_log("eval_let_statement", f"let {node.name.value}")

    # CRITICAL FIX: Evaluate the value FIRST, before setting the variable
    value = eval_node(node.value, env, stack_trace)
    if is_error(value):  # Use is_error helper
        debug_log(" Let statement value evaluation error", value)
        return value

    # THEN set the variable in the environment
    env.set(node.name.value, value)
    debug_log(" Let statement successful", f"{node.name.value} = {value}")
    return NULL

# === CRITICAL FIX: Enhanced Try-Catch Evaluation ===
def eval_try_catch_statement_fixed(node, env, stack_trace):
    """FIXED: Evaluate try-catch statement with proper error handling"""
    debug_log("eval_try_catch_statement", f"error_var: {node.error_variable.value if node.error_variable else 'error'}")
    try:
        debug_log(" Executing try block")
        result = eval_node(node.try_block, env, stack_trace)
        if is_error(result):  # Use is_error helper
            debug_log(" Try block returned error", result)
            catch_env = Environment(outer=env)
            error_var_name = node.error_variable.value if node.error_variable else "error"
            error_value = String(str(result))
            catch_env.set(error_var_name, error_value)
            debug_log(f" Set error variable '{error_var_name}' to: {error_value}")
            debug_log(" Executing catch block")
            return eval_node(node.catch_block, catch_env, stack_trace)
        else:
            debug_log(" Try block completed successfully")
            return result
    except Exception as e:
        debug_log(f" Exception caught in try block: {e}")
        catch_env = Environment(outer=env)
        error_var_name = node.error_variable.value if node.error_variable else "error"
        error_value = String(str(e))
        catch_env.set(error_var_name, error_value)
        debug_log(f" Set error variable '{error_var_name}' to: {error_value}")
        debug_log(" Executing catch block")
        return eval_node(node.catch_block, catch_env, stack_trace)

# === ENHANCED MAIN EVAL_NODE FUNCTION WITH CRITICAL FIXES ===
def eval_node(node, env, stack_trace=None):
    if node is None:
        debug_log("eval_node", "Node is None, returning NULL")
        return NULL

    node_type = type(node)
    stack_trace = stack_trace or []

    # Add to stack trace for better error reporting
    current_frame = f" at {node_type.__name__}"
    if hasattr(node, 'token') and node.token:
        current_frame += f" (line {node.token.line})"
    stack_trace.append(current_frame)

    debug_log("eval_node", f"Processing {node_type.__name__}")

    try:
        # Statements
        if node_type == Program:
            debug_log(" Program node", f"{len(node.statements)} statements")
            return eval_program(node.statements, env)

        elif node_type == ExpressionStatement:
            debug_log(" ExpressionStatement node")
            return eval_node(node.expression, env, stack_trace)

        elif node_type == BlockStatement:
            debug_log(" BlockStatement node", f"{len(node.statements)} statements")
            return eval_block_statement(node, env)

        elif node_type == ReturnStatement:
            debug_log(" ReturnStatement node")
            val = eval_node(node.return_value, env, stack_trace)
            if is_error(val):  # Use is_error helper
                return val
            return ReturnValue(val)

        # CRITICAL FIX: Use the fixed let statement evaluation
        elif node_type == LetStatement:
            return eval_let_statement_fixed(node, env, stack_trace)

        elif node_type == ActionStatement:
            debug_log(" ActionStatement node", f"action {node.name.value}")
            action_obj = Action(node.parameters, node.body, env)
            env.set(node.name.value, action_obj)
            return NULL

        # NEW: Export statement
        elif node_type == ExportStatement:
            # safe logging for single/multi-name export statements
            try:
                if hasattr(node, 'names') and node.names:
                    names_text = ','.join([n.value if hasattr(n, 'value') else str(n) for n in node.names])
                elif hasattr(node, 'name') and node.name is not None:
                    names_text = getattr(node.name, 'value', str(node.name))
                else:
                    names_text = '<no-names>'
            except Exception:
                names_text = '<unknown>'
            debug_log(" ExportStatement node", f"export {names_text}")
            return eval_export_statement(node, env)

        elif node_type == IfStatement:
            debug_log(" IfStatement node")
            condition = eval_node(node.condition, env, stack_trace)
            if is_error(condition):  # Use is_error helper
                return condition
            if is_truthy(condition):
                debug_log(" If condition true")
                return eval_node(node.consequence, env, stack_trace)
            elif node.alternative is not None:
                debug_log(" If condition false, has alternative")
                return eval_node(node.alternative, env, stack_trace)
            debug_log(" If condition false, no alternative")
            return NULL

        elif node_type == WhileStatement:
            debug_log(" WhileStatement node")
            result = NULL
            while True:
                condition = eval_node(node.condition, env, stack_trace)
                if is_error(condition):  # Use is_error helper
                    return condition
                if not is_truthy(condition):
                    break
                result = eval_node(node.body, env, stack_trace)
                if isinstance(result, (ReturnValue, EvaluationError, ObjectEvaluationError)):
                    break
            return result

        elif node_type == ForEachStatement:
            debug_log(" ForEachStatement node", f"for each {node.item.value}")
            iterable = eval_node(node.iterable, env, stack_trace)
            if is_error(iterable):  # Use is_error helper
                return iterable
            if not isinstance(iterable, List):
                return EvaluationError("for-each loop expected list")

            result = NULL
            for element in iterable.elements:
                env.set(node.item.value, element)
                result = eval_node(node.body, env, stack_trace)
                if isinstance(result, (ReturnValue, EvaluationError, ObjectEvaluationError)):
                    break

            return result

        # CRITICAL FIX: Use the fixed try-catch evaluation
        elif node_type == TryCatchStatement:
            return eval_try_catch_statement_fixed(node, env, stack_trace)

        elif node_type == AssignmentExpression:
            debug_log(" AssignmentExpression node")
            return eval_assignment_expression(node, env)

        elif node_type == PropertyAccessExpression:
            debug_log(" PropertyAccessExpression node", f"{node.object}.{node.property}")
            obj = eval_node(node.object, env, stack_trace)
            if is_error(obj):  # Use is_error helper
                return obj
            property_name = node.property.value

            if isinstance(obj, EmbeddedCode):
                if property_name == "code":
                    return String(obj.code)
                elif property_name == "language":
                    return String(obj.language)
            # Default behavior for property access: return NULL if not found
            # (eval_identifier would return an error, but property access
            # might just mean a missing property in dynamic objects like Maps)
            # However, for entity instances, we would expect a proper getter.
            if isinstance(obj, Map):
                return obj.pairs.get(property_name, NULL)
            # You might have a specific `EntityInstance` or similar object
            # that implements a `get` method for properties.
            elif hasattr(obj, 'get') and callable(obj.get):
                return obj.get(property_name)

            return NULL  # Or raise an error if strict property access is desired

        elif node_type == AST_Boolean:
            debug_log(" Boolean node", f"value: {node.value}")
            return TRUE if node.value else FALSE

        # NEW: Lambda expression
        elif node_type == LambdaExpression:
            debug_log(" LambdaExpression node")
            return eval_lambda_expression(node, env)

        elif node_type == MethodCallExpression:
            debug_log(" MethodCallExpression node", f"{node.object}.{node.method}")
            obj = eval_node(node.object, env, stack_trace)
            if is_error(obj):  # Use is_error helper
                return obj
            method_name = node.method.value

            # Handle array methods with lambdas
            if isinstance(obj, List):
                args = eval_expressions(node.arguments, env)
                if is_error(args):  # Use is_error helper
                    return args

                if method_name == "reduce":
                    if len(args) < 1:
                        return EvaluationError("reduce() requires at least a lambda function")
                    lambda_fn = args[0]
                    initial = args[1] if len(args) > 1 else None
                    return array_reduce(obj, lambda_fn, initial, env)
                elif method_name == "map":
                    if len(args) != 1:
                        return EvaluationError("map() requires exactly one lambda function")
                    return array_map(obj, args[0], env)
                elif method_name == "filter":
                    if len(args) != 1:
                        return EvaluationError("filter() requires exactly one lambda function")
                    return array_filter(obj, args[0], env)

            # Handle embedded code method calls
            if isinstance(obj, EmbeddedCode):
                args = eval_expressions(node.arguments, env)
                if is_error(args):  # Use is_error helper
                    return args
                # Simplified embedded execution
                print(f"[EMBED] Executing {obj.language}.{method_name}")
                return Integer(42)  # Placeholder result

            return EvaluationError(f"Method '{method_name}' not supported for {obj.type()}")

        elif node_type == EmbeddedLiteral:
            debug_log(" EmbeddedLiteral node")
            return EmbeddedCode("embedded_block", node.language, node.code)

        elif node_type == PrintStatement:
            debug_log(" PrintStatement node")
            val = eval_node(node.value, env, stack_trace)
            if is_error(val):  # Use is_error helper
                # Print errors to stderr but don't stop execution
                print(f"❌ Error: {val}", file=sys.stderr)
                return NULL
            debug_log(" Printing value", val)
            print(val.inspect())
            return NULL

        elif node_type == ScreenStatement:
            debug_log(" ScreenStatement node", node.name.value)
            print(f"[RENDER] Screen: {node.name.value}")
            return NULL

        elif node_type == EmbeddedCodeStatement:
            debug_log(" EmbeddedCodeStatement node", node.name.value)
            embedded_obj = EmbeddedCode(node.name.value, node.language, node.code)
            env.set(node.name.value, embedded_obj)
            return NULL

        elif node_type == UseStatement:
            debug_log(" UseStatement node", node.file_path)
            from .module_cache import get_cached_module, cache_module, get_module_candidates, normalize_path

            # Extract file path from node
            file_path_attr = getattr(node, 'file_path', None) or getattr(node, 'embedded_ref', None)
            if isinstance(file_path_attr, StringLiteral):
                file_path = file_path_attr.value
            else:
                file_path = file_path_attr

            if not file_path:
                return EvaluationError("use: missing file path")

            debug_log(" UseStatement loading", file_path)

            # Try to get normalized path and candidates
            normalized_path = normalize_path(file_path)
            candidates = get_module_candidates(file_path)

            # Check cache first
            cached_env = get_cached_module(normalized_path)
            if cached_env:
                debug_log(" Found module in cache", normalized_path)
                module_env = cached_env
                loaded = True
            else:
                # Not in cache, load from filesystem
                debug_log(" Module not in cache, loading from disk")
                module_env = Environment()
                loaded = False
                parse_errors = []

                # CRITICAL: Cache a placeholder before evaluation to break circular dependencies.
                # This allows module A -> module B -> module A to resolve to the same Environment
                # instance rather than recursing indefinitely.
                try:
                    cache_module(normalized_path, module_env)
                except Exception:
                    # Best-effort: if caching fails, continue without placeholder
                    pass

                for candidate in candidates:
                    try:
                        print(f"[MOD-DEBUG] trying candidate: {candidate}")
                        if not os.path.exists(candidate):
                            print(f"[MOD-DEBUG] candidate does not exist: {candidate}")
                            continue
                        debug_log(" Found module file", candidate)
                        with open(candidate, 'r', encoding='utf-8') as f:
                            code = f.read()

                        # Import parser/lexer here to avoid top-level circular imports
                        from .lexer import Lexer
                        from .parser import Parser
                        lexer = Lexer(code)
                        parser = Parser(lexer)
                        program = parser.parse_program()

                        if getattr(parser, 'errors', None):
                            print(f"[MOD-DEBUG] parser errors for {candidate}: {parser.errors}")
                            parse_errors.append((candidate, parser.errors))
                            continue

                        # Evaluate module into its own environment
                        eval_node(program, module_env)
                        # Cache the successfully loaded module (overwrite placeholder)
                        cache_module(normalized_path, module_env)
                        loaded = True
                        debug_log(" Module loaded and cached", normalized_path)
                        print(f"[MOD-DEBUG] module evaluated: {candidate}")
                        break
                    except Exception as e:
                        print(f"[MOD-DEBUG] exception loading candidate {candidate}: {e}")
                        parse_errors.append((candidate, str(e)))

            if not loaded:
                # If we previously placed a placeholder in cache, remove it to avoid stale entries
                try:
                    invalidate = None
                    try:
                        # import locally to avoid top-level import cycles
                        from .module_cache import invalidate_module
                        invalidate = invalidate_module
                    except Exception:
                        invalidate = None
                    if invalidate:
                        invalidate(normalized_path)
                except Exception:
                    pass

                debug_log(" UseStatement failed to load candidates", parse_errors)
                return EvaluationError(f"Module not found or failed to load: {file_path}")

            # Set alias or import exported names
            alias = getattr(node, 'alias', None)

            # Debug: show exports discovered in module_env
            try:
                exports_debug = module_env.get_exports() if hasattr(module_env, 'get_exports') else {}
                print(f"[MOD-DEBUG] module_env exports for {file_path}: {exports_debug}")
            except Exception as e:
                print(f"[MOD-DEBUG] error reading exports: {e}")
            if alias:
                debug_log(" Setting module alias", alias)
                env.set(alias, module_env)
            else:
                try:
                    exports = module_env.get_exports()
                    # Get importer file path (if available from stack trace or environment context)
                    importer_file = env.get("__file__") if hasattr(env, 'get') else None
                    if importer_file and hasattr(importer_file, 'value'):
                        importer_file = importer_file.value

                    for name, value in exports.items():
                        debug_log(" Importing export", name)
                        # Check permission if importer_file is available
                        if importer_file:
                            perm_check = check_import_permission(value, importer_file, env)
                            if is_error(perm_check):  # Use is_error helper
                                debug_log(" Permission denied for export", name)
                                return perm_check
                        env.set(name, value)
                except Exception:
                    # If module has no exports, make its env available as a module object
                    module_name = os.path.basename(file_path)
                    debug_log(" Setting module object", module_name)
                    env.set(module_name, module_env)

            return NULL
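The placeholder caching above is what makes mutually recursive imports terminate: a second `use` of a module that is still being evaluated gets the partially populated Environment from the cache instead of re-entering the loader. A rough sketch of the idea in isolation, using a hypothetical in-memory cache rather than the real module_cache module:

# Hypothetical standalone illustration of placeholder caching for circular imports.
_CACHE = {}

def load_module(path, loader):
    if path in _CACHE:               # a second visit during a cycle returns the placeholder
        return _CACHE[path]
    env = Environment()
    _CACHE[path] = env               # publish the placeholder *before* evaluating
    loader(path, env)                # may trigger load_module(path) again, harmlessly
    return env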

        # FROM statement: import specific names from a module
        elif node_type == FromStatement:
            debug_log(" FromStatement node", node.file_path)
            # Reuse the UseStatement logic to obtain module env
            use_node = UseStatement(node.file_path)
            res = eval_node(use_node, env, stack_trace)
            if is_error(res):  # Use is_error helper
                return res

            # module should now be available in env (either alias or exports)
            try:
                module_env = env.get(os.path.basename(node.file_path))
                if not module_env or not hasattr(module_env, 'get_exports'):
                    # fallback: look up by normalized path
                    module_env = None
            except Exception:
                module_env = None

            # Import requested names
            importer_file = env.get("__file__") if hasattr(env, 'get') else None
            if importer_file and hasattr(importer_file, 'value'):
                importer_file = importer_file.value

            for name_pair in node.imports:
                src_name = name_pair[0].value if hasattr(name_pair[0], 'value') else str(name_pair[0])
                as_name = name_pair[1].value if len(name_pair) > 1 and name_pair[1] is not None else src_name
                try:
                    value = module_env.get_exports().get(src_name)
                    if value is None:
                        return EvaluationError(f"From import: '{src_name}' not found in module")
                    # Check permission if importer_file is available
                    if importer_file:
                        perm_check = check_import_permission(value, importer_file, env)
                        if is_error(perm_check):  # Use is_error helper
                            debug_log(" Permission denied for from-import", src_name)
                            return perm_check
                    env.set(as_name, value)
                except Exception:
                    return EvaluationError(f"From import failed for '{src_name}'")

            return NULL

        elif node_type == ComponentStatement:
            debug_log(" ComponentStatement node", node.name.value)
            # Evaluate properties (map or block)
            props = None
            if hasattr(node, 'properties') and node.properties is not None:
                props_val = eval_node(node.properties, env, stack_trace)
                if is_error(props_val):  # Use is_error helper
                    return props_val
                props = _to_python(props_val) if isinstance(props_val, (Map, List, String)) else None
            # Register via runtime builtin if available
            if 'define_component' in builtins:
                return builtins['define_component'].fn(String(node.name.value), Map(props) if isinstance(props, dict) else NULL)
            env.set(node.name.value, String(f"<component {node.name.value}>"))
            return NULL

        elif node_type == ThemeStatement:
            debug_log(" ThemeStatement node", node.name.value)
            props_val = eval_node(node.properties, env, stack_trace) if hasattr(node, 'properties') else NULL
            if is_error(props_val):  # Use is_error helper
                return props_val
            # Set theme locally
            env.set(node.name.value, props_val)
            return NULL

        elif node_type == DebugStatement:
            debug_log(" DebugStatement node")
            val = eval_node(node.value, env, stack_trace)
            if is_error(val):  # Use is_error helper
                return val
            Debug.log(String(str(val)))
            return NULL

        elif node_type == ExternalDeclaration:
            debug_log(" ExternalDeclaration node", node.name.value)
            # Register a placeholder builtin that raises when called until linked
            def _external_placeholder(*a):
                return EvaluationError(f"External function '{node.name.value}' not linked")
            env.set(node.name.value, Builtin(_external_placeholder, node.name.value))
            return NULL

        elif node_type == ExactlyStatement:
            debug_log(" ExactlyStatement node")
            return eval_node(node.body, env, stack_trace)

        # NEW: EntityStatement - Call the helper for entity definition
        elif node_type == EntityStatement:
            debug_log(" EntityStatement node", node.name.value)
            return eval_entity_statement(node, env)

        # NEW: SealStatement - Call the helper for sealing
        elif node_type == SealStatement:
            debug_log(" SealStatement node", node.target)
            return eval_seal_statement(node, env, stack_trace)

        # Expressions
        elif node_type == IntegerLiteral:
            debug_log(" IntegerLiteral node", node.value)
            return Integer(node.value)

        elif node_type == zexus_ast.FloatLiteral or node_type.__name__ == 'FloatLiteral':
            # FloatLiteral support
            try:
                val = getattr(node, 'value', None)
                return Float(val)
            except Exception:
                return EvaluationError(f"Invalid float literal: {getattr(node, 'value', None)}")

        elif node_type == StringLiteral:
            debug_log(" StringLiteral node", node.value)
            return String(node.value)

        elif node_type == ListLiteral:
            debug_log(" ListLiteral node", f"{len(node.elements)} elements")
            elements = eval_expressions(node.elements, env)
            # FIXED: use is_error helper
            if is_error(elements):
                return elements
            return List(elements)

        elif node_type == MapLiteral:
            debug_log(" MapLiteral node", f"{len(node.pairs)} pairs")
            pairs = {}
            for key_expr, value_expr in node.pairs:
                key = eval_node(key_expr, env, stack_trace)
                # FIXED: use is_error helper
                if is_error(key):
                    return key
                value = eval_node(value_expr, env, stack_trace)
                if is_error(value):
                    return value
                key_str = key.inspect()
                pairs[key_str] = value
            return Map(pairs)

        elif node_type == Identifier:
            debug_log(" Identifier node", node.value)
            return eval_identifier(node, env)

        elif node_type == ActionLiteral:
            debug_log(" ActionLiteral node")
            return Action(node.parameters, node.body, env)

        # FIXED: CallExpression - Properly handle builtin function calls
        elif node_type == CallExpression:
            debug_log("🚀 CallExpression node", f"Calling {node.function}")
            function = eval_node(node.function, env, stack_trace)
            if is_error(function):  # Use is_error helper
                debug_log(" Function evaluation error", function)
                return function

            args = eval_expressions(node.arguments, env)
            # FIXED: detect error results using is_error() BEFORE attempting to len()/unpack
            if is_error(args):
                debug_log(" Arguments evaluation error", args)
                return args

            arg_count = len(args) if isinstance(args, (list, tuple)) else "unknown"
            debug_log(" Arguments evaluated", f"{args} (count: {arg_count})")

            # CRITICAL FIX: Ensure builtin functions are called properly
            debug_log(" Calling apply_function", f"function: {function}, args: {args}")
            result = apply_function(function, args)
            debug_log(" CallExpression result", result)
            return result

        elif node_type == PrefixExpression:
            # Use is_error helper to check `right`
            debug_log(" PrefixExpression node", f"{node.operator} {node.right}")
            right = eval_node(node.right, env, stack_trace)
            if is_error(right):
                return right
            return eval_prefix_expression(node.operator, right)

        elif node_type == InfixExpression:
            debug_log(" InfixExpression node", f"{node.left} {node.operator} {node.right}")
            left = eval_node(node.left, env, stack_trace)
            if is_error(left):  # Use is_error helper
                return left
            right = eval_node(node.right, env, stack_trace)
            if is_error(right):  # Use is_error helper
                return right
            return eval_infix_expression(node.operator, left, right)

        elif node_type == IfExpression:
            debug_log(" IfExpression node")
            return eval_if_expression(node, env)

        debug_log(" Unknown node type", node_type)
        return EvaluationError(f"Unknown node type: {node_type}", stack_trace=stack_trace)

    except Exception as e:
        # Enhanced error with stack trace
        error_msg = f"Internal error: {str(e)}"
        debug_log(" Exception in eval_node", error_msg)
        return EvaluationError(error_msg, stack_trace=stack_trace[-5:])  # Last 5 frames


# =====================================================
# NEW STATEMENT HANDLERS - ENTITY, VERIFY, CONTRACT, PROTECT, SEAL
# =====================================================

def eval_entity_statement(node, env):
    """Evaluate entity statement - create entity definition"""
    from .object import EntityDefinition  # Ensure EntityDefinition is imported from object.py

    properties = {}
    for prop in node.properties:
        prop_name = prop.name.value if hasattr(prop.name, 'value') else str(prop.name)
        prop_type = prop.type.value if hasattr(prop.type, 'value') else str(prop.type)
        default_value = eval_node(prop.default_value, env) if hasattr(prop, 'default_value') and prop.default_value else NULL
        if is_error(default_value):
            return default_value
        properties[prop_name] = {
            "type": prop_type,
            "default_value": default_value  # Store Zexus object for defaults
        }

    entity_def = EntityDefinition(node.name.value, properties)
    env.set(node.name.value, entity_def)
    return NULL
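`eval_entity_statement` reduces an entity declaration to a plain property table before handing it to `EntityDefinition`. A small sketch of the resulting shape, assuming only the constructor call and import path used above:

# Illustrative sketch of the structure passed to EntityDefinition.
from zexus.object import EntityDefinition   # package-qualified path assumed from the relative import above
properties = {
    "name":    {"type": "string", "default_value": String("anonymous")},
    "balance": {"type": "int",    "default_value": Integer(0)},
}
env = Environment()
env.set("User", EntityDefinition("User", properties))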


def eval_verify_statement(node, env, stack_trace=None):
    """Evaluate verify statement - register verification checks"""
    from .security import VerifyWrapper, VerificationCheck, get_security_context

    # Evaluate target function
    target_value = eval_node(node.target, env, stack_trace)
    if is_error(target_value):  # Use is_error helper
        return target_value

    # Evaluate conditions
    checks = []
    for condition_node in node.conditions:
        condition_value = eval_node(condition_node, env, stack_trace)
        if is_error(condition_value):  # Use is_error helper
            return condition_value
        # Assuming condition_value is a Zexus object that can be evaluated to truthy/falsy
        # Or if it's an Action/Lambda, it should be wrapped.
        # Bind the current condition via a default argument so each lambda keeps its own value.
        if callable(condition_value) or isinstance(condition_value, Action):
            checks.append(VerificationCheck(str(condition_node), lambda ctx, cond=condition_value: cond))
        else:
            # For simpler truthy/falsy conditions directly from eval_node
            checks.append(VerificationCheck(str(condition_node), lambda ctx, cond=condition_value: cond))

    # Wrap function with verification
    wrapped = VerifyWrapper(target_value, checks, node.error_handler)

    # Register in security context
    ctx = get_security_context()
    ctx.register_verify_check(str(node.target), wrapped)  # Assuming target has a string representation for key

    return wrapped


def eval_contract_statement(node, env, stack_trace=None):
    """Evaluate contract statement - create smart contract"""
    from .security import SmartContract

    storage_vars = {}
    for storage_var_node in node.storage_vars:
        storage_var_name = storage_var_node.name.value
        # Initial value might be present
        initial_value = eval_node(storage_var_node.initial_value, env) if hasattr(storage_var_node, 'initial_value') and storage_var_node.initial_value else NULL
        if is_error(initial_value):
            return initial_value
        storage_vars[storage_var_name] = initial_value

    actions = {}
    for action_node in node.actions:
        # Assuming action_node is an ActionStatement or ActionLiteral
        action_obj = eval_node(action_node, env, stack_trace)  # Evaluate action literal/statement
        if is_error(action_obj):
            return action_obj
        actions[action_node.name.value] = action_obj

    contract = SmartContract(node.name.value, storage_vars, actions)
    contract.deploy()  # This should probably be a method call from Zexus or an explicit statement

    env.set(node.name.value, contract)
    return NULL


def eval_protect_statement(node, env, stack_trace=None):
    """Evaluate protect statement - register protection rules"""
    from .security import ProtectionPolicy, get_security_context

    # Evaluate target
    target_value = eval_node(node.target, env, stack_trace)
    if is_error(target_value):  # Use is_error helper
        return target_value

    # Evaluate rules
    rules_value = eval_node(node.rules, env, stack_trace)
    if is_error(rules_value):  # Use is_error helper
        return rules_value

    # Convert rules to dict
    rules_dict = {}
    if isinstance(rules_value, Map):
        for key, value in rules_value.pairs.items():
            key_str = key.value if isinstance(key, String) else str(key)
            rules_dict[key_str] = value

    # Create and register protection policy
    policy = ProtectionPolicy(str(node.target), rules_dict, node.enforcement_level)  # Assuming target has a string representation
    ctx = get_security_context()
    ctx.register_protection(str(node.target), policy)

    return policy


def eval_middleware_statement(node, env):
    """Evaluate middleware statement - register middleware handler"""
    from .security import Middleware, get_security_context

    # Evaluate handler
    handler = eval_node(node.handler, env)
    if is_error(handler):  # Use is_error helper
        return handler

    # Create middleware
    middleware = Middleware(node.name.value, handler)

    # Register in security context
    ctx = get_security_context()
    ctx.middlewares[node.name.value] = middleware

    return NULL


def eval_auth_statement(node, env):
    """Evaluate auth statement - set authentication configuration"""
    from .security import AuthConfig, get_security_context

    # Evaluate config
    config_value = eval_node(node.config, env)
    if is_error(config_value):  # Use is_error helper
        return config_value

    # Convert config to dict
    config_dict = {}
    if isinstance(config_value, Map):
        for key, value in config_value.pairs.items():
            key_str = key.value if isinstance(key, String) else str(key)
            config_dict[key_str] = value

    # Create auth config
    auth_config = AuthConfig(config_dict)

    # Register in security context
    ctx = get_security_context()
    ctx.auth_config = auth_config

    return NULL


def eval_throttle_statement(node, env):
    """Evaluate throttle statement - register rate limiting"""
    from .security import RateLimiter, get_security_context

    # Evaluate target and limits
    target_value = eval_node(node.target, env)
    if is_error(target_value):  # Use is_error helper
        return target_value

    limits_value = eval_node(node.limits, env)
    if is_error(limits_value):  # Use is_error helper
        return limits_value

    # Extract limits from map
    rpm = 100  # Default requests per minute
    burst = 10  # Default burst size
    per_user = FALSE  # Default is BooleanObj(False)

    if isinstance(limits_value, Map):
        for key, value in limits_value.pairs.items():
            key_str = key.value if isinstance(key, String) else str(key)
            if key_str == "requests_per_minute" and isinstance(value, Integer):
                rpm = value.value
            elif key_str == "burst_size" and isinstance(value, Integer):
                burst = value.value
            elif key_str == "per_user" and isinstance(value, BooleanObj):
                per_user = value  # Keep as BooleanObj for consistency
            elif key_str == "per_user" and isinstance(value, AST_Boolean):  # If AST Boolean, convert to Zexus BooleanObj
                per_user = TRUE if value.value else FALSE

    # Create rate limiter
    limiter = RateLimiter(rpm, burst, per_user.value)  # Pass Python bool to RateLimiter

    # Register in security context
    ctx = get_security_context()
    ctx.rate_limiters = getattr(ctx, 'rate_limiters', {})
    ctx.rate_limiters[str(node.target)] = limiter

    return NULL


def eval_cache_statement(node, env):
    """Evaluate cache statement - register caching policy"""
    from .security import CachePolicy, get_security_context

    # Evaluate target and policy
    target_value = eval_node(node.target, env)
    if is_error(target_value):  # Use is_error helper
        return target_value

    policy_value = eval_node(node.policy, env)
    if is_error(policy_value):  # Use is_error helper
        return policy_value

    # Extract policy settings from map
    ttl = 3600  # Default 1 hour
    invalidate_on = []

    if isinstance(policy_value, Map):
        for key, value in policy_value.pairs.items():
            key_str = key.value if isinstance(key, String) else str(key)
            if key_str == "ttl" and isinstance(value, Integer):
                ttl = value.value
            elif key_str == "invalidate_on" and isinstance(value, List):
                invalidate_on = [item.value if hasattr(item, 'value') else str(item) for item in value.elements]

    # Create cache policy
    cache_policy = CachePolicy(ttl=ttl, invalidate_on=invalidate_on)

    # Register in security context
    ctx = get_security_context()
    ctx.cache_policies = getattr(ctx, 'cache_policies', {})
    ctx.cache_policies[str(node.target)] = cache_policy

    return NULL


def eval_seal_statement(node, env, stack_trace=None):
    """Evaluate seal statement - mark a variable or property as sealed (immutable).

    Supports sealing identifiers (`seal myVar`) and property access (`seal myMap.key`).
    """
    from .security import SealedObject

    target_node = node.target
    if target_node is None:
        return EvaluationError("seal: missing target to seal")

    # Case 1: Sealing an Identifier (e.g., `seal x`)
    if isinstance(target_node, Identifier):
        name = target_node.value
        current_value = env.get(name)
        if current_value is None:
            return EvaluationError(f"seal: identifier '{name}' not found")
        sealed_object = SealedObject(current_value)
        env.set(name, sealed_object)
        debug_log(" Sealed identifier", name)
        return sealed_object

    # Case 2: Sealing a PropertyAccessExpression (e.g., `seal obj.prop` or `seal map[key]`)
    # Note: For `map[key]`, the parser usually creates a `PropertyAccessExpression`
    # or `IndexExpression` depending on language design. Assuming PropertyAccessExpression for now.
    elif isinstance(target_node, PropertyAccessExpression):
        # Evaluate the object part (e.g., `obj` in `obj.prop`)
        obj = eval_node(target_node.object, env, stack_trace)
        if is_error(obj):
            return obj

        # Determine the property name/key
        # Assuming node.property is an Identifier for 'obj.prop' like access
        # If it could be an arbitrary expression (like `map[expression]`),
        # node.property would need to be evaluated further.
        prop_key_node = target_node.property
        if isinstance(prop_key_node, Identifier):
            prop_key = prop_key_node.value
        else:
            # Fallback for dynamic keys like map[expr] if PropertyAccessExpression supports it
            prop_key_evaluated = eval_node(prop_key_node, env, stack_trace)
            if is_error(prop_key_evaluated):
                return prop_key_evaluated
            prop_key = prop_key_evaluated.inspect()  # Use inspect to get a string key for maps

        try:
            # Handle Map objects: seal a specific key's value
            if isinstance(obj, Map):
                if prop_key not in obj.pairs:
                    return EvaluationError(f"seal: map key '{prop_key}' not found on map")
                # Create a NEW Map with the sealed value, or modify in place if Map allows it.
                # If Map is mutable, just update:
                obj.pairs[prop_key] = SealedObject(obj.pairs[prop_key])
                debug_log(f" Sealed map key '{prop_key}' for map {obj.inspect()}", "")
                return obj.pairs[prop_key]
            # Handle EntityInstance-like objects with get/set methods
            elif hasattr(obj, 'get') and callable(obj.get) and \
                 hasattr(obj, 'set') and callable(obj.set):
                current_prop_value = obj.get(prop_key)
                if current_prop_value is None:
                    return EvaluationError(f"seal: property '{prop_key}' not found on object {obj.type()}")
                sealed_prop_value = SealedObject(current_prop_value)
                obj.set(prop_key, sealed_prop_value)  # This set should trigger immutability check in EntityInstance.set
                debug_log(f" Sealed property '{prop_key}' on object {obj.inspect()}", "")
                return sealed_prop_value
            else:
                return EvaluationError(f"seal: cannot seal property '{prop_key}' on object of type {obj.type()}")
        except Exception as e:
            return EvaluationError(f"seal error on property '{prop_key}': {str(e)}")

    return EvaluationError("seal: unsupported target type for sealing. Expected Identifier or PropertyAccessExpression.")


# Production evaluation with enhanced debugging
def evaluate(program, env, debug_mode=False):
    """Production evaluation with enhanced error handling and debugging"""
    if debug_mode:
        env.enable_debug()
        print("🔧 Debug mode enabled")

    result = eval_node(program, env)
    # Resolve awaitables at the top level when possible
    result = _resolve_awaitable(result)

    if debug_mode:
        env.disable_debug()

    # When debug mode is off, print a concise 5-line summary only
    if not debug_mode:
        try:
            print(f"Summary: statements parsed={EVAL_SUMMARY.get('parsed_statements',0)}")
            print(f"Summary: statements evaluated={EVAL_SUMMARY.get('evaluated_statements',0)}")
            print(f"Summary: errors={EVAL_SUMMARY.get('errors',0)}")
            print(f"Summary: async_tasks_run={EVAL_SUMMARY.get('async_tasks_run',0)}")
            print(f"Summary: max_statements_in_block={EVAL_SUMMARY.get('max_statements_in_block',0)}")
        except Exception:
            # If summary printing fails, ignore and continue
            pass

    if is_error(result):  # Use is_error helper
        return str(result)

    return result
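Taken together with the lexer and parser already imported for module loading, `evaluate` is the final stage of the pipeline. A minimal end-to-end sketch of driving it, with package-qualified paths assumed from the relative imports above and a hypothetical one-line Zexus snippet:

# Illustrative driver sketch; module paths and the snippet's surface syntax are assumptions.
from zexus.lexer import Lexer
from zexus.parser import Parser

source = 'let greeting = "hello"'        # hypothetical Zexus program
program = Parser(Lexer(source)).parse_program()
env = Environment()
result = evaluate(program, env, debug_mode=False)
print(result)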