zexus 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +0 -0
- package/README.md +2513 -0
- package/bin/zexus +2 -0
- package/bin/zpics +2 -0
- package/bin/zpm +2 -0
- package/bin/zx +2 -0
- package/bin/zx-deploy +2 -0
- package/bin/zx-dev +2 -0
- package/bin/zx-run +2 -0
- package/package.json +66 -0
- package/scripts/README.md +24 -0
- package/scripts/postinstall.js +44 -0
- package/shared_config.json +24 -0
- package/src/README.md +1525 -0
- package/src/tests/run_zexus_tests.py +117 -0
- package/src/tests/test_all_phases.zx +346 -0
- package/src/tests/test_blockchain_features.zx +306 -0
- package/src/tests/test_complexity_features.zx +321 -0
- package/src/tests/test_core_integration.py +185 -0
- package/src/tests/test_phase10_ecosystem.zx +177 -0
- package/src/tests/test_phase1_modifiers.zx +87 -0
- package/src/tests/test_phase2_plugins.zx +80 -0
- package/src/tests/test_phase3_security.zx +97 -0
- package/src/tests/test_phase4_vfs.zx +116 -0
- package/src/tests/test_phase5_types.zx +117 -0
- package/src/tests/test_phase6_metaprogramming.zx +125 -0
- package/src/tests/test_phase7_optimization.zx +132 -0
- package/src/tests/test_phase9_advanced_types.zx +157 -0
- package/src/tests/test_security_features.py +419 -0
- package/src/tests/test_security_features.zx +276 -0
- package/src/tests/test_simple_zx.zx +1 -0
- package/src/tests/test_verification_simple.zx +69 -0
- package/src/zexus/__init__.py +28 -0
- package/src/zexus/__main__.py +5 -0
- package/src/zexus/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/advanced_types.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/builtin_modules.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/capability_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/complexity_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/concurrency_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/config.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/dependency_injection.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/ecosystem.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/environment.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/error_reporter.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/hybrid_orchestrator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/metaprogramming.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/module_cache.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/object.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/optimization.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/plugin_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/policy_engine.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/security.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/stdlib_integration.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/strategy_recovery.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/syntax_validator.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/type_system.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/virtual_filesystem.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/__pycache__/zexus_token.cpython-312.pyc +0 -0
- package/src/zexus/advanced_types.py +401 -0
- package/src/zexus/blockchain/__init__.py +40 -0
- package/src/zexus/blockchain/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/crypto.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/ledger.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/__pycache__/transaction.cpython-312.pyc +0 -0
- package/src/zexus/blockchain/crypto.py +463 -0
- package/src/zexus/blockchain/ledger.py +255 -0
- package/src/zexus/blockchain/transaction.py +267 -0
- package/src/zexus/builtin_modules.py +284 -0
- package/src/zexus/builtin_plugins.py +317 -0
- package/src/zexus/capability_system.py +372 -0
- package/src/zexus/cli/__init__.py +2 -0
- package/src/zexus/cli/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/cli/__pycache__/main.cpython-312.pyc +0 -0
- package/src/zexus/cli/main.py +707 -0
- package/src/zexus/cli/zpm.py +203 -0
- package/src/zexus/compare_interpreter_compiler.py +146 -0
- package/src/zexus/compiler/__init__.py +169 -0
- package/src/zexus/compiler/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/lexer.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/compiler/__pycache__/zexus_ast.cpython-312.pyc +0 -0
- package/src/zexus/compiler/bytecode.py +266 -0
- package/src/zexus/compiler/compat_runtime.py +277 -0
- package/src/zexus/compiler/lexer.py +257 -0
- package/src/zexus/compiler/parser.py +779 -0
- package/src/zexus/compiler/semantic.py +118 -0
- package/src/zexus/compiler/zexus_ast.py +454 -0
- package/src/zexus/complexity_system.py +575 -0
- package/src/zexus/concurrency_system.py +493 -0
- package/src/zexus/config.py +201 -0
- package/src/zexus/crypto_bridge.py +19 -0
- package/src/zexus/dependency_injection.py +423 -0
- package/src/zexus/ecosystem.py +434 -0
- package/src/zexus/environment.py +101 -0
- package/src/zexus/environment_manager.py +119 -0
- package/src/zexus/error_reporter.py +314 -0
- package/src/zexus/evaluator/__init__.py +12 -0
- package/src/zexus/evaluator/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/bytecode_compiler.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/core.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/expressions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/functions.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/integration.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/statements.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/__pycache__/utils.cpython-312.pyc +0 -0
- package/src/zexus/evaluator/bytecode_compiler.py +700 -0
- package/src/zexus/evaluator/core.py +891 -0
- package/src/zexus/evaluator/expressions.py +827 -0
- package/src/zexus/evaluator/functions.py +3989 -0
- package/src/zexus/evaluator/integration.py +396 -0
- package/src/zexus/evaluator/statements.py +4303 -0
- package/src/zexus/evaluator/utils.py +126 -0
- package/src/zexus/evaluator_original.py +2041 -0
- package/src/zexus/external_bridge.py +16 -0
- package/src/zexus/find_affected_imports.sh +155 -0
- package/src/zexus/hybrid_orchestrator.py +152 -0
- package/src/zexus/input_validation.py +259 -0
- package/src/zexus/lexer.py +571 -0
- package/src/zexus/logging.py +89 -0
- package/src/zexus/lsp/__init__.py +9 -0
- package/src/zexus/lsp/completion_provider.py +207 -0
- package/src/zexus/lsp/definition_provider.py +22 -0
- package/src/zexus/lsp/hover_provider.py +71 -0
- package/src/zexus/lsp/server.py +269 -0
- package/src/zexus/lsp/symbol_provider.py +31 -0
- package/src/zexus/metaprogramming.py +321 -0
- package/src/zexus/module_cache.py +89 -0
- package/src/zexus/module_manager.py +107 -0
- package/src/zexus/object.py +973 -0
- package/src/zexus/optimization.py +424 -0
- package/src/zexus/parser/__init__.py +31 -0
- package/src/zexus/parser/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/parser.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_context.cpython-312.pyc +0 -0
- package/src/zexus/parser/__pycache__/strategy_structural.cpython-312.pyc +0 -0
- package/src/zexus/parser/integration.py +86 -0
- package/src/zexus/parser/parser.py +3977 -0
- package/src/zexus/parser/strategy_context.py +7254 -0
- package/src/zexus/parser/strategy_structural.py +1033 -0
- package/src/zexus/persistence.py +391 -0
- package/src/zexus/plugin_system.py +290 -0
- package/src/zexus/policy_engine.py +365 -0
- package/src/zexus/profiler/__init__.py +5 -0
- package/src/zexus/profiler/profiler.py +233 -0
- package/src/zexus/purity_system.py +398 -0
- package/src/zexus/runtime/__init__.py +20 -0
- package/src/zexus/runtime/async_runtime.py +324 -0
- package/src/zexus/search_old_imports.sh +65 -0
- package/src/zexus/security.py +1407 -0
- package/src/zexus/stack_trace.py +233 -0
- package/src/zexus/stdlib/__init__.py +27 -0
- package/src/zexus/stdlib/blockchain.py +341 -0
- package/src/zexus/stdlib/compression.py +167 -0
- package/src/zexus/stdlib/crypto.py +124 -0
- package/src/zexus/stdlib/datetime.py +163 -0
- package/src/zexus/stdlib/db_mongo.py +199 -0
- package/src/zexus/stdlib/db_mysql.py +162 -0
- package/src/zexus/stdlib/db_postgres.py +163 -0
- package/src/zexus/stdlib/db_sqlite.py +133 -0
- package/src/zexus/stdlib/encoding.py +230 -0
- package/src/zexus/stdlib/fs.py +195 -0
- package/src/zexus/stdlib/http.py +219 -0
- package/src/zexus/stdlib/http_server.py +248 -0
- package/src/zexus/stdlib/json_module.py +61 -0
- package/src/zexus/stdlib/math.py +360 -0
- package/src/zexus/stdlib/os_module.py +265 -0
- package/src/zexus/stdlib/regex.py +148 -0
- package/src/zexus/stdlib/sockets.py +253 -0
- package/src/zexus/stdlib/test_framework.zx +208 -0
- package/src/zexus/stdlib/test_runner.zx +119 -0
- package/src/zexus/stdlib_integration.py +341 -0
- package/src/zexus/strategy_recovery.py +256 -0
- package/src/zexus/syntax_validator.py +356 -0
- package/src/zexus/testing/zpics.py +407 -0
- package/src/zexus/testing/zpics_runtime.py +369 -0
- package/src/zexus/type_system.py +374 -0
- package/src/zexus/validation_system.py +569 -0
- package/src/zexus/virtual_filesystem.py +355 -0
- package/src/zexus/vm/__init__.py +8 -0
- package/src/zexus/vm/__pycache__/__init__.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/async_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/bytecode.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/cache.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/jit.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/memory_manager.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/memory_pool.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/parallel_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/peephole_optimizer.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/profiler.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/register_allocator.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/register_vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/ssa_converter.cpython-312.pyc +0 -0
- package/src/zexus/vm/__pycache__/vm.cpython-312.pyc +0 -0
- package/src/zexus/vm/async_optimizer.py +420 -0
- package/src/zexus/vm/bytecode.py +428 -0
- package/src/zexus/vm/bytecode_converter.py +297 -0
- package/src/zexus/vm/cache.py +532 -0
- package/src/zexus/vm/jit.py +720 -0
- package/src/zexus/vm/memory_manager.py +520 -0
- package/src/zexus/vm/memory_pool.py +511 -0
- package/src/zexus/vm/optimizer.py +478 -0
- package/src/zexus/vm/parallel_vm.py +899 -0
- package/src/zexus/vm/peephole_optimizer.py +452 -0
- package/src/zexus/vm/profiler.py +527 -0
- package/src/zexus/vm/register_allocator.py +462 -0
- package/src/zexus/vm/register_vm.py +520 -0
- package/src/zexus/vm/ssa_converter.py +757 -0
- package/src/zexus/vm/vm.py +1392 -0
- package/src/zexus/zexus_ast.py +1782 -0
- package/src/zexus/zexus_token.py +253 -0
- package/src/zexus/zpm/__init__.py +15 -0
- package/src/zexus/zpm/installer.py +116 -0
- package/src/zexus/zpm/package_manager.py +208 -0
- package/src/zexus/zpm/publisher.py +98 -0
- package/src/zexus/zpm/registry.py +110 -0
- package/src/zexus.egg-info/PKG-INFO +2235 -0
- package/src/zexus.egg-info/SOURCES.txt +876 -0
- package/src/zexus.egg-info/dependency_links.txt +1 -0
- package/src/zexus.egg-info/entry_points.txt +3 -0
- package/src/zexus.egg-info/not-zip-safe +1 -0
- package/src/zexus.egg-info/requires.txt +14 -0
- package/src/zexus.egg-info/top_level.txt +2 -0
- package/zexus.json +14 -0
@@ -0,0 +1,4303 @@
+# src/zexus/evaluator/statements.py
+import os
+import sys
+
+from ..zexus_ast import (
+    Program, ExpressionStatement, BlockStatement, ReturnStatement, ContinueStatement, BreakStatement, ThrowStatement, LetStatement, ConstStatement,
+    ActionStatement, FunctionStatement, IfStatement, WhileStatement, ForEachStatement,
+    TryCatchStatement, UseStatement, FromStatement, ExportStatement,
+    ContractStatement, EntityStatement, VerifyStatement, ProtectStatement,
+    SealStatement, MiddlewareStatement, AuthStatement, ThrottleStatement, CacheStatement,
+    ComponentStatement, ThemeStatement, DebugStatement, ExternalDeclaration, AssignmentExpression,
+    PrintStatement, ScreenStatement, EmbeddedCodeStatement, ExactlyStatement,
+    Identifier, PropertyAccessExpression, RestrictStatement, SandboxStatement, TrailStatement,
+    NativeStatement, GCStatement, InlineStatement, BufferStatement, SIMDStatement,
+    DeferStatement, PatternStatement, PatternCase, EnumStatement, EnumMember, StreamStatement, WatchStatement,
+    CapabilityStatement, GrantStatement, RevokeStatement, ValidateStatement, SanitizeStatement, ImmutableStatement,
+    InterfaceStatement, TypeAliasStatement, ModuleStatement, PackageStatement, UsingStatement,
+    ChannelStatement, SendStatement, ReceiveStatement, AtomicStatement,
+    # Blockchain statements and expressions
+    LedgerStatement, StateStatement, RequireStatement, RevertStatement, LimitStatement,
+    TXExpression, HashExpression, SignatureExpression, VerifySignatureExpression, GasExpression
+)
+from ..object import (
+    Environment, Integer, Float, String, Boolean as Boolean, ReturnValue,
+    Action, List, Map, EvaluationError, EntityDefinition, EmbeddedCode, Builtin,
+    start_collecting_dependencies, stop_collecting_dependencies
+)
+from ..security import (
+    SealedObject, SmartContract, VerifyWrapper, VerificationCheck, get_security_context,
+    ProtectionPolicy, Middleware, AuthConfig, RateLimiter, CachePolicy
+)
+from .utils import is_error, debug_log, EVAL_SUMMARY, NULL, TRUE, FALSE, _resolve_awaitable, _zexus_to_python, _python_to_zexus, is_truthy
+
+# Break exception for loop control flow
+class BreakException:
+    """Exception raised when break statement is encountered in a loop."""
+    def __repr__(self):
+        return "BreakException()"
+
+class StatementEvaluatorMixin:
+    """Handles evaluation of statements, flow control, module loading, and security features."""
+
+    def eval_program(self, statements, env):
|
|
44
|
+
debug_log("eval_program", f"Processing {len(statements)} statements")
|
|
45
|
+
|
|
46
|
+
# Track current environment for builtin functions
|
|
47
|
+
self._current_env = env
|
|
48
|
+
|
|
49
|
+
try:
|
|
50
|
+
EVAL_SUMMARY['parsed_statements'] = max(EVAL_SUMMARY.get('parsed_statements', 0), len(statements))
|
|
51
|
+
except Exception:
|
|
52
|
+
pass
|
|
53
|
+
|
|
54
|
+
result = NULL
|
|
55
|
+
try:
|
|
56
|
+
for i, stmt in enumerate(statements):
|
|
57
|
+
debug_log(f" Statement {i+1}", type(stmt).__name__)
|
|
58
|
+
res = self.eval_node(stmt, env)
|
|
59
|
+
res = _resolve_awaitable(res)
|
|
60
|
+
EVAL_SUMMARY['evaluated_statements'] += 1
|
|
61
|
+
|
|
62
|
+
if isinstance(res, ReturnValue):
|
|
63
|
+
debug_log(" ReturnValue encountered", res.value)
|
|
64
|
+
# Execute deferred cleanup before returning
|
|
65
|
+
self._execute_deferred_cleanup(env, [])
|
|
66
|
+
return res.value
|
|
67
|
+
if is_error(res):
|
|
68
|
+
debug_log(" Error encountered", res)
|
|
69
|
+
try:
|
|
70
|
+
EVAL_SUMMARY['errors'] += 1
|
|
71
|
+
except Exception:
|
|
72
|
+
pass
|
|
73
|
+
|
|
74
|
+
# Check if continue_on_error mode is enabled
|
|
75
|
+
if self.continue_on_error:
|
|
76
|
+
# Log the error and continue execution
|
|
77
|
+
error_msg = str(res)
|
|
78
|
+
self.error_log.append(error_msg)
|
|
79
|
+
print(f"[ERROR] {error_msg}")
|
|
80
|
+
debug_log(" Continuing after error", "continue_on_error=True")
|
|
81
|
+
result = NULL # Continue with null result
|
|
82
|
+
continue
|
|
83
|
+
else:
|
|
84
|
+
# Execute deferred cleanup even on error
|
|
85
|
+
self._execute_deferred_cleanup(env, [])
|
|
86
|
+
return res
|
|
87
|
+
result = res
|
|
88
|
+
|
|
89
|
+
debug_log("eval_program completed", result)
|
|
90
|
+
return result
|
|
91
|
+
finally:
|
|
92
|
+
# CRITICAL: Execute all deferred cleanup at program exit
|
|
93
|
+
self._execute_deferred_cleanup(env, [])
|
|
94
|
+
|
|
95
|
+
def eval_block_statement(self, block, env, stack_trace=None):
|
|
96
|
+
debug_log("eval_block_statement", f"len={len(block.statements)}")
|
|
97
|
+
|
|
98
|
+
try:
|
|
99
|
+
EVAL_SUMMARY['max_statements_in_block'] = max(EVAL_SUMMARY.get('max_statements_in_block', 0), len(block.statements))
|
|
100
|
+
except Exception:
|
|
101
|
+
pass
|
|
102
|
+
|
|
103
|
+
if stack_trace is None:
|
|
104
|
+
stack_trace = []
|
|
105
|
+
|
|
106
|
+
result = NULL
|
|
107
|
+
try:
|
|
108
|
+
for stmt in block.statements:
|
|
109
|
+
res = self.eval_node(stmt, env, stack_trace)
|
|
110
|
+
res = _resolve_awaitable(res)
|
|
111
|
+
EVAL_SUMMARY['evaluated_statements'] += 1
|
|
112
|
+
|
|
113
|
+
if isinstance(res, (ReturnValue, BreakException, EvaluationError)):
|
|
114
|
+
debug_log(" Block interrupted", res)
|
|
115
|
+
if is_error(res):
|
|
116
|
+
try:
|
|
117
|
+
EVAL_SUMMARY['errors'] += 1
|
|
118
|
+
except Exception:
|
|
119
|
+
pass
|
|
120
|
+
|
|
121
|
+
# Check if continue_on_error mode is enabled
|
|
122
|
+
if self.continue_on_error:
|
|
123
|
+
# Log the error and continue execution
|
|
124
|
+
error_msg = str(res)
|
|
125
|
+
self.error_log.append(error_msg)
|
|
126
|
+
print(f"[ERROR] {error_msg}")
|
|
127
|
+
debug_log(" Continuing after error in block", "continue_on_error=True")
|
|
128
|
+
result = NULL # Continue with null result
|
|
129
|
+
continue
|
|
130
|
+
|
|
131
|
+
# Execute deferred cleanup before returning
|
|
132
|
+
self._execute_deferred_cleanup(env, stack_trace)
|
|
133
|
+
# Restore stdout before returning
|
|
134
|
+
self._restore_stdout(env)
|
|
135
|
+
return res
|
|
136
|
+
result = res
|
|
137
|
+
|
|
138
|
+
debug_log(" Block completed", result)
|
|
139
|
+
return result
|
|
140
|
+
finally:
|
|
141
|
+
# Always execute deferred cleanup when block exits (normal or error)
|
|
142
|
+
self._execute_deferred_cleanup(env, stack_trace)
|
|
143
|
+
# Restore stdout to previous state (scope-aware)
|
|
144
|
+
self._restore_stdout(env)
|
|
145
|
+
|
|
146
|
+
def eval_expression_statement(self, node, env, stack_trace):
|
|
147
|
+
# Debug: Check if expression is being evaluated
|
|
148
|
+
if hasattr(node.expression, 'function') and hasattr(node.expression.function, 'value'):
|
|
149
|
+
func_name = node.expression.function.value
|
|
150
|
+
if func_name in ['persist_set', 'persist_get']:
|
|
151
|
+
print(f"[EVAL_EXPR_STMT] Evaluating {func_name} call", flush=True)
|
|
152
|
+
result = self.eval_node(node.expression, env, stack_trace)
|
|
153
|
+
if hasattr(node.expression, 'function') and hasattr(node.expression.function, 'value'):
|
|
154
|
+
func_name = node.expression.function.value
|
|
155
|
+
if func_name in ['persist_set', 'persist_get']:
|
|
156
|
+
print(f"[EVAL_EXPR_STMT] Result from {func_name}: {result}", flush=True)
|
|
157
|
+
return result
|
|
158
|
+
|
|
159
|
+
# === VARIABLE & CONTROL FLOW ===
|
|
160
|
+
|
|
161
|
+
def eval_let_statement(self, node, env, stack_trace):
|
|
162
|
+
debug_log("eval_let_statement", f"let {node.name.value}")
|
|
163
|
+
|
|
164
|
+
# FIXED: Evaluate value FIRST to prevent recursion issues
|
|
165
|
+
value = self.eval_node(node.value, env, stack_trace)
|
|
166
|
+
if is_error(value):
|
|
167
|
+
return value
|
|
168
|
+
|
|
169
|
+
# Type annotation validation
|
|
170
|
+
if node.type_annotation:
|
|
171
|
+
type_name = node.type_annotation.value
|
|
172
|
+
debug_log("eval_let_statement", f"Validating type: {type_name}")
|
|
173
|
+
|
|
174
|
+
# Resolve type alias
|
|
175
|
+
type_alias = env.get(type_name)
|
|
176
|
+
if type_alias and hasattr(type_alias, '__class__') and type_alias.__class__.__name__ == 'TypeAlias':
|
|
177
|
+
# Get the base type
|
|
178
|
+
base_type = type_alias.base_type
|
|
179
|
+
debug_log("eval_let_statement", f"Resolved type alias: {type_name} -> {base_type}")
|
|
180
|
+
|
|
181
|
+
# Validate value type matches the base type
|
|
182
|
+
if not self._validate_type(value, base_type):
|
|
183
|
+
return EvaluationError(f"Type mismatch: cannot assign {type(value).__name__} to {type_name} (expected {base_type})")
|
|
184
|
+
else:
|
|
185
|
+
# Direct type validation (for built-in types)
|
|
186
|
+
if not self._validate_type(value, type_name):
|
|
187
|
+
return EvaluationError(f"Type mismatch: cannot assign {type(value).__name__} to {type_name}")
|
|
188
|
+
|
|
189
|
+
env.set(node.name.value, value)
|
|
190
|
+
return NULL
|
|
191
|
+
|
|
192
|
+
def _validate_type(self, value, expected_type):
|
|
193
|
+
"""Validate that a value matches an expected type"""
|
|
194
|
+
# Map Zexus types to Python types
|
|
195
|
+
type_map = {
|
|
196
|
+
'int': ('Integer',),
|
|
197
|
+
'integer': ('Integer',),
|
|
198
|
+
'str': ('String',),
|
|
199
|
+
'string': ('String',),
|
|
200
|
+
'bool': ('Boolean',),
|
|
201
|
+
'boolean': ('Boolean',),
|
|
202
|
+
'float': ('Float', 'Integer'), # int can be used as float
|
|
203
|
+
'array': ('Array',),
|
|
204
|
+
'list': ('Array',),
|
|
205
|
+
'map': ('Map',),
|
|
206
|
+
'dict': ('Map',),
|
|
207
|
+
'null': ('Null',),
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
value_type = type(value).__name__
|
|
211
|
+
expected_types = type_map.get(expected_type.lower(), (expected_type,))
|
|
212
|
+
|
|
213
|
+
return value_type in expected_types
|
|
214
|
+
|
|
215
|
+
def eval_const_statement(self, node, env, stack_trace):
|
|
216
|
+
debug_log("eval_const_statement", f"const {node.name.value}")
|
|
217
|
+
|
|
218
|
+
# Evaluate value FIRST
|
|
219
|
+
value = self.eval_node(node.value, env, stack_trace)
|
|
220
|
+
if is_error(value):
|
|
221
|
+
return value
|
|
222
|
+
|
|
223
|
+
# Set as const in environment
|
|
224
|
+
env.set_const(node.name.value, value)
|
|
225
|
+
return NULL
|
|
226
|
+
|
|
227
|
+
def eval_data_statement(self, node, env, stack_trace):
|
|
228
|
+
"""Evaluate data statement - creates a production-grade dataclass constructor
|
|
229
|
+
|
|
230
|
+
data User {
|
|
231
|
+
name: string,
|
|
232
|
+
email: string = "default",
|
|
233
|
+
age: number require age >= 0
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
data Box<T> {
|
|
237
|
+
value: T
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
Creates a User() or Box<T>() constructor function with:
|
|
241
|
+
- Type validation
|
|
242
|
+
- Constraint validation
|
|
243
|
+
- Generic type substitution
|
|
244
|
+
- Auto-generated methods: toString(), toJSON(), clone(), hash(), verify()
|
|
245
|
+
- Static methods: fromJSON()
|
|
246
|
+
- Immutability support
|
|
247
|
+
- Verification support
|
|
248
|
+
"""
|
|
249
|
+
from ..object import Map, String, Integer, Boolean, List, NULL, EvaluationError, Builtin
|
|
250
|
+
from ..environment import Environment
|
|
251
|
+
import json
|
|
252
|
+
import hashlib
|
|
253
|
+
|
|
254
|
+
debug_log("eval_data_statement", f"data {node.name.value}")
|
|
255
|
+
|
|
256
|
+
type_name = node.name.value
|
|
257
|
+
fields = node.fields
|
|
258
|
+
modifiers = node.modifiers or []
|
|
259
|
+
parent_type = node.parent
|
|
260
|
+
decorators = node.decorators or []
|
|
261
|
+
type_params = node.type_params or []
|
|
262
|
+
|
|
263
|
+
# Check modifiers
|
|
264
|
+
is_immutable = "immutable" in modifiers
|
|
265
|
+
is_verified = "verified" in modifiers
|
|
266
|
+
is_validated = "validated" in decorators
|
|
267
|
+
|
|
268
|
+
debug_log(f" Fields: {len(fields)}, Immutable: {is_immutable}, Verified: {is_verified}, Validated: {is_validated}")
|
|
269
|
+
|
|
270
|
+
if type_params:
|
|
271
|
+
debug_log(f" Generic type parameters: {type_params}")
|
|
272
|
+
|
|
273
|
+
# If this is a generic type, we need to create a factory that produces specialized constructors
|
|
274
|
+
if type_params:
|
|
275
|
+
# Store the generic template
|
|
276
|
+
generic_template = {
|
|
277
|
+
'type_name': type_name,
|
|
278
|
+
'fields': fields,
|
|
279
|
+
'modifiers': modifiers,
|
|
280
|
+
'parent_type': parent_type,
|
|
281
|
+
'decorators': decorators,
|
|
282
|
+
'type_params': type_params,
|
|
283
|
+
'env': env,
|
|
284
|
+
'stack_trace': stack_trace,
|
|
285
|
+
'evaluator': self
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
# Create a Builtin that stores the template
|
|
289
|
+
template_constructor = Builtin(lambda *args: EvaluationError(
|
|
290
|
+
f"Generic type '{type_name}' requires type arguments. Use {type_name}<Type>(...)"
|
|
291
|
+
))
|
|
292
|
+
template_constructor.is_generic = True
|
|
293
|
+
template_constructor.generic_template = generic_template
|
|
294
|
+
|
|
295
|
+
# Register the generic template
|
|
296
|
+
env.set(type_name, template_constructor)
|
|
297
|
+
return NULL
|
|
298
|
+
|
|
299
|
+
# Check modifiers
|
|
300
|
+
is_immutable = "immutable" in modifiers
|
|
301
|
+
is_verified = "verified" in modifiers
|
|
302
|
+
is_validated = "validated" in decorators
|
|
303
|
+
|
|
304
|
+
debug_log(f" Fields: {len(fields)}, Immutable: {is_immutable}, Verified: {is_verified}, Validated: {is_validated}")
|
|
305
|
+
|
|
306
|
+
# If there's a parent, get parent fields
|
|
307
|
+
parent_fields = []
|
|
308
|
+
parent_constructor = None
|
|
309
|
+
if parent_type:
|
|
310
|
+
debug_log(f" Inheritance: {type_name} extends {parent_type}")
|
|
311
|
+
parent_constructor = env.get(parent_type)
|
|
312
|
+
if parent_constructor is None:
|
|
313
|
+
return EvaluationError(f"Parent type '{parent_type}' not found")
|
|
314
|
+
|
|
315
|
+
# Extract parent fields from the parent's constructor metadata
|
|
316
|
+
if hasattr(parent_constructor, 'dataclass_fields'):
|
|
317
|
+
parent_fields = parent_constructor.dataclass_fields
|
|
318
|
+
debug_log(f" Parent has {len(parent_fields)} fields")
|
|
319
|
+
|
|
320
|
+
# Combine parent fields + child fields
|
|
321
|
+
all_fields = parent_fields + fields
|
|
322
|
+
|
|
323
|
+
# Store reference to self and env for closures
|
|
324
|
+
evaluator_self = self
|
|
325
|
+
parent_env = env
|
|
326
|
+
|
|
327
|
+
# Create constructor function
|
|
328
|
+
def dataclass_constructor(*args):
|
|
329
|
+
"""Production-grade dataclass constructor with full validation"""
|
|
330
|
+
|
|
331
|
+
# Create instance as a Map with String keys
|
|
332
|
+
instance = Map({})
|
|
333
|
+
instance.pairs = {}
|
|
334
|
+
|
|
335
|
+
# Set type metadata
|
|
336
|
+
instance.pairs[String("__type__")] = String(type_name)
|
|
337
|
+
instance.pairs[String("__immutable__")] = Boolean(is_immutable)
|
|
338
|
+
instance.pairs[String("__verified__")] = Boolean(is_verified)
|
|
339
|
+
|
|
340
|
+
# Process each field with validation (parent fields first, then child fields)
|
|
341
|
+
arg_index = 0
|
|
342
|
+
for field in all_fields:
|
|
343
|
+
field_name = field.name
|
|
344
|
+
|
|
345
|
+
# Skip computed properties and methods - they'll be added later
|
|
346
|
+
if field.computed or field.method_body is not None:
|
|
347
|
+
continue
|
|
348
|
+
|
|
349
|
+
field_value = NULL
|
|
350
|
+
|
|
351
|
+
# Get value from arguments or default
|
|
352
|
+
if arg_index < len(args):
|
|
353
|
+
field_value = args[arg_index]
|
|
354
|
+
arg_index += 1
|
|
355
|
+
elif field.default_value is not None:
|
|
356
|
+
# Evaluate default value in parent environment
|
|
357
|
+
field_value = evaluator_self.eval_node(field.default_value, parent_env, stack_trace)
|
|
358
|
+
if is_error(field_value):
|
|
359
|
+
return field_value
|
|
360
|
+
|
|
361
|
+
# Type validation
|
|
362
|
+
if field.field_type and field_value != NULL:
|
|
363
|
+
expected_type = field.field_type
|
|
364
|
+
|
|
365
|
+
type_map = {
|
|
366
|
+
"string": String,
|
|
367
|
+
"number": Integer,
|
|
368
|
+
"bool": Boolean,
|
|
369
|
+
"array": List,
|
|
370
|
+
"map": Map
|
|
371
|
+
}
|
|
372
|
+
|
|
373
|
+
if expected_type in type_map:
|
|
374
|
+
expected_class = type_map[expected_type]
|
|
375
|
+
if not isinstance(field_value, expected_class):
|
|
376
|
+
actual_type = type(field_value).__name__
|
|
377
|
+
return EvaluationError(
|
|
378
|
+
f"Type mismatch for field '{field_name}': expected {expected_type}, got {actual_type}"
|
|
379
|
+
)
|
|
380
|
+
|
|
381
|
+
# Constraint validation (require clause)
|
|
382
|
+
if field.constraint and field_value != NULL:
|
|
383
|
+
# Evaluate constraint with field value in scope
|
|
384
|
+
temp_env = Environment(outer=parent_env)
|
|
385
|
+
temp_env.set(field_name, field_value)
|
|
386
|
+
|
|
387
|
+
constraint_result = evaluator_self.eval_node(field.constraint, temp_env, stack_trace)
|
|
388
|
+
if is_error(constraint_result):
|
|
389
|
+
return constraint_result
|
|
390
|
+
|
|
391
|
+
# Check if constraint is truthy
|
|
392
|
+
is_valid = False
|
|
393
|
+
if hasattr(constraint_result, 'value'):
|
|
394
|
+
is_valid = bool(constraint_result.value)
|
|
395
|
+
elif isinstance(constraint_result, Boolean):
|
|
396
|
+
is_valid = constraint_result.value
|
|
397
|
+
else:
|
|
398
|
+
is_valid = bool(constraint_result)
|
|
399
|
+
|
|
400
|
+
if not is_valid:
|
|
401
|
+
return EvaluationError(
|
|
402
|
+
f"Validation failed for field '{field_name}': constraint not satisfied"
|
|
403
|
+
)
|
|
404
|
+
|
|
405
|
+
# Set field value
|
|
406
|
+
instance.pairs[String(field_name)] = field_value
|
|
407
|
+
|
|
408
|
+
# Auto-generated methods
|
|
409
|
+
|
|
410
|
+
# toString() method
|
|
411
|
+
def to_string_method(*args):
|
|
412
|
+
parts = []
|
|
413
|
+
for field in fields:
|
|
414
|
+
fname = field.name
|
|
415
|
+
fkey = String(fname)
|
|
416
|
+
if fkey in instance.pairs:
|
|
417
|
+
fval = instance.pairs[fkey]
|
|
418
|
+
if hasattr(fval, 'inspect'):
|
|
419
|
+
val_str = fval.inspect()
|
|
420
|
+
elif hasattr(fval, 'value'):
|
|
421
|
+
val_str = repr(fval.value)
|
|
422
|
+
else:
|
|
423
|
+
val_str = str(fval)
|
|
424
|
+
parts.append(f'{fname}={val_str}')
|
|
425
|
+
return String(f"{type_name}({', '.join(parts)})")
|
|
426
|
+
|
|
427
|
+
instance.pairs[String("toString")] = Builtin(to_string_method)
|
|
428
|
+
|
|
429
|
+
# toJSON() method
|
|
430
|
+
def to_json_method(*args):
|
|
431
|
+
obj = {}
|
|
432
|
+
for field in fields:
|
|
433
|
+
fname = field.name
|
|
434
|
+
fkey = String(fname)
|
|
435
|
+
if fkey in instance.pairs:
|
|
436
|
+
fval = instance.pairs[fkey]
|
|
437
|
+
if hasattr(fval, 'value'):
|
|
438
|
+
obj[fname] = fval.value
|
|
439
|
+
elif fval == NULL:
|
|
440
|
+
obj[fname] = None
|
|
441
|
+
else:
|
|
442
|
+
obj[fname] = str(fval)
|
|
443
|
+
return String(json.dumps(obj))
|
|
444
|
+
|
|
445
|
+
instance.pairs[String("toJSON")] = Builtin(to_json_method)
|
|
446
|
+
|
|
447
|
+
# clone() method
|
|
448
|
+
def clone_method(*args):
|
|
449
|
+
clone_args = []
|
|
450
|
+
for field in fields:
|
|
451
|
+
fname = field.name
|
|
452
|
+
fkey = String(fname)
|
|
453
|
+
if fkey in instance.pairs:
|
|
454
|
+
clone_args.append(instance.pairs[fkey])
|
|
455
|
+
else:
|
|
456
|
+
clone_args.append(NULL)
|
|
457
|
+
return dataclass_constructor(*clone_args)
|
|
458
|
+
|
|
459
|
+
instance.pairs[String("clone")] = Builtin(clone_method)
|
|
460
|
+
|
|
461
|
+
# equals() method
|
|
462
|
+
def equals_method(*args):
|
|
463
|
+
if len(args) == 0:
|
|
464
|
+
return Boolean(False)
|
|
465
|
+
other = args[0]
|
|
466
|
+
if not isinstance(other, Map):
|
|
467
|
+
return Boolean(False)
|
|
468
|
+
|
|
469
|
+
# Check type match
|
|
470
|
+
other_type = other.pairs.get(String("__type__"))
|
|
471
|
+
if not other_type or other_type.value != type_name:
|
|
472
|
+
return Boolean(False)
|
|
473
|
+
|
|
474
|
+
# Compare all fields
|
|
475
|
+
for field in fields:
|
|
476
|
+
fname = field.name
|
|
477
|
+
fkey = String(fname)
|
|
478
|
+
if fkey not in instance.pairs or fkey not in other.pairs:
|
|
479
|
+
return Boolean(False)
|
|
480
|
+
|
|
481
|
+
val1 = instance.pairs[fkey]
|
|
482
|
+
val2 = other.pairs[fkey]
|
|
483
|
+
|
|
484
|
+
# Compare values
|
|
485
|
+
if hasattr(val1, 'value') and hasattr(val2, 'value'):
|
|
486
|
+
if val1.value != val2.value:
|
|
487
|
+
return Boolean(False)
|
|
488
|
+
elif val1 != val2:
|
|
489
|
+
return Boolean(False)
|
|
490
|
+
|
|
491
|
+
return Boolean(True)
|
|
492
|
+
|
|
493
|
+
instance.pairs[String("equals")] = Builtin(equals_method)
|
|
494
|
+
|
|
495
|
+
# Verified type methods
|
|
496
|
+
if is_verified:
|
|
497
|
+
# hash() method - cryptographic hash of all fields
|
|
498
|
+
def hash_method(*args):
|
|
499
|
+
json_str = to_json_method()
|
|
500
|
+
hash_val = hashlib.sha256(json_str.value.encode()).hexdigest()
|
|
501
|
+
return String(hash_val)
|
|
502
|
+
|
|
503
|
+
instance.pairs[String("hash")] = Builtin(hash_method)
|
|
504
|
+
|
|
505
|
+
# verify() method - re-validate all constraints
|
|
506
|
+
def verify_method(*args):
|
|
507
|
+
for field in fields:
|
|
508
|
+
if field.constraint:
|
|
509
|
+
fname = field.name
|
|
510
|
+
fkey = String(fname)
|
|
511
|
+
if fkey in instance.pairs:
|
|
512
|
+
fval = instance.pairs[fkey]
|
|
513
|
+
temp_env = Environment(outer=parent_env)
|
|
514
|
+
temp_env.set(fname, fval)
|
|
515
|
+
result = evaluator_self.eval_node(field.constraint, temp_env, stack_trace)
|
|
516
|
+
if is_error(result):
|
|
517
|
+
return Boolean(False)
|
|
518
|
+
if hasattr(result, 'value') and not result.value:
|
|
519
|
+
return Boolean(False)
|
|
520
|
+
return Boolean(True)
|
|
521
|
+
|
|
522
|
+
instance.pairs[String("verify")] = Builtin(verify_method)
|
|
523
|
+
|
|
524
|
+
# Add custom methods defined in the dataclass (parent + child)
|
|
525
|
+
for field in all_fields:
|
|
526
|
+
if field.method_body is not None:
|
|
527
|
+
# Check if this is an operator overload
|
|
528
|
+
is_operator = hasattr(field, 'operator') and field.operator is not None
|
|
529
|
+
|
|
530
|
+
# Create a closure for the method that has access to instance fields
|
|
531
|
+
def make_method(method_body, method_params, method_name, decorators, is_op=False):
|
|
532
|
+
def custom_method(*args):
|
|
533
|
+
# Create method environment with instance fields
|
|
534
|
+
from ..environment import Environment
|
|
535
|
+
method_env = Environment(outer=parent_env)
|
|
536
|
+
|
|
537
|
+
# Bind 'this' to the instance
|
|
538
|
+
method_env.set('this', instance)
|
|
539
|
+
|
|
540
|
+
# Bind parameters to arguments
|
|
541
|
+
for i, param in enumerate(method_params):
|
|
542
|
+
if i < len(args):
|
|
543
|
+
method_env.set(param, args[i])
|
|
544
|
+
else:
|
|
545
|
+
method_env.set(param, NULL)
|
|
546
|
+
|
|
547
|
+
# Apply @logged decorator (operators don't get logged)
|
|
548
|
+
if "logged" in decorators and not is_op:
|
|
549
|
+
arg_str = ", ".join([str(arg.value if hasattr(arg, 'value') else arg) for arg in args])
|
|
550
|
+
print(f"📝 Calling {method_name}({arg_str})")
|
|
551
|
+
|
|
552
|
+
# Execute method body
|
|
553
|
+
from ..object import ReturnValue
|
|
554
|
+
from .. import zexus_ast
|
|
555
|
+
result = NULL
|
|
556
|
+
|
|
557
|
+
# Handle both BlockStatement and list of statements
|
|
558
|
+
statements = method_body
|
|
559
|
+
if isinstance(method_body, zexus_ast.BlockStatement):
|
|
560
|
+
statements = method_body.statements
|
|
561
|
+
|
|
562
|
+
for stmt in statements:
|
|
563
|
+
result = evaluator_self.eval_node(stmt, method_env, stack_trace)
|
|
564
|
+
if is_error(result):
|
|
565
|
+
return result
|
|
566
|
+
# Handle return statements
|
|
567
|
+
if isinstance(result, ReturnValue):
|
|
568
|
+
return result.value
|
|
569
|
+
|
|
570
|
+
# Apply @logged decorator (return value)
|
|
571
|
+
if "logged" in decorators and not is_op:
|
|
572
|
+
result_str = str(result.value if hasattr(result, 'value') else result)
|
|
573
|
+
print(f"📝 {method_name} returned: {result_str}")
|
|
574
|
+
|
|
575
|
+
return result
|
|
576
|
+
|
|
577
|
+
return custom_method
|
|
578
|
+
|
|
579
|
+
# Create the method function
|
|
580
|
+
method_func = make_method(field.method_body, field.method_params, field.name, field.decorators, is_operator)
|
|
581
|
+
|
|
582
|
+
# Apply @cached decorator if present
|
|
583
|
+
if "cached" in field.decorators:
|
|
584
|
+
cache = {}
|
|
585
|
+
original_func = method_func
|
|
586
|
+
|
|
587
|
+
def cached_method(*args):
|
|
588
|
+
# Create cache key from arguments
|
|
589
|
+
cache_key = tuple(arg.value if hasattr(arg, 'value') else str(arg) for arg in args)
|
|
590
|
+
if cache_key in cache:
|
|
591
|
+
return cache[cache_key]
|
|
592
|
+
result = original_func(*args)
|
|
593
|
+
cache[cache_key] = result
|
|
594
|
+
return result
|
|
595
|
+
|
|
596
|
+
method_func = cached_method
|
|
597
|
+
|
|
598
|
+
# Store the method/operator with appropriate key
|
|
599
|
+
if is_operator:
|
|
600
|
+
# Store with __operator_{symbol}__ key for operator overloading
|
|
601
|
+
operator_key = f"__operator_{field.operator}__"
|
|
602
|
+
instance.pairs[String(operator_key)] = Builtin(
|
|
603
|
+
method_func,
|
|
604
|
+
name=operator_key
|
|
605
|
+
)
|
|
606
|
+
else:
|
|
607
|
+
# Regular method
|
|
608
|
+
instance.pairs[String(field.name)] = Builtin(
|
|
609
|
+
method_func,
|
|
610
|
+
name=field.name
|
|
611
|
+
)
|
|
612
|
+
|
|
613
|
+
# Store computed property definitions for auto-calling on access
|
|
614
|
+
computed_props = {}
|
|
615
|
+
for field in all_fields:
|
|
616
|
+
if field.computed:
|
|
617
|
+
computed_props[field.name] = field.computed
|
|
618
|
+
|
|
619
|
+
# Store computed property metadata
|
|
620
|
+
if computed_props:
|
|
621
|
+
instance.pairs[String("__computed__")] = computed_props
|
|
622
|
+
|
|
623
|
+
return instance
|
|
624
|
+
|
|
625
|
+
# Create static fromJSON method
|
|
626
|
+
def from_json_static(*args):
|
|
627
|
+
"""Static method to deserialize from JSON"""
|
|
628
|
+
if len(args) == 0:
|
|
629
|
+
return EvaluationError("fromJSON requires a JSON string argument")
|
|
630
|
+
|
|
631
|
+
json_str = args[0]
|
|
632
|
+
if not isinstance(json_str, String):
|
|
633
|
+
return EvaluationError("fromJSON expects a string argument")
|
|
634
|
+
|
|
635
|
+
try:
|
|
636
|
+
data = json.loads(json_str.value)
|
|
637
|
+
constructor_args = []
|
|
638
|
+
|
|
639
|
+
for field in fields:
|
|
640
|
+
fname = field.name
|
|
641
|
+
if fname in data:
|
|
642
|
+
val = data[fname]
|
|
643
|
+
# Convert JSON values to Zexus objects
|
|
644
|
+
if isinstance(val, str):
|
|
645
|
+
constructor_args.append(String(val))
|
|
646
|
+
elif isinstance(val, bool):
|
|
647
|
+
constructor_args.append(Boolean(val))
|
|
648
|
+
elif isinstance(val, (int, float)):
|
|
649
|
+
constructor_args.append(Integer(int(val)))
|
|
650
|
+
elif isinstance(val, list):
|
|
651
|
+
# Convert to List
|
|
652
|
+
zx_list = List()
|
|
653
|
+
zx_list.elements = [String(str(item)) for item in val]
|
|
654
|
+
constructor_args.append(zx_list)
|
|
655
|
+
elif isinstance(val, dict):
|
|
656
|
+
# Convert to Map
|
|
657
|
+
zx_map = Map()
|
|
658
|
+
zx_map.pairs = {String(k): String(str(v)) for k, v in val.items()}
|
|
659
|
+
constructor_args.append(zx_map)
|
|
660
|
+
elif val is None:
|
|
661
|
+
constructor_args.append(NULL)
|
|
662
|
+
else:
|
|
663
|
+
constructor_args.append(String(str(val)))
|
|
664
|
+
else:
|
|
665
|
+
constructor_args.append(NULL)
|
|
666
|
+
|
|
667
|
+
return dataclass_constructor(*constructor_args)
|
|
668
|
+
|
|
669
|
+
except json.JSONDecodeError as e:
|
|
670
|
+
return EvaluationError(f"Invalid JSON: {str(e)}")
|
|
671
|
+
except Exception as e:
|
|
672
|
+
return EvaluationError(f"Error deserializing JSON: {str(e)}")
|
|
673
|
+
|
|
674
|
+
# Create static default() method
|
|
675
|
+
def default_static(*args):
|
|
676
|
+
"""Static method to create instance with all default values"""
|
|
677
|
+
default_args = []
|
|
678
|
+
|
|
679
|
+
for field in fields:
|
|
680
|
+
if field.default_value is not None:
|
|
681
|
+
# Evaluate default value in parent environment
|
|
682
|
+
default_val = evaluator_self.eval_node(field.default_value, parent_env, stack_trace)
|
|
683
|
+
if is_error(default_val):
|
|
684
|
+
return default_val
|
|
685
|
+
default_args.append(default_val)
|
|
686
|
+
else:
|
|
687
|
+
# No default - use NULL
|
|
688
|
+
default_args.append(NULL)
|
|
689
|
+
|
|
690
|
+
return dataclass_constructor(*default_args)
|
|
691
|
+
|
|
692
|
+
# Register constructor as a Builtin with static methods
|
|
693
|
+
constructor = Builtin(dataclass_constructor)
|
|
694
|
+
|
|
695
|
+
# Store fields for inheritance (child classes need access to parent fields)
|
|
696
|
+
constructor.dataclass_fields = all_fields
|
|
697
|
+
|
|
698
|
+
# Add static methods as properties on the constructor
|
|
699
|
+
# (We'll store them in a special way that the evaluator can access)
|
|
700
|
+
constructor.static_methods = {
|
|
701
|
+
"fromJSON": Builtin(from_json_static),
|
|
702
|
+
"default": Builtin(default_static)
|
|
703
|
+
}
|
|
704
|
+
|
|
705
|
+
# Register constructor in environment as const
|
|
706
|
+
# For specialized generics (e.g., Box<number>), don't fail if already registered
|
|
707
|
+
try:
|
|
708
|
+
env.set_const(type_name, constructor)
|
|
709
|
+
except ValueError as e:
|
|
710
|
+
# If it's a specialized generic that's already registered, just return the existing one
|
|
711
|
+
if '<' in type_name and '>' in type_name:
|
|
712
|
+
debug_log(f" ℹ️ Specialized generic already registered: {type_name}")
|
|
713
|
+
return NULL
|
|
714
|
+
else:
|
|
715
|
+
# Re-raise for non-generic types
|
|
716
|
+
raise e
|
|
717
|
+
|
|
718
|
+
debug_log(f" ✅ Registered production-grade dataclass: {type_name}")
|
|
719
|
+
return NULL
|
|
720
|
+
|
|
721
|
+
def eval_return_statement(self, node, env, stack_trace):
|
|
722
|
+
val = self.eval_node(node.return_value, env, stack_trace)
|
|
723
|
+
if is_error(val):
|
|
724
|
+
return val
|
|
725
|
+
return ReturnValue(val)
|
|
726
|
+
|
|
727
|
+
def eval_continue_statement(self, node, env, stack_trace):
|
|
728
|
+
"""Enable continue-on-error mode for the evaluator."""
|
|
729
|
+
debug_log("eval_continue_statement", "Enabling error recovery mode")
|
|
730
|
+
self.continue_on_error = True
|
|
731
|
+
return NULL
|
|
732
|
+
|
|
733
|
+
def eval_break_statement(self, node, env, stack_trace):
|
|
734
|
+
"""Return BreakException to signal loop exit."""
|
|
735
|
+
debug_log("eval_break_statement", "Breaking out of loop")
|
|
736
|
+
return BreakException()
|
|
737
|
+
|
|
738
|
+
def eval_throw_statement(self, node, env, stack_trace):
|
|
739
|
+
"""Throw an error/exception."""
|
|
740
|
+
debug_log("eval_throw_statement", "Throwing error")
|
|
741
|
+
# Evaluate error message
|
|
742
|
+
message = self.eval_node(node.message, env, stack_trace)
|
|
743
|
+
if is_error(message):
|
|
744
|
+
return message
|
|
745
|
+
# Convert to string
|
|
746
|
+
error_msg = str(message) if hasattr(message, '__str__') else "Unknown error"
|
|
747
|
+
return EvaluationError(error_msg, stack_trace=stack_trace)
|
|
748
|
+
|
|
749
|
+
def eval_assignment_expression(self, node, env, stack_trace):
|
|
750
|
+
# Support assigning to identifiers or property access targets
|
|
751
|
+
from ..object import EvaluationError, NULL
|
|
752
|
+
|
|
753
|
+
# If target is a property access expression
|
|
754
|
+
if isinstance(node.name, PropertyAccessExpression):
|
|
755
|
+
# Evaluate object and property
|
|
756
|
+
obj = self.eval_node(node.name.object, env, stack_trace)
|
|
757
|
+
if is_error(obj):
|
|
758
|
+
return obj
|
|
759
|
+
|
|
760
|
+
# Safely extract property key
|
|
761
|
+
if hasattr(node.name.property, 'value'):
|
|
762
|
+
prop_key = node.name.property.value
|
|
763
|
+
else:
|
|
764
|
+
# Evaluate property expression
|
|
765
|
+
prop_result = self.eval_node(node.name.property, env, stack_trace)
|
|
766
|
+
if is_error(prop_result):
|
|
767
|
+
return prop_result
|
|
768
|
+
prop_key = prop_result.value if hasattr(prop_result, 'value') else str(prop_result)
|
|
769
|
+
|
|
770
|
+
# Evaluate value first
|
|
771
|
+
value = self.eval_node(node.value, env, stack_trace)
|
|
772
|
+
if is_error(value):
|
|
773
|
+
return value
|
|
774
|
+
|
|
775
|
+
# Check for seal on property
|
|
776
|
+
try:
|
|
777
|
+
if isinstance(obj, Map):
|
|
778
|
+
existing = obj.pairs.get(prop_key)
|
|
779
|
+
if existing is not None and existing.__class__.__name__ == 'SealedObject':
|
|
780
|
+
return EvaluationError(f"Cannot modify sealed property: {prop_key}")
|
|
781
|
+
elif hasattr(obj, 'get') and hasattr(obj, 'set'):
|
|
782
|
+
existing = obj.get(prop_key)
|
|
783
|
+
if existing is not None and getattr(existing, '__class__', None) and existing.__class__.__name__ == 'SealedObject':
|
|
784
|
+
return EvaluationError(f"Cannot modify sealed property: {prop_key}")
|
|
785
|
+
except Exception:
|
|
786
|
+
pass
|
|
787
|
+
|
|
788
|
+
# Enforcement: consult security restrictions for writes
|
|
789
|
+
try:
|
|
790
|
+
ctx = get_security_context()
|
|
791
|
+
target = f"{getattr(node.name.object, 'value', str(node.name.object))}.{prop_key}"
|
|
792
|
+
restriction = ctx.get_restriction(target)
|
|
793
|
+
except Exception:
|
|
794
|
+
restriction = None
|
|
795
|
+
|
|
796
|
+
if restriction:
|
|
797
|
+
rule = restriction.get('restriction')
|
|
798
|
+
if rule == 'read-only':
|
|
799
|
+
return EvaluationError(f"Write prohibited by restriction: {target}")
|
|
800
|
+
if rule == 'admin-only':
|
|
801
|
+
is_admin = bool(env.get('__is_admin__')) if env and hasattr(env, 'get') else False
|
|
802
|
+
if not is_admin:
|
|
803
|
+
return EvaluationError('Admin privileges required to modify this field')
|
|
804
|
+
|
|
805
|
+
# Perform set
|
|
806
|
+
try:
|
|
807
|
+
if isinstance(obj, Map):
|
|
808
|
+
obj.pairs[prop_key] = value
|
|
809
|
+
return value
|
|
810
|
+
elif hasattr(obj, 'set'):
|
|
811
|
+
obj.set(prop_key, value)
|
|
812
|
+
return value
|
|
813
|
+
except Exception as e:
|
|
814
|
+
return EvaluationError(str(e))
|
|
815
|
+
|
|
816
|
+
return EvaluationError('Assignment to property failed')
|
|
817
|
+
|
|
818
|
+
# Otherwise it's an identifier assignment
|
|
819
|
+
if isinstance(node.name, Identifier):
|
|
820
|
+
name = node.name.value
|
|
821
|
+
target_obj = env.get(name)
|
|
822
|
+
if isinstance(target_obj, SealedObject):
|
|
823
|
+
return EvaluationError(f"Cannot assign to sealed object: {name}")
|
|
824
|
+
|
|
825
|
+
value = self.eval_node(node.value, env, stack_trace)
|
|
826
|
+
if is_error(value):
|
|
827
|
+
return value
|
|
828
|
+
|
|
829
|
+
try:
|
|
830
|
+
env.assign(name, value)
|
|
831
|
+
except ValueError as e:
|
|
832
|
+
return EvaluationError(str(e))
|
|
833
|
+
return value
|
|
834
|
+
|
|
835
|
+
return EvaluationError('Invalid assignment target')
|
|
836
|
+
|
|
837
|
+
def eval_try_catch_statement(self, node, env, stack_trace):
|
|
838
|
+
debug_log("eval_try_catch", f"error_var: {node.error_variable.value if node.error_variable else 'error'}")
|
|
839
|
+
|
|
840
|
+
try:
|
|
841
|
+
result = self.eval_node(node.try_block, env, stack_trace)
|
|
842
|
+
if is_error(result):
|
|
843
|
+
catch_env = Environment(outer=env)
|
|
844
|
+
var_name = node.error_variable.value if node.error_variable else "error"
|
|
845
|
+
catch_env.set(var_name, String(str(result)))
|
|
846
|
+
return self.eval_node(node.catch_block, catch_env, stack_trace)
|
|
847
|
+
return result
|
|
848
|
+
except Exception as e:
|
|
849
|
+
catch_env = Environment(outer=env)
|
|
850
|
+
var_name = node.error_variable.value if node.error_variable else "error"
|
|
851
|
+
catch_env.set(var_name, String(str(e)))
|
|
852
|
+
return self.eval_node(node.catch_block, catch_env, stack_trace)
|
|
853
|
+
|
|
854
|
+
def eval_if_statement(self, node, env, stack_trace):
|
|
855
|
+
cond = self.eval_node(node.condition, env, stack_trace)
|
|
856
|
+
if is_error(cond):
|
|
857
|
+
return cond
|
|
858
|
+
|
|
859
|
+
if is_truthy(cond):
|
|
860
|
+
return self.eval_node(node.consequence, env, stack_trace)
|
|
861
|
+
|
|
862
|
+
# Check elif conditions
|
|
863
|
+
if hasattr(node, 'elif_parts') and node.elif_parts:
|
|
864
|
+
for elif_condition, elif_consequence in node.elif_parts:
|
|
865
|
+
elif_cond = self.eval_node(elif_condition, env, stack_trace)
|
|
866
|
+
if is_error(elif_cond):
|
|
867
|
+
return elif_cond
|
|
868
|
+
if is_truthy(elif_cond):
|
|
869
|
+
return self.eval_node(elif_consequence, env, stack_trace)
|
|
870
|
+
|
|
871
|
+
# Check else clause
|
|
872
|
+
if node.alternative:
|
|
873
|
+
return self.eval_node(node.alternative, env, stack_trace)
|
|
874
|
+
|
|
875
|
+
return NULL
|
|
876
|
+
|
|
877
|
+
def eval_while_statement(self, node, env, stack_trace):
|
|
878
|
+
result = NULL
|
|
879
|
+
while True:
|
|
880
|
+
cond = self.eval_node(node.condition, env, stack_trace)
|
|
881
|
+
if is_error(cond):
|
|
882
|
+
return cond
|
|
883
|
+
if not is_truthy(cond):
|
|
884
|
+
break
|
|
885
|
+
|
|
886
|
+
result = self.eval_node(node.body, env, stack_trace)
|
|
887
|
+
if isinstance(result, ReturnValue):
|
|
888
|
+
return result
|
|
889
|
+
if isinstance(result, BreakException):
|
|
890
|
+
# Break out of loop, return NULL to continue execution in block
|
|
891
|
+
return NULL
|
|
892
|
+
if isinstance(result, EvaluationError):
|
|
893
|
+
return result
|
|
894
|
+
|
|
895
|
+
return result
|
|
896
|
+
|
|
897
|
+
def eval_foreach_statement(self, node, env, stack_trace):
|
|
898
|
+
iterable = self.eval_node(node.iterable, env, stack_trace)
|
|
899
|
+
if is_error(iterable):
|
|
900
|
+
return iterable
|
|
901
|
+
|
|
902
|
+
if not isinstance(iterable, List):
|
|
903
|
+
return EvaluationError("ForEach expects List")
|
|
904
|
+
|
|
905
|
+
result = NULL
|
|
906
|
+
for item in iterable.elements:
|
|
907
|
+
env.set(node.item.value, item)
|
|
908
|
+
result = self.eval_node(node.body, env, stack_trace)
|
|
909
|
+
if isinstance(result, ReturnValue):
|
|
910
|
+
return result
|
|
911
|
+
if isinstance(result, BreakException):
|
|
912
|
+
# Break out of loop, return NULL to continue execution in block
|
|
913
|
+
return NULL
|
|
914
|
+
if isinstance(result, EvaluationError):
|
|
915
|
+
return result
|
|
916
|
+
|
|
917
|
+
return result
|
|
918
|
+
|
|
919
|
+
def eval_watch_statement(self, node, env, stack_trace):
|
|
920
|
+
# 1. Start collecting dependencies
|
|
921
|
+
start_collecting_dependencies()
|
|
922
|
+
|
|
923
|
+
# 2. Evaluate the watched expression or block
|
|
924
|
+
if node.watched_expr:
|
|
925
|
+
# Explicit watch: watch expr => block
|
|
926
|
+
# Evaluate expression to capture dependencies
|
|
927
|
+
res = self.eval_node(node.watched_expr, env, stack_trace)
|
|
928
|
+
if is_error(res):
|
|
929
|
+
stop_collecting_dependencies()
|
|
930
|
+
return res
|
|
931
|
+
else:
|
|
932
|
+
# Implicit watch: watch block
|
|
933
|
+
# Evaluate block to capture dependencies AND execute it
|
|
934
|
+
res = self.eval_node(node.reaction, env, stack_trace)
|
|
935
|
+
if is_error(res):
|
|
936
|
+
stop_collecting_dependencies()
|
|
937
|
+
return res
|
|
938
|
+
|
|
939
|
+
# 3. Stop collecting and get dependencies
|
|
940
|
+
deps = stop_collecting_dependencies()
|
|
941
|
+
|
|
942
|
+
# 4. Define the reaction callback WITH GUARD against infinite recursion
|
|
943
|
+
executing = [False] # Mutable flag to track execution state
|
|
944
|
+
def reaction_callback(new_val):
|
|
945
|
+
if executing[0]:
|
|
946
|
+
# Already executing, skip to prevent infinite loop
|
|
947
|
+
return
|
|
948
|
+
executing[0] = True
|
|
949
|
+
try:
|
|
950
|
+
# Re-evaluate the reaction block WITHOUT collecting dependencies
|
|
951
|
+
result = self.eval_node(node.reaction, env, [])
|
|
952
|
+
# Check for errors but don't propagate them (watchers shouldn't crash the program)
|
|
953
|
+
if is_error(result):
|
|
954
|
+
pass # Silently ignore watcher errors to prevent cascading failures
|
|
955
|
+
except Exception as e:
|
|
956
|
+
pass # Silently ignore exceptions in watchers
|
|
957
|
+
finally:
|
|
958
|
+
executing[0] = False
|
|
959
|
+
|
|
960
|
+
# 5. Register callback for each dependency
|
|
961
|
+
for dep_env, name in deps:
|
|
962
|
+
dep_env.add_watcher(name, reaction_callback)
|
|
963
|
+
|
|
964
|
+
return NULL
|
|
965
|
+
|
|
966
|
+
def eval_log_statement(self, node, env, stack_trace):
|
|
967
|
+
"""
|
|
968
|
+
Evaluates a LOG statement: log > filepath
|
|
969
|
+
Redirects subsequent print output to the specified file.
|
|
970
|
+
Output is automatically restored when the current block exits.
|
|
971
|
+
"""
|
|
972
|
+
import sys
|
|
973
|
+
import os
|
|
974
|
+
|
|
975
|
+
# 1. Evaluate the filepath expression
|
|
976
|
+
filepath_obj = self.eval_node(node.filepath, env, stack_trace)
|
|
977
|
+
if is_error(filepath_obj):
|
|
978
|
+
return filepath_obj
|
|
979
|
+
|
|
980
|
+
# 2. Convert to string
|
|
981
|
+
if hasattr(filepath_obj, 'value'):
|
|
982
|
+
filepath = str(filepath_obj.value)
|
|
983
|
+
else:
|
|
984
|
+
filepath = str(filepath_obj)
|
|
985
|
+
|
|
986
|
+
# 3. Normalize path (handle relative paths relative to CWD)
|
|
987
|
+
if not os.path.isabs(filepath):
|
|
988
|
+
filepath = os.path.join(os.getcwd(), filepath)
|
|
989
|
+
|
|
990
|
+
# 4. Open file for writing (append mode)
|
|
991
|
+
try:
|
|
992
|
+
log_file = open(filepath, 'a')
|
|
993
|
+
except Exception as e:
|
|
994
|
+
return new_error(f"Cannot open log file '{filepath}': {e}", stack_trace)
|
|
995
|
+
|
|
996
|
+
# 5. Save current stdout state for restoration
|
|
997
|
+
if not hasattr(env, '_stdout_stack'):
|
|
998
|
+
env._stdout_stack = []
|
|
999
|
+
env._stdout_stack.append(sys.stdout)
|
|
1000
|
+
|
|
1001
|
+
# 6. Redirect stdout to this file
|
|
1002
|
+
sys.stdout = log_file
|
|
1003
|
+
|
|
1004
|
+
# 7. Store the file handle for cleanup
|
|
1005
|
+
if not hasattr(env, '_log_files'):
|
|
1006
|
+
env._log_files = []
|
|
1007
|
+
env._log_files.append(log_file)
|
|
1008
|
+
|
|
1009
|
+
return NULL
|
|
1010
|
+
|
|
1011
|
+
def _restore_stdout(self, env):
|
|
1012
|
+
"""Restore stdout to previous state and close log file (scope-aware)"""
|
|
1013
|
+
import sys
|
|
1014
|
+
|
|
1015
|
+
# Restore stdout if we have a saved state
|
|
1016
|
+
if hasattr(env, '_stdout_stack') and env._stdout_stack:
|
|
1017
|
+
previous_stdout = env._stdout_stack.pop()
|
|
1018
|
+
|
|
1019
|
+
# Close current log file if it's a file object
|
|
1020
|
+
if hasattr(sys.stdout, 'close') and sys.stdout != sys.__stdout__:
|
|
1021
|
+
try:
|
|
1022
|
+
sys.stdout.flush()
|
|
1023
|
+
sys.stdout.close()
|
|
1024
|
+
except Exception:
|
|
1025
|
+
pass
|
|
1026
|
+
|
|
1027
|
+
# Restore previous stdout
|
|
1028
|
+
sys.stdout = previous_stdout
|
|
1029
|
+
|
|
1030
|
+
# Remove from log files list
|
|
1031
|
+
if hasattr(env, '_log_files') and env._log_files:
|
|
1032
|
+
env._log_files.pop()
|
|
1033
|
+
|
|
1034
|
+
# === MODULE LOADING (FULL LOGIC) ===
|
|
1035
|
+
|
|
1036
|
+
def _check_import_permission(self, val, importer):
|
|
1037
|
+
"""Helper to check if a file is allowed to import a specific value."""
|
|
1038
|
+
allowed = getattr(val, '_allowed_files', [])
|
|
1039
|
+
if not allowed:
|
|
1040
|
+
return True
|
|
1041
|
+
|
|
1042
|
+
try:
|
|
1043
|
+
importer_norm = os.path.normpath(os.path.abspath(importer))
|
|
1044
|
+
for a in allowed:
|
|
1045
|
+
a_norm = os.path.normpath(os.path.abspath(a))
|
|
1046
|
+
if importer_norm == a_norm:
|
|
1047
|
+
return True
|
|
1048
|
+
if a in importer:
|
|
1049
|
+
return True
|
|
1050
|
+
except Exception:
|
|
1051
|
+
return False
|
|
1052
|
+
|
|
1053
|
+
return False
|
|
1054
|
+
|
|
1055
|
+
    def eval_use_statement(self, node, env, stack_trace):
        from ..module_cache import get_cached_module, cache_module, get_module_candidates, normalize_path, invalidate_module
        from ..builtin_modules import is_builtin_module, get_builtin_module
        from ..stdlib_integration import is_stdlib_module, get_stdlib_module

        # 1. Determine File Path
        file_path_attr = getattr(node, 'file_path', None) or getattr(node, 'embedded_ref', None)
        file_path = file_path_attr.value if hasattr(file_path_attr, 'value') else file_path_attr
        if not file_path:
            return EvaluationError("use: missing file path")

        debug_log(" UseStatement loading", file_path)

        # 1a. Check if this is a stdlib module (fs, http, json, datetime, crypto, blockchain)
        if is_stdlib_module(file_path):
            debug_log(f" Loading stdlib module: {file_path}")
            try:
                module_env = get_stdlib_module(file_path, self)
                if module_env:
                    # Handle named imports: use {read_file, write_file} from "stdlib/fs"
                    is_named_import = getattr(node, 'is_named_import', False)
                    names = getattr(node, 'names', [])
                    alias = getattr(node, 'alias', None)

                    if is_named_import and names:
                        # Import specific functions
                        for name_node in names:
                            name = name_node.value if hasattr(name_node, 'value') else str(name_node)
                            value = module_env.get(name)
                            if value is None:
                                return EvaluationError(f"'{name}' is not exported from {file_path}")
                            env.set(name, value)
                            debug_log(f" Imported '{name}' from {file_path}", value)
                    elif alias:
                        # Import as alias: use "stdlib/fs" as fs
                        env.set(alias, module_env)
                    else:
                        # Import all functions into current scope
                        for key in module_env.store.keys():
                            env.set(key, module_env.get(key))
                    return NULL
                else:
                    return EvaluationError(f"Stdlib module '{file_path}' not available")
            except Exception as e:
                return EvaluationError(f"Error loading stdlib module '{file_path}': {str(e)}")

        # 1b. Check if this is a builtin module (crypto, datetime, math)
        if is_builtin_module(file_path):
            debug_log(f" Loading builtin module: {file_path}")
            module_env = get_builtin_module(file_path, self)
            if module_env:
                alias = getattr(node, 'alias', None)
                if alias:
                    env.set(alias, module_env)
                else:
                    # Import all functions into current scope
                    for key in module_env.store.keys():
                        env.set(key, module_env.get(key))
                return NULL
            else:
                return EvaluationError(f"Builtin module '{file_path}' not available")

        normalized_path = normalize_path(file_path)

        # 2. Check Cache
        module_env = get_cached_module(normalized_path)

        # 3. Load if not cached
        if not module_env:
            # Get the importing file's path for relative resolution
            importer_file = None
            __file_obj = env.get("__file__")
            if __file_obj:
                if hasattr(__file_obj, 'value'):
                    importer_file = __file_obj.value
                elif isinstance(__file_obj, str):
                    importer_file = __file_obj

            candidates = get_module_candidates(file_path, importer_file)
            module_env = Environment()
            loaded = False
            parse_errors = []

            # Circular dependency placeholder
            try:
                cache_module(normalized_path, module_env)
            except Exception:
                pass

            for candidate in candidates:
                try:
                    if not os.path.exists(candidate):
                        continue

                    debug_log(" Found module file", candidate)
                    with open(candidate, 'r', encoding='utf-8') as f:
                        code = f.read()

                    from ..lexer import Lexer
                    from ..parser import Parser

                    lexer = Lexer(code)
                    parser = Parser(lexer)
                    program = parser.parse_program()

                    if getattr(parser, 'errors', None):
                        parse_errors.append((candidate, parser.errors))
                        continue

                    # Set __file__ in module environment so it can do relative imports
                    module_env.set("__file__", String(os.path.abspath(candidate)))
                    # Set __MODULE__ to the module path (not "__main__" since it's imported)
                    module_env.set("__MODULE__", String(file_path))

                    # Recursive evaluation
                    self.eval_node(program, module_env)

                    # Update cache with fully loaded env
                    cache_module(normalized_path, module_env)
                    loaded = True
                    break
                except Exception as e:
                    parse_errors.append((candidate, str(e)))

            if not loaded:
                try:
                    invalidate_module(normalized_path)
                except Exception:
                    pass
                return EvaluationError(f"Module not found or failed to load: {file_path}")

        # 4. Bind to Current Environment
        is_named_import = getattr(node, 'is_named_import', False)
        names = getattr(node, 'names', [])
        alias = getattr(node, 'alias', None)

        if is_named_import and names:
            # Handle: use { name1, name2 } from "./file.zx"
            exports = module_env.get_exports()
            __file_obj = env.get("__file__")
            importer_file = None
            if __file_obj:
                importer_file = __file_obj.value if hasattr(__file_obj, 'value') else __file_obj

            for name_node in names:
                name = name_node.value if hasattr(name_node, 'value') else str(name_node)

                # First check if there's a Module object with exports
                value = None
                for key in module_env.store if hasattr(module_env, 'store') else []:
                    potential_module = module_env.get(key)
                    if potential_module and hasattr(potential_module, 'exports') and hasattr(potential_module, 'get_member'):
                        if name in potential_module.exports:
                            member = potential_module.get_member(name)
                            if member:
                                value = member.value
                                break

                # Try to get from exports if not found in Module
                if value is None:
                    value = exports.get(name)

                if value is None:
                    # Fallback: try to get from module environment directly
                    value = module_env.get(name)

                if value is None:
                    return EvaluationError(f"'{name}' is not exported from {file_path}")

                # Security check
                if importer_file and not self._check_import_permission(value, importer_file):
                    return EvaluationError(f"Permission denied: cannot import '{name}' from '{file_path}'")

                env.set(name, value)
                debug_log(f" Imported '{name}' from {file_path}", value)

        elif alias:
            # Handle: use "./file.zx" as alias
            env.set(alias, module_env)
        else:
            # Handle: use "./file.zx" (import all exports)
            try:
                exports = module_env.get_exports()
                __file_obj = env.get("__file__")
                importer_file = None
                if __file_obj:
                    importer_file = __file_obj.value if hasattr(__file_obj, 'value') else __file_obj

                for name, value in exports.items():
                    if importer_file:
                        if not self._check_import_permission(value, importer_file):
                            return EvaluationError(f"Permission denied for export {name}")
                    env.set(name, value)
            except Exception:
                # Fallback: expose module as filename object
                module_name = os.path.basename(file_path)
                env.set(module_name, module_env)

        return NULL

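Note the ordering in the loader above: the empty module environment is cached before evaluation, so a module that circularly imports its importer receives the partially-initialized environment instead of recursing forever. A minimal sketch of that discipline (hypothetical loader callback, plain dicts standing in for environments):

cache = {}

def load(path, evaluate):
    if path in cache:
        return cache[path]      # mid-cycle, this may be partially initialized
    env = {}
    cache[path] = env           # placeholder registered BEFORE evaluation
    evaluate(env)               # a circular import now finds the placeholder
    return env
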
    def eval_from_statement(self, node, env, stack_trace):
        """Full implementation of FromStatement."""
        from ..module_cache import get_cached_module, cache_module, get_module_candidates, normalize_path, invalidate_module

        # 1. Resolve Path
        file_path = node.file_path
        if not file_path:
            return EvaluationError("from: missing file path")

        normalized_path = normalize_path(file_path)
        module_env = get_cached_module(normalized_path)

        # 2. Load Logic (Explicitly repeated to ensure isolation)
        if not module_env:
            # Get the importing file's path for relative resolution
            importer_file = None
            __file_obj = env.get("__file__")
            if __file_obj:
                if hasattr(__file_obj, 'value'):
                    importer_file = __file_obj.value
                elif isinstance(__file_obj, str):
                    importer_file = __file_obj

            candidates = get_module_candidates(file_path, importer_file)
            module_env = Environment()
            loaded = False

            try:
                cache_module(normalized_path, module_env)
            except Exception:
                pass

            for candidate in candidates:
                try:
                    if not os.path.exists(candidate):
                        continue

                    with open(candidate, 'r', encoding='utf-8') as f:
                        code = f.read()

                    from ..lexer import Lexer
                    from ..parser import Parser

                    lexer = Lexer(code)
                    parser = Parser(lexer)
                    program = parser.parse_program()

                    if getattr(parser, 'errors', None):
                        continue

                    # Set __file__ in module environment so it can do relative imports
                    module_env.set("__file__", String(os.path.abspath(candidate)))
                    # Set __MODULE__ to the module path (not "__main__" since it's imported)
                    module_env.set("__MODULE__", String(file_path))

                    self.eval_node(program, module_env)
                    cache_module(normalized_path, module_env)
                    loaded = True
                    break
                except Exception:
                    continue

            if not loaded:
                try:
                    invalidate_module(normalized_path)
                except Exception:
                    pass
                return EvaluationError(f"From import: failed to load module {file_path}")

        # 3. Import Specific Names
        __file_obj = env.get("__file__")
        importer_file = None
        if __file_obj:
            importer_file = __file_obj.value if hasattr(__file_obj, 'value') else __file_obj

        for name_pair in node.imports:
            # name_pair is [source_name, dest_name] (dest_name optional)
            src = name_pair[0].value if hasattr(name_pair[0], 'value') else str(name_pair[0])
            dest = name_pair[1].value if len(name_pair) > 1 and name_pair[1] else src

            # First, check if there's a Module object in the environment
            val = None
            found_in_module = False

            # Look for modules in the module_env
            for key in module_env.store if hasattr(module_env, 'store') else []:
                potential_module = module_env.get(key)
                if potential_module and hasattr(potential_module, 'exports') and hasattr(potential_module, 'get_member'):
                    # This is a Module object - check if src is in its exports
                    if src in potential_module.exports:
                        member = potential_module.get_member(src)
                        if member:
                            val = member.value
                            found_in_module = True
                            break

            # If not found in module exports, try standard exports
            if not found_in_module:
                # Retrieve from module exports
                exports = module_env.get_exports() if hasattr(module_env, 'get_exports') else {}
                val = exports.get(src)

                if val is None:
                    # Fallback: check if it's in the environment directly
                    val = module_env.get(src)

            if val is None:
                return EvaluationError(f"'{src}' is not exported from {file_path}")

            # Security Check
            if importer_file and not self._check_import_permission(val, importer_file):
                return EvaluationError(f"Permission denied: cannot import '{src}' into '{importer_file}'")

            env.set(dest, val)

        return NULL

    def eval_export_statement(self, node, env, stack_trace):
        names = []
        if hasattr(node, 'names') and node.names:
            names = [n.value for n in node.names]
        elif hasattr(node, 'name') and node.name:
            names = [node.name.value]

        # Check if we're inside a module
        current_module = env.get('__current_module__') if env else None

        for nm in names:
            val = env.get(nm)
            # env.get returns None for missing names; test identity so that
            # defined-but-falsy values can still be exported
            if val is None:
                return EvaluationError(f"Cannot export undefined: {nm}")

            # If inside a module, add to module's exports list
            if current_module and hasattr(current_module, 'add_export'):
                current_module.add_export(nm)

            # Also do standard env export
            try:
                env.export(nm, val)
            except Exception as e:
                return EvaluationError(f"Export failed: {str(e)}")

        return NULL

    # === SECURITY STATEMENTS (Full Logic) ===

    def eval_seal_statement(self, node, env, stack_trace):
        target_node = node.target
        if not target_node:
            return EvaluationError("seal: missing target")

        if isinstance(target_node, Identifier):
            name = target_node.value
            val = env.get(name)
            # env.get returns None when the identifier is missing
            if val is None:
                return EvaluationError(f"seal: identifier '{name}' not found")

            sealed = SealedObject(val)
            env.set(name, sealed)
            return sealed

        elif isinstance(target_node, PropertyAccessExpression):
            obj = self.eval_node(target_node.object, env, stack_trace)
            if is_error(obj):
                return obj

            # Safely extract property key
            if hasattr(target_node.property, 'value'):
                prop_key = target_node.property.value
            else:
                # Evaluate property expression
                prop_result = self.eval_node(target_node.property, env, stack_trace)
                if is_error(prop_result):
                    return prop_result
                prop_key = prop_result.value if hasattr(prop_result, 'value') else str(prop_result)

            if isinstance(obj, Map):
                if prop_key not in obj.pairs:
                    return EvaluationError(f"seal: key '{prop_key}' missing")

                obj.pairs[prop_key] = SealedObject(obj.pairs[prop_key])
                return obj.pairs[prop_key]

            if hasattr(obj, 'set') and hasattr(obj, 'get'):
                curr = obj.get(prop_key)
                if curr is None:
                    return EvaluationError(f"seal: prop '{prop_key}' missing")

                sealed = SealedObject(curr)
                obj.set(prop_key, sealed)
                return sealed

        return EvaluationError("seal: unsupported target")

    def eval_audit_statement(self, node, env, stack_trace):
        """Evaluate audit statement for compliance logging.

        Syntax: audit data_name, "action_type", [optional_timestamp];

        Returns a log entry dictionary with the audited data reference.
        """
        from datetime import datetime
        from ..object import String, Map

        # Get the data identifier
        if not isinstance(node.data_name, Identifier):
            return EvaluationError(f"audit: expected identifier, got {type(node.data_name).__name__}")

        data_name = node.data_name.value

        # Evaluate the action type string
        if isinstance(node.action_type, StringLiteral):
            action_type = node.action_type.value
        else:
            action_type_result = self.eval_node(node.action_type, env, stack_trace)
            if is_error(action_type_result):
                return action_type_result
            action_type = to_string(action_type_result)

        # Get optional timestamp
        timestamp = None
        if node.timestamp:
            if isinstance(node.timestamp, Identifier):
                timestamp = env.get(node.timestamp.value)
            else:
                timestamp = self.eval_node(node.timestamp, env, stack_trace)
                if is_error(timestamp):
                    return timestamp

        # If no timestamp provided, use current time
        if timestamp is None:
            timestamp = datetime.now().isoformat()
        else:
            timestamp = to_string(timestamp)

        # Get reference to the audited data
        audited_data = env.get(data_name)
        if audited_data is None:
            return EvaluationError(f"audit: identifier '{data_name}' not found")

        # Create audit log entry as a Map object and record via security context
        audit_log_pairs = {
            "data_name": String(data_name),
            "action": String(action_type),
            "timestamp": String(timestamp),
            "data_type": String(type(audited_data).__name__),
        }

        # Register to AuditLog via SecurityContext for persistence/inspection
        try:
            ctx = get_security_context()
            ctx.log_audit(data_name, action_type, type(audited_data).__name__, timestamp, {'source': 'audit_statement'})
            # Also emit a trail event so live traces can capture it
            ctx.emit_event('audit', {'data_name': data_name, 'action': action_type})
        except Exception:
            pass

        return Map(audit_log_pairs)

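The Map returned above is shaped like this plain-dict equivalent (illustrative values only, not produced by the module):

from datetime import datetime

entry = {
    "data_name": "user_record",                  # identifier that was audited
    "action": "read",                            # the action_type string
    "timestamp": datetime.now().isoformat(),     # supplied or generated
    "data_type": "Map",                          # Python type name of the value
}
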
    def eval_restrict_statement(self, node, env, stack_trace):
        """Evaluate restrict statement for field-level access control.

        Syntax: restrict obj.field = "restriction_type";

        Returns a restriction entry with the applied rule.
        """
        from datetime import datetime, timezone
        from ..object import String, Map

        # Get target field information
        if not isinstance(node.target, PropertyAccessExpression):
            return EvaluationError("restrict: target must be object.field")

        obj_name = node.target.object.value if isinstance(node.target.object, Identifier) else str(node.target.object)
        field_name = node.target.property.value if isinstance(node.target.property, Identifier) else str(node.target.property)

        # Get restriction type
        if isinstance(node.restriction_type, StringLiteral):
            restriction = node.restriction_type.value
        else:
            restriction = to_string(self.eval_node(node.restriction_type, env, stack_trace))

        # Get the object to apply restriction
        obj = env.get(obj_name)
        if obj is None:
            return EvaluationError(f"restrict: object '{obj_name}' not found")

        # Register restriction with security context so enforcement can consult it
        try:
            ctx = get_security_context()
            entry = ctx.register_restriction(f"{obj_name}.{field_name}", field_name, restriction)
        except Exception:
            entry = None

        # Return restriction entry (include id if available)
        result_map = {
            "target": String(f"{obj_name}.{field_name}"),
            "field": String(field_name),
            "restriction": String(restriction),
            "status": String("applied"),
            "timestamp": String(datetime.now(timezone.utc).isoformat())
        }
        if entry and entry.get('id'):
            result_map['id'] = String(entry.get('id'))

        return Map(result_map)

    def eval_sandbox_statement(self, node, env, stack_trace):
        """Evaluate sandbox statement for isolated execution environments.

        Syntax: sandbox { code }

        Creates a new isolated environment and executes code within it.
        """

        # Create isolated environment (child of current)
        sandbox_env = Environment(outer=env)
        # Mark as running inside a sandbox and attach a default policy name
        sandbox_env.set('__in_sandbox__', True)
        # Allow caller to specify a policy on the node (future enhancement)
        sandbox_policy = getattr(node, 'policy', None) or 'default'
        sandbox_env.set('__sandbox_policy__', sandbox_policy)
        # Ensure default sandbox policy exists
        try:
            sec = get_security_context()
            if 'default' not in sec.sandbox_policies:
                # conservative default: disallow file I/O builtins
                sec.register_sandbox_policy('default', allowed_builtins=[
                    'now', 'timestamp', 'random', 'to_hex', 'from_hex', 'sqrt',
                    'string', 'len', 'first', 'rest', 'push', 'reduce', 'map', 'filter',
                    'debug_log', 'debug_trace'
                ])
        except Exception:
            pass

        # Execute body in sandbox
        if node.body is None:
            return NULL

        result = self.eval_node(node.body, sandbox_env, stack_trace)

        # Register sandbox run for observability
        try:
            ctx = get_security_context()
            # store a minimal summary (stringified result) for now
            result_summary = None
            try:
                result_summary = str(result)
            except Exception:
                result_summary = None
            # record the policy that was in force for this run
            ctx.register_sandbox_run(parent_context=getattr(env, 'name', None), policy=sandbox_policy, result_summary=result_summary)
        except Exception:
            pass

        # Return result from sandbox execution
        return result if result is not None else NULL

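The isolation here rests on the child-environment scope chain: writes land in the sandbox's own store while reads fall through to the outer scope. A minimal sketch of that behavior (hypothetical Environment stand-in):

class Env:
    def __init__(self, outer=None):
        self.store, self.outer = {}, outer

    def get(self, k):
        if k in self.store:
            return self.store[k]
        return self.outer.get(k) if self.outer else None

    def set(self, k, v):
        self.store[k] = v          # always writes locally

outer = Env()
outer.set('x', 1)
sandbox = Env(outer)
sandbox.set('x', 99)               # shadows, does not mutate outer
print(outer.get('x'), sandbox.get('x'))   # 1 99
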
    def eval_trail_statement(self, node, env, stack_trace):
        """Evaluate trail statement for real-time audit/debug/print tracking.

        Syntax:
            trail audit;   // follow all audit events
            trail print;   // follow all print statements
            trail debug;   // follow all debug output

        Sets up event tracking and returns trail configuration.
        """
        from datetime import datetime, timezone
        from ..object import String, Map

        trail_type = node.trail_type
        filter_key = None

        if isinstance(node.filter_key, StringLiteral):
            filter_key = node.filter_key.value
        elif node.filter_key:
            filter_result = self.eval_node(node.filter_key, env, stack_trace)
            if not is_error(filter_result):
                filter_key = to_string(filter_result)

        # Register trail with security context so runtime can wire event sinks
        try:
            ctx = get_security_context()
            entry = ctx.register_trail(trail_type, filter_key)
        except Exception:
            entry = None

        # Create trail configuration entry (include id if available)
        trail_config = {
            "type": String(trail_type),
            "filter": String(filter_key) if filter_key else String("*"),
            "enabled": String("true"),
            "timestamp": String(datetime.now(timezone.utc).isoformat())
        }
        if entry and entry.get('id'):
            trail_config['id'] = String(entry.get('id'))

        return Map(trail_config)

    def eval_tx_statement(self, node, env, stack_trace):
        """Evaluate transaction block - executes statements in transactional context.

        For now, this simply executes the block body.
        In a full blockchain implementation, this would:
        - Create a transaction context
        - Track state changes
        - Support rollback on failure
        - Emit transaction events
        """
        debug_log("eval_tx_statement", "Executing transaction block")

        # Execute the transaction body
        result = self.eval_block_statement(node.body, env, stack_trace)

        # Return the result of the last statement in the block
        return result if result is not None else NULL

    def eval_contract_statement(self, node, env, stack_trace):
        # Prepare initial storage values
        storage = {}
        for sv in node.storage_vars:
            init = NULL
            if getattr(sv, 'initial_value', None):
                init = self.eval_node(sv.initial_value, env, stack_trace)
                if is_error(init):
                    return init
            storage[sv.name.value] = init

        actions = {}
        for act in node.actions:
            # Evaluate action node to get Action object
            action_obj = Action(act.parameters, act.body, env)
            actions[act.name.value] = action_obj

        # Pass the AST nodes as storage_vars, not the storage dict
        contract = SmartContract(node.name.value, node.storage_vars, actions)
        contract.deploy()

        # Initialize storage with evaluated initial values
        for var_name, init_val in storage.items():
            contract.storage.set(var_name, init_val)

        # Check if contract has a constructor and execute it
        if 'constructor' in actions:
            constructor = actions['constructor']
            # Create contract environment with storage access
            contract_env = Environment(outer=env)

            # Set up TX context
            from ..object import Map, String, Integer
            tx_context = Map({
                String("caller"): String("system"),  # Default caller
                String("timestamp"): Integer(int(__import__('time').time())),
            })
            contract_env.set("TX", tx_context)

            # Pre-populate environment with storage variables so assignments update storage
            for storage_var in node.storage_vars:
                var_name = storage_var.name.value
                # Get initial value from storage (which was set during deploy)
                initial_val = contract.storage.get(var_name)
                if initial_val is not None:
                    contract_env.set(var_name, initial_val)

            # Execute constructor body
            result = self.eval_node(constructor.body, contract_env, stack_trace)
            if is_error(result):
                return result

            # After constructor runs, update contract storage with any modified variables
            for storage_var in node.storage_vars:
                var_name = storage_var.name.value
                # Get the value from constructor environment
                val = contract_env.get(var_name)
                if val is not None:
                    # Update persistent storage
                    contract.storage.set(var_name, val)

        env.set(node.name.value, contract)
        return NULL

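The constructor handling above is a pre-populate / write-back round trip: declared storage values are copied into the constructor scope, the body mutates that scope, and only the declared names are copied back. A sketch with plain dicts (hypothetical variable names):

storage = {'owner': None, 'supply': 0}   # persistent contract storage
scope = dict(storage)                    # pre-populate constructor scope

scope['owner'] = 'system'                # constructor assignments
scope['supply'] = 1000

for name in storage:                     # write back only declared vars
    if scope.get(name) is not None:
        storage[name] = scope[name]

print(storage)                           # {'owner': 'system', 'supply': 1000}
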
    def eval_entity_statement(self, node, env, stack_trace):
        props = {}
        methods = {}
        injected_deps = []  # Track which properties are injected dependencies

        # Handle inheritance - get parent reference but DON'T merge properties yet
        parent_entity = None
        if node.parent:
            parent_entity = env.get(node.parent.value)
            if not parent_entity:
                return EvaluationError(f"Parent entity '{node.parent.value}' not found")
            # Check if it's a SecurityEntityDef (the actual entity class we use)
            from ..security import EntityDefinition as SecurityEntityDef
            if not isinstance(parent_entity, SecurityEntityDef):
                return EvaluationError(f"'{node.parent.value}' exists but is not an entity")
            # Note: We no longer copy parent properties here - they'll be accessed via parent_ref

        for prop in node.properties:
            # Check if this is an injected dependency
            is_injected = getattr(prop, 'is_injected', False)

            # Handle both dict and object formats
            if isinstance(prop, dict):
                p_name = prop['name']
                p_type = prop['type']
            else:
                p_name = prop.name.value
                p_type = prop.type.value if hasattr(prop.type, 'value') else str(prop.type)

            if is_injected:
                # This is an injected dependency - mark for injection during instantiation
                injected_deps.append(p_name)
                props[p_name] = {"type": p_type, "default_value": NULL, "injected": True}
                debug_log("eval_entity_statement", f"Marked {p_name} as injected dependency")
            else:
                def_val = NULL

                if isinstance(prop, dict):
                    # For dict format, default_value is in the dict
                    if 'default_value' in prop:
                        def_val = self.eval_node(prop['default_value'], env, stack_trace)
                        if is_error(def_val):
                            return def_val
                else:
                    # For object format, default_value is an attribute
                    if getattr(prop, 'default_value', None):
                        def_val = self.eval_node(prop.default_value, env, stack_trace)
                        if is_error(def_val):
                            return def_val

                props[p_name] = {"type": p_type, "default_value": def_val}

        # Process methods (actions defined inside the entity)
        if hasattr(node, 'methods') and node.methods:
            for method in node.methods:
                # Don't evaluate the action statement - that would just store it in env and return NULL
                # Instead, create the Action object directly
                from ..object import Action
                method_action = Action(method.parameters, method.body, env)

                # Store the method by name
                method_name = method.name.value if hasattr(method, 'name') else str(method)
                methods[method_name] = method_action

        # Create entity with methods and parent reference
        # Now parent_ref points to the actual parent, and props only contains THIS entity's properties
        # Import SecurityEntityDef first for isinstance check
        from ..security import EntityDefinition as SecurityEntityDef

        parent_ref = parent_entity if (node.parent and isinstance(parent_entity, SecurityEntityDef)) else None

        # Use the EntityDefinition from security.py which supports methods
        entity = SecurityEntityDef(node.name.value, props, methods, parent_ref)

        # Store injected dependencies list for constructor use
        entity.injected_deps = injected_deps

        env.set(node.name.value, entity)
        return NULL

    def eval_verify_statement(self, node, env, stack_trace):
        """Evaluate VERIFY statement - supports multiple forms including extended modes"""
        from .utils import is_truthy as check_truthy
        import os
        import re

        # Handle extended verification modes
        if node.mode:
            return self._eval_verify_mode(node, env, stack_trace)

        # Special case: verify { cond1, cond2, ... }, "message"
        # When condition is None but logic_block exists, the block contains the conditions
        if node.condition is None and node.logic_block is not None:
            # The logic_block contains expressions that should all be true
            from ..zexus_ast import BlockStatement, ExpressionStatement
            if isinstance(node.logic_block, BlockStatement):
                all_true = True
                for stmt in node.logic_block.statements:
                    # Each statement should be an expression to evaluate
                    if isinstance(stmt, ExpressionStatement):
                        cond_val = self.eval_node(stmt.expression, env, stack_trace)
                    else:
                        cond_val = self.eval_node(stmt, env, stack_trace)

                    if is_error(cond_val):
                        return cond_val

                    if not check_truthy(cond_val):
                        all_true = False
                        break

                if not all_true:
                    error_msg = "Verification failed"
                    if node.message:
                        msg_val = self.eval_node(node.message, env, stack_trace)
                        if not is_error(msg_val):
                            error_msg = str(msg_val.value if hasattr(msg_val, 'value') else msg_val)
                    return EvaluationError(error_msg)

                # All conditions passed
                from ..object import Boolean
                return Boolean(True)

        # Simple assertion form: verify condition, "message"
        if node.condition is not None:
            condition_val = self.eval_node(node.condition, env, stack_trace)
            if is_error(condition_val):
                return condition_val

            # Check if condition is truthy
            if not check_truthy(condition_val):
                error_msg = "Verification failed"
                if node.message:
                    msg_val = self.eval_node(node.message, env, stack_trace)
                    if not is_error(msg_val):
                        error_msg = str(msg_val.value if hasattr(msg_val, 'value') else msg_val)

                # Execute logic block if provided
                if node.logic_block:
                    block_result = self.eval_node(node.logic_block, env, stack_trace)
                    if is_error(block_result):
                        return block_result

                return EvaluationError(error_msg)

            # Verification passed
            from ..object import Boolean
            return Boolean(True)

        # Complex wrapper form: verify(target, [conditions...])
        if node.target is not None:
            target = self.eval_node(node.target, env, stack_trace)
            if is_error(target):
                return target

            checks = []
            if node.conditions:
                for cond in node.conditions:
                    val = self.eval_node(cond, env, stack_trace)
                    if is_error(val):
                        return val

                    # Bind val as a default argument: a bare `lambda ctx: val`
                    # would capture the loop variable late, so every check would
                    # see the last condition's value.
                    checks.append(VerificationCheck(str(cond), lambda ctx, v=val: v))

            wrapped = VerifyWrapper(target, checks, node.error_handler)
            get_security_context().register_verify_check(str(node.target), wrapped)
            return wrapped

        # Neither form provided
        return EvaluationError("Invalid VERIFY statement: requires condition or target")

    def _eval_verify_mode(self, node, env, stack_trace):
        """Evaluate verify statement with specific mode (data, access, db, env, pattern)"""
        from .utils import is_truthy as check_truthy
        import os
        import re
        from ..object import Boolean, String

        mode = node.mode

        # verify:data - Data/format verification
        if mode == 'data':
            return self._eval_verify_data(node, env, stack_trace)

        # verify:access - Access control with blocking
        elif mode == 'access':
            return self._eval_verify_access(node, env, stack_trace)

        # verify:db - Database verification
        elif mode == 'db':
            return self._eval_verify_db(node, env, stack_trace)

        # verify:env - Environment variable verification
        elif mode == 'env':
            return self._eval_verify_env(node, env, stack_trace)

        # verify:pattern - Pattern matching
        elif mode == 'pattern':
            return self._eval_verify_pattern(node, env, stack_trace)

        return EvaluationError(f"Unknown verification mode: {mode}")

    def _eval_verify_data(self, node, env, stack_trace):
        """Evaluate verify:data - data/format verification"""
        from .utils import is_truthy as check_truthy
        import re
        from ..object import Boolean, String

        # Evaluate the value to verify
        value_val = self.eval_node(node.condition, env, stack_trace)
        if is_error(value_val):
            return value_val

        value = value_val.value if hasattr(value_val, 'value') else str(value_val)
        verify_type = node.verify_type

        # Evaluate pattern/expected value
        pattern_val = self.eval_node(node.pattern, env, stack_trace) if node.pattern else None
        if pattern_val and is_error(pattern_val):
            return pattern_val

        pattern = pattern_val.value if pattern_val and hasattr(pattern_val, 'value') else str(pattern_val) if pattern_val else None

        # Perform verification based on type
        is_valid = False

        if verify_type == 'matches':
            # Pattern matching
            if pattern:
                try:
                    is_valid = bool(re.match(pattern, str(value)))
                except (re.error, TypeError, ValueError):
                    is_valid = False

        elif verify_type == 'is_type' or verify_type == 'is':
            # Type checking
            type_map = {
                'string': str,
                'number': (int, float),
                'integer': int,
                'float': float,
                'boolean': bool,
                'bool': bool,
                'email': lambda v: '@' in str(v) and '.' in str(v),
            }
            if pattern in type_map:
                expected_type = type_map[pattern]
                # Classes like str are themselves callable, so test for type
                # objects first; only the predicate lambdas (e.g. 'email') go
                # through the callable branch.
                if isinstance(expected_type, (type, tuple)):
                    is_valid = isinstance(value, expected_type)
                else:
                    is_valid = bool(expected_type(value))

        elif verify_type == 'equals':
            # Equality check
            is_valid = str(value) == str(pattern)

        # Handle verification failure
        if not is_valid:
            error_msg = "Data verification failed"
            if node.message:
                msg_val = self.eval_node(node.message, env, stack_trace)
                if not is_error(msg_val):
                    error_msg = str(msg_val.value if hasattr(msg_val, 'value') else msg_val)

            # Execute logic block if provided
            if node.logic_block:
                block_result = self.eval_node(node.logic_block, env, stack_trace)
                if is_error(block_result):
                    return block_result

            return EvaluationError(error_msg)

        return Boolean(True)

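Why the type-object test above matters, shown standalone: classes are themselves callable, so a plain `callable(t)` dispatch would call `str(value)` and treat every value as a valid string.

print(callable(str))                   # True - classes are callable
print(str(0))                          # '0', a non-empty (truthy) string,
                                       # so `str(value)` alone proves nothing
print(isinstance(42, (int, float)))    # True - isinstance is the correct
                                       # test for the type-object entries
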
    def _eval_verify_access(self, node, env, stack_trace):
        """Evaluate verify:access - access control with blocking actions"""
        from .utils import is_truthy as check_truthy
        from ..object import Boolean

        # Evaluate access condition
        condition_val = self.eval_node(node.condition, env, stack_trace)
        if is_error(condition_val):
            return condition_val

        # Check if access should be granted
        if not check_truthy(condition_val):
            error_msg = "Access denied"
            if node.message:
                msg_val = self.eval_node(node.message, env, stack_trace)
                if not is_error(msg_val):
                    error_msg = str(msg_val.value if hasattr(msg_val, 'value') else msg_val)

            # Execute action block (blocking actions)
            if node.action_block:
                self.eval_node(node.action_block, env, stack_trace)
                # Don't return error from block - it's for logging/actions
                # The access denial itself is the error

            # Block access by returning error
            return EvaluationError(error_msg)

        return Boolean(True)

    def _eval_verify_db(self, node, env, stack_trace):
        """Evaluate verify:db - database verification"""
        from ..object import Boolean, String

        # Evaluate value to check
        value_val = self.eval_node(node.condition, env, stack_trace)
        if is_error(value_val):
            return value_val

        value = value_val.value if hasattr(value_val, 'value') else str(value_val)

        # Evaluate table name
        table_val = self.eval_node(node.db_table, env, stack_trace) if node.db_table else None
        if table_val and is_error(table_val):
            return table_val

        table = table_val.value if table_val and hasattr(table_val, 'value') else str(table_val) if table_val else None

        # Get database query type
        query_type = node.db_query  # exists_in, unique_in, matches_in

        # Try to get database connection from environment
        # This allows users to inject their own database handlers
        db_handler = env.get('__db_handler__') if hasattr(env, 'get') else None

        is_valid = False

        if db_handler and hasattr(db_handler, query_type):
            # Use custom database handler
            try:
                result = getattr(db_handler, query_type)(table, value)
                is_valid = bool(result)
            except Exception as e:
                return EvaluationError(f"Database verification error: {str(e)}")
        else:
            # Fallback: Check if persistence module is available
            try:
                from ..persistence import get_storage_backend
                storage = get_storage_backend()

                if query_type == 'exists_in':
                    # Check if value exists
                    key = f"{table}:{value}"
                    result = storage.get(key)
                    is_valid = result is not None

                elif query_type == 'unique_in':
                    # Check if value is unique (doesn't exist)
                    key = f"{table}:{value}"
                    result = storage.get(key)
                    is_valid = result is None

                elif query_type == 'matches_in':
                    # Custom query - requires db_handler
                    return EvaluationError(f"Database query '{query_type}' requires custom db_handler")

            except Exception:
                # No database available - treat as verification failure
                is_valid = False

        # Handle verification failure
        if not is_valid:
            error_msg = "Database verification failed"
            if node.message:
                msg_val = self.eval_node(node.message, env, stack_trace)
                if not is_error(msg_val):
                    error_msg = str(msg_val.value if hasattr(msg_val, 'value') else msg_val)

            # Execute logic block if provided
            if node.logic_block:
                block_result = self.eval_node(node.logic_block, env, stack_trace)
                if is_error(block_result):
                    return block_result

            return EvaluationError(error_msg)

        return Boolean(True)

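Because dispatch is `getattr(db_handler, query_type)`, any object exposing a method named after the query type can serve as `__db_handler__`. A hypothetical in-memory handler (illustrative only):

class SetBackedHandler:
    """Hypothetical handler: rows held as (table, value) pairs in a set."""
    def __init__(self, rows):
        self.rows = set(rows)

    def exists_in(self, table, value):
        return (table, value) in self.rows

    def unique_in(self, table, value):
        return (table, value) not in self.rows

handler = SetBackedHandler([("users", "alice")])
print(handler.exists_in("users", "alice"))   # True
print(handler.unique_in("users", "bob"))     # True
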
    def _eval_verify_env(self, node, env, stack_trace):
        """Evaluate verify:env - environment variable verification"""
        import os
        from ..object import Boolean, String

        # Evaluate env var name
        var_val = self.eval_node(node.env_var, env, stack_trace)
        if is_error(var_val):
            return var_val

        var_name = var_val.value if hasattr(var_val, 'value') else str(var_val)
        verify_type = node.verify_type or 'is_set'

        # Get environment variable value
        env_value = os.environ.get(var_name)

        is_valid = False

        if verify_type == 'is_set' or verify_type == 'exists':
            # Check if env var is set
            is_valid = env_value is not None

        elif verify_type == 'equals':
            # Check if env var equals expected value
            expected_val = self.eval_node(node.expected_value, env, stack_trace) if node.expected_value else None
            if expected_val and is_error(expected_val):
                return expected_val

            expected = expected_val.value if expected_val and hasattr(expected_val, 'value') else str(expected_val) if expected_val else None
            is_valid = env_value == expected

        elif verify_type == 'matches':
            # Pattern matching on env var value
            import re
            pattern_val = self.eval_node(node.expected_value, env, stack_trace) if node.expected_value else None
            if pattern_val and is_error(pattern_val):
                return pattern_val

            pattern = pattern_val.value if pattern_val and hasattr(pattern_val, 'value') else str(pattern_val) if pattern_val else None

            if env_value and pattern:
                try:
                    is_valid = bool(re.match(pattern, env_value))
                except (re.error, TypeError, ValueError):
                    is_valid = False

        # Handle verification failure
        if not is_valid:
            error_msg = f"Environment variable verification failed: {var_name}"
            if node.message:
                msg_val = self.eval_node(node.message, env, stack_trace)
                if not is_error(msg_val):
                    error_msg = str(msg_val.value if hasattr(msg_val, 'value') else msg_val)

            # Execute logic block if provided
            if node.logic_block:
                block_result = self.eval_node(node.logic_block, env, stack_trace)
                if is_error(block_result):
                    return block_result

            return EvaluationError(error_msg)

        return Boolean(True)

    def _eval_verify_pattern(self, node, env, stack_trace):
        """Evaluate verify:pattern - pattern matching verification"""
        import re
        from ..object import Boolean

        # Evaluate value to match
        value_val = self.eval_node(node.condition, env, stack_trace)
        if is_error(value_val):
            return value_val

        value = value_val.value if hasattr(value_val, 'value') else str(value_val)

        # Evaluate pattern
        pattern_val = self.eval_node(node.pattern, env, stack_trace) if node.pattern else None
        if pattern_val and is_error(pattern_val):
            return pattern_val

        pattern = pattern_val.value if pattern_val and hasattr(pattern_val, 'value') else str(pattern_val) if pattern_val else None

        # Perform pattern matching
        is_valid = False
        if pattern:
            try:
                is_valid = bool(re.match(pattern, str(value)))
            except Exception as e:
                return EvaluationError(f"Pattern matching error: {str(e)}")

        # Handle verification failure
        if not is_valid:
            error_msg = "Pattern verification failed"
            if node.message:
                msg_val = self.eval_node(node.message, env, stack_trace)
                if not is_error(msg_val):
                    error_msg = str(msg_val.value if hasattr(msg_val, 'value') else msg_val)

            # Execute logic block if provided
            if node.logic_block:
                block_result = self.eval_node(node.logic_block, env, stack_trace)
                if is_error(block_result):
                    return block_result

            return EvaluationError(error_msg)

        return Boolean(True)

    def eval_protect_statement(self, node, env, stack_trace):
        """Evaluate PROTECT statement with full policy engine integration."""
        from ..policy_engine import get_policy_registry, PolicyBuilder
        from ..object import String as StringObj

        # Evaluate target expression
        target = self.eval_node(node.target, env, stack_trace)
        if is_error(target):
            return target

        # Get target name (for registration)
        target_name = str(node.target.value) if hasattr(node.target, 'value') else str(target)

        # Evaluate rules - could be a Map literal or BlockStatement
        rules_val = self.eval_node(node.rules, env, stack_trace)
        if is_error(rules_val):
            return rules_val

        # Convert rules to dictionary
        rules_dict = {}
        if isinstance(rules_val, Map):
            # Direct map literal: {rate_limit: 10, auth_required: true, ...}
            for k, v in rules_val.pairs.items():
                key = k.value if isinstance(k, String) else str(k)
                # Convert Zexus objects to Python values
                if isinstance(v, Integer):
                    rules_dict[key] = v.value
                elif isinstance(v, String):
                    rules_dict[key] = v.value
                elif isinstance(v, (TRUE.__class__, FALSE.__class__)):
                    rules_dict[key] = v == TRUE
                elif isinstance(v, List):
                    rules_dict[key] = [item.value if hasattr(item, 'value') else item for item in v.elements]
                else:
                    rules_dict[key] = v
        elif hasattr(rules_val, 'statements'):
            # Block statement (old style)
            for stmt in rules_val.statements:
                # Handle statement-based rules
                pass

        # Determine enforcement level
        enforcement_level = "strict"  # Default
        if hasattr(node, 'enforcement_level') and node.enforcement_level:
            enforcement_level = node.enforcement_level.lower()

        # Build policy using PolicyBuilder
        builder = PolicyBuilder(target_name)
        builder.set_enforcement(enforcement_level)

        # Parse rules and add to policy
        for rule_type, rule_config in rules_dict.items():
            if rule_type == "verify":
                # Add verification rules
                if isinstance(rule_config, list):
                    for condition in rule_config:
                        builder.add_verify_rule(str(condition))

            elif rule_type == "restrict":
                # Add restriction rules
                if isinstance(rule_config, dict):
                    for field, constraints in rule_config.items():
                        builder.add_restrict_rule(field, constraints if isinstance(constraints, list) else [constraints])

            elif rule_type in ("audit", "log_access") and rule_config:
                # Enable audit logging
                builder.enable_audit()

        # Build and register policy
        policy = builder.build()
        policy_registry = get_policy_registry()
        policy_registry.register_policy(target_name, policy)

        debug_log("eval_protect_statement", f"✓ Policy registered for {target_name} (level: {enforcement_level})")

        # Store policy reference in environment for enforcement
        env.set(f"__policy_{target_name}__", policy)

        # Also register with legacy security context for backwards compatibility
        try:
            from ..security import ProtectionPolicy, get_security_context
            policy_legacy = ProtectionPolicy(target_name, rules_dict, enforcement_level)
            get_security_context().register_protection(target_name, policy_legacy)
        except (AttributeError, NameError, ImportError):
            pass  # Legacy context may not be available

        return StringObj(f"Protection policy activated for '{target_name}' (level: {enforcement_level})")

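Composed directly, the builder calls used above look like this (hypothetical target and rule values; only the methods the evaluator already invokes, and the imports at the top of eval_protect_statement, are assumed):

builder = PolicyBuilder("user_api")
builder.set_enforcement("strict")
builder.add_verify_rule("caller.role == 'admin'")    # from a `verify` rule list
builder.add_restrict_rule("password", ["no_read"])   # from a `restrict` mapping
builder.enable_audit()                               # from `audit` / `log_access`
policy = builder.build()
get_policy_registry().register_policy("user_api", policy)
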
    def eval_middleware_statement(self, node, env, stack_trace):
        handler = self.eval_node(node.handler, env)
        if is_error(handler):
            return handler

        mw = Middleware(node.name.value, handler)
        get_security_context().middlewares[node.name.value] = mw
        return NULL

    def eval_auth_statement(self, node, env, stack_trace):
        config = self.eval_node(node.config, env)
        if is_error(config):
            return config

        c_dict = {}
        if isinstance(config, Map):
            for k, v in config.pairs.items():
                c_dict[k.value if isinstance(k, String) else str(k)] = v

        get_security_context().auth_config = AuthConfig(c_dict)
        return NULL

    def eval_throttle_statement(self, node, env, stack_trace):
        limits = self.eval_node(node.limits, env)

        rpm, burst, per_user = 100, 10, False
        if isinstance(limits, Map):
            for k, v in limits.pairs.items():
                ks = k.value if isinstance(k, String) else str(k)
                if ks == "requests_per_minute" and isinstance(v, Integer):
                    rpm = v.value
                elif ks == "burst_size" and isinstance(v, Integer):
                    burst = v.value
                elif ks == "per_user":
                    per_user = bool(isinstance(v, Boolean) and v.value)

        limiter = RateLimiter(rpm, burst, per_user)
        ctx = get_security_context()
        if not hasattr(ctx, 'rate_limiters'):
            ctx.rate_limiters = {}
        ctx.rate_limiters[str(node.target)] = limiter
        return NULL

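The `requests_per_minute`/`burst_size` pair suggests token-bucket semantics; a generic standalone sketch of that algorithm (illustrative only, not the module's RateLimiter):

import time

class TokenBucket:
    def __init__(self, rpm, burst):
        self.rate = rpm / 60.0       # tokens replenished per second
        self.capacity = burst        # maximum burst size
        self.tokens = burst
        self.last = time.monotonic()

    def allow(self):
        now = time.monotonic()
        # refill proportionally to elapsed time, capped at capacity
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1         # spend one token per request
            return True
        return False

bucket = TokenBucket(rpm=100, burst=10)
print(bucket.allow())                # True while burst tokens remain
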
    def eval_cache_statement(self, node, env, stack_trace):
        policy = self.eval_node(node.policy, env)

        ttl, inv = 3600, []
        if isinstance(policy, Map):
            for k, v in policy.pairs.items():
                ks = k.value if isinstance(k, String) else str(k)
                if ks == "ttl" and isinstance(v, Integer):
                    ttl = v.value
                elif ks == "invalidate_on" and isinstance(v, List):
                    inv = [x.value if hasattr(x, 'value') else str(x) for x in v.elements]

        cp = CachePolicy(ttl, inv)
        ctx = get_security_context()
        if not hasattr(ctx, 'cache_policies'):
            ctx.cache_policies = {}
        ctx.cache_policies[str(node.target)] = cp
        return NULL

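# --- Illustrative sketch (not part of the package source) ---
# CachePolicy(ttl, invalidate_on) is constructed above but its lookup side
# is not shown in this diff; a minimal TTL cache with event-driven
# invalidation could look like this (names are assumptions).
import time

class TTLCache:
    def __init__(self, ttl=3600, invalidate_on=()):
        self.ttl = ttl
        self.invalidate_on = set(invalidate_on)
        self._entries = {}  # key -> (value, stored_at)

    def get(self, key):
        entry = self._entries.get(key)
        if entry is None:
            return None
        value, stored_at = entry
        if time.monotonic() - stored_at > self.ttl:
            del self._entries[key]  # entry expired
            return None
        return value

    def put(self, key, value):
        self._entries[key] = (value, time.monotonic())

    def on_event(self, event_name):
        # Any event named in invalidate_on flushes the cache
        if event_name in self.invalidate_on:
            self._entries.clear()
# --- end sketch ---
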
    # === MISC STATEMENTS ===

    def eval_print_statement(self, node, env, stack_trace):
        # Check if conditional print
        if hasattr(node, 'condition') and node.condition is not None:
            # Evaluate the condition
            condition_val = self.eval_node(node.condition, env, stack_trace)
            if is_error(condition_val):
                print(f"❌ Error in print condition: {condition_val}", file=sys.stderr)
                return NULL

            # Check if condition is truthy
            is_truthy = False
            if hasattr(condition_val, 'value'):
                # Boolean, Integer, etc.
                if isinstance(condition_val.value, bool):
                    is_truthy = condition_val.value
                elif isinstance(condition_val.value, (int, float)):
                    is_truthy = condition_val.value != 0
                elif isinstance(condition_val.value, str):
                    is_truthy = len(condition_val.value) > 0
                else:
                    is_truthy = bool(condition_val.value)
            else:
                # For objects without .value, check if it's NULL
                is_truthy = not (hasattr(condition_val, 'type') and condition_val.type == 'NULL')

            # If the condition is false, don't print
            if not is_truthy:
                return NULL

        # Handle both the legacy single value and the new multiple values
        values_to_print = []

        if hasattr(node, 'values') and node.values:
            # New format with multiple values
            for expr in node.values:
                val = self.eval_node(expr, env, stack_trace)
                if is_error(val):
                    print(f"❌ Error: {val}", file=sys.stderr)
                    return NULL
                values_to_print.append(val)
        elif hasattr(node, 'value') and node.value is not None:
            # Legacy single-value format
            val = self.eval_node(node.value, env, stack_trace)
            if is_error(val):
                print(f"❌ Error: {val}", file=sys.stderr)
                return NULL
            values_to_print.append(val)
        else:
            return NULL

        # Convert all values to strings and join with spaces
        output_parts = []
        for v in values_to_print:
            part = v.inspect() if hasattr(v, 'inspect') else str(v)
            output_parts.append(part)

        output = ' '.join(output_parts)
        print(output, flush=True)  # Flush immediately for async threads

        try:
            ctx = get_security_context()
            ctx.emit_event('print', {'value': output})
        except Exception:
            pass

        return NULL

    def eval_screen_statement(self, node, env, stack_trace):
        print(f"[RENDER] Screen: {node.name.value}")
        return NULL

    def eval_embedded_code_statement(self, node, env, stack_trace):
        obj = EmbeddedCode(node.name.value, node.language, node.code)
        env.set(node.name.value, obj)
        return NULL

    def eval_component_statement(self, node, env, stack_trace):
        props = None
        if hasattr(node, 'properties') and node.properties:
            val = self.eval_node(node.properties, env, stack_trace)
            if is_error(val):
                return val
            props = _zexus_to_python(val)

        # Check builtin
        if hasattr(self, 'builtins') and 'define_component' in self.builtins:
            self.builtins['define_component'].fn(String(node.name.value), Map(props) if isinstance(props, dict) else NULL)
            return NULL

        env.set(node.name.value, String(f"<component {node.name.value}>"))
        return NULL

    def eval_theme_statement(self, node, env, stack_trace):
        val = self.eval_node(node.properties, env, stack_trace) if hasattr(node, 'properties') else NULL
        if is_error(val):
            return val
        env.set(node.name.value, val)
        return NULL

    def eval_debug_statement(self, node, env, stack_trace):
        # Check if conditional debug
        if hasattr(node, 'condition') and node.condition is not None:
            # Evaluate the condition
            condition_val = self.eval_node(node.condition, env, stack_trace)
            if is_error(condition_val):
                return condition_val

            # Check if condition is truthy (same rules as eval_print_statement)
            is_truthy = False
            if hasattr(condition_val, 'value'):
                # Boolean, Integer, etc.
                if isinstance(condition_val.value, bool):
                    is_truthy = condition_val.value
                elif isinstance(condition_val.value, (int, float)):
                    is_truthy = condition_val.value != 0
                elif isinstance(condition_val.value, str):
                    is_truthy = len(condition_val.value) > 0
                else:
                    is_truthy = bool(condition_val.value)
            else:
                # For objects without .value, check if it's NULL
                is_truthy = not (hasattr(condition_val, 'type') and condition_val.type == 'NULL')

            # If the condition is false, don't debug
            if not is_truthy:
                return NULL

        val = self.eval_node(node.value, env, stack_trace)
        if is_error(val):
            return val

        from ..object import Debug, String, Integer, Float, Boolean
        # Convert to a human-readable string
        if isinstance(val, String):
            message = val.value
        elif isinstance(val, (Integer, Float)):
            message = str(val.value)
        elif isinstance(val, Boolean):
            message = "true" if val.value else "false"
        else:
            message = val.inspect() if hasattr(val, 'inspect') else str(val)

        Debug.log(message)
        try:
            ctx = get_security_context()
            ctx.emit_event('debug', {'value': message})
        except Exception:
            pass
        return NULL

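# --- Illustrative sketch (not part of the package source) ---
# eval_print_statement and eval_debug_statement duplicate the same
# truthiness test; it could be factored into a single helper like this
# (the helper name is hypothetical).
def _zexus_truthy(obj):
    if hasattr(obj, 'value'):
        v = obj.value
        if isinstance(v, bool):
            return v
        if isinstance(v, (int, float)):
            return v != 0
        if isinstance(v, str):
            return len(v) > 0
        return bool(v)
    # Objects without .value are truthy unless they are NULL
    return not (hasattr(obj, 'type') and obj.type == 'NULL')
# --- end sketch ---
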
    def eval_external_declaration(self, node, env, stack_trace):
        def _placeholder(*a):
            return EvaluationError(f"External '{node.name.value}' not linked")

        env.set(node.name.value, Builtin(_placeholder, node.name.value))
        return NULL

    def eval_exactly_statement(self, node, env, stack_trace):
        return self.eval_node(node.body, env, stack_trace)

    def eval_action_statement(self, node, env, stack_trace):
        action = Action(node.parameters, node.body, env)

        # Check for a direct is_async attribute (from UltimateParser)
        if hasattr(node, 'is_async') and node.is_async:
            action.is_async = True

        # Apply modifiers if present (from the standard parser)
        modifiers = getattr(node, 'modifiers', [])
        if modifiers:
            # Set modifier flags on the action object
            if 'inline' in modifiers:
                action.is_inlined = True
            if 'async' in modifiers:
                action.is_async = True
            if 'secure' in modifiers:
                action.is_secure = True
            if 'pure' in modifiers:
                action.is_pure = True
            if 'native' in modifiers:
                action.is_native = True

            # 'public' modifier: automatically export the action
            if 'public' in modifiers:
                try:
                    env.export(node.name.value, action)
                except Exception:
                    pass

        env.set(node.name.value, action)
        return NULL

    def eval_function_statement(self, node, env, stack_trace):
        """Evaluate function statement - identical to action statement in Zexus"""
        print(f"[EVAL_FUNC] Starting eval_function_statement for: {node.name.value}", flush=True)
        action = Action(node.parameters, node.body, env)
        print("[EVAL_FUNC] Created Action object", flush=True)

        # Apply modifiers if present
        modifiers = getattr(node, 'modifiers', [])
        print(f"[EVAL_FUNC] Modifiers: {modifiers}", flush=True)
        if modifiers:
            # Set modifier flags on the action object
            if 'inline' in modifiers:
                action.is_inlined = True
            if 'async' in modifiers:
                action.is_async = True
                print("[EVAL_FUNC] Set is_async=True", flush=True)
            if 'secure' in modifiers:
                action.is_secure = True
            if 'pure' in modifiers:
                action.is_pure = True
            if 'native' in modifiers:
                action.is_native = True

            # 'public' modifier: automatically export the function
            if 'public' in modifiers:
                try:
                    env.export(node.name.value, action)
                except Exception:
                    pass

        print(f"[EVAL_FUNC] About to set in environment: {node.name.value}", flush=True)
        env.set(node.name.value, action)
        print("[EVAL_FUNC] Successfully set in environment", flush=True)
        return NULL

    # === PERFORMANCE OPTIMIZATION STATEMENTS ===

    def eval_native_statement(self, node, env, stack_trace):
        """Evaluate native statement - call C/C++ code directly."""
        try:
            import ctypes

            # Load the shared library
            try:
                lib = ctypes.CDLL(node.library_name)
            except (OSError, AttributeError) as e:
                return EvaluationError(f"Failed to load native library '{node.library_name}': {str(e)}")

            # Get the function from the library
            try:
                native_func = getattr(lib, node.function_name)
            except AttributeError:
                return EvaluationError(f"Function '{node.function_name}' not found in library '{node.library_name}'")

            # Evaluate arguments
            args = []
            for arg in node.args:
                val = self.eval_node(arg, env, stack_trace)
                if is_error(val):
                    return val
                # Convert Zexus objects to Python types for FFI
                args.append(_zexus_to_python(val))

            # Call the native function
            try:
                result = native_func(*args)
                # Convert the result back to a Zexus object
                zexus_result = _python_to_zexus(result)

                # Store the result if an alias was provided
                if node.alias:
                    env.set(node.alias, zexus_result)

                return zexus_result
            except Exception as e:
                return EvaluationError(f"Error calling native function '{node.function_name}': {str(e)}")

        except ImportError:
            return EvaluationError("ctypes module required for native statements")

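# --- Illustrative sketch (not part of the package source) ---
# The handler above passes converted Python values straight to the ctypes
# call. For non-int signatures, ctypes normally needs explicit argtypes
# and restype; e.g. sqrt from the C math library on a typical Unix system:
import ctypes
import ctypes.util

libm = ctypes.CDLL(ctypes.util.find_library("m"))
libm.sqrt.argtypes = [ctypes.c_double]
libm.sqrt.restype = ctypes.c_double
assert libm.sqrt(9.0) == 3.0
# Without restype, ctypes assumes a C int return value.
# --- end sketch ---
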
    def eval_gc_statement(self, node, env, stack_trace):
        """Evaluate garbage collection statement."""
        try:
            import gc

            action = node.action.lower()

            if action == "collect":
                # Force garbage collection
                collected = gc.collect()
                return Integer(collected)

            elif action == "pause":
                # Pause garbage collection
                gc.disable()
                return String("GC paused")

            elif action == "resume":
                # Resume garbage collection
                gc.enable()
                return String("GC resumed")

            elif action == "enable_debug":
                # Enable GC debug output
                gc.set_debug(gc.DEBUG_STATS)
                return String("GC debug enabled")

            elif action == "disable_debug":
                # Disable GC debug output
                gc.set_debug(0)
                return String("GC debug disabled")

            else:
                return EvaluationError(f"Unknown GC action: {action}")

        except Exception as e:
            return EvaluationError(f"Error in GC statement: {str(e)}")

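# --- Illustrative sketch (not part of the package source) ---
# The GC actions above map one-to-one onto Python's gc module:
import gc

gc.disable()          # "pause": stop automatic collection
gc.enable()           # "resume": re-enable automatic collection
freed = gc.collect()  # "collect": returns the number of unreachable objects found
# --- end sketch ---
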
    def eval_inline_statement(self, node, env, stack_trace):
        """Evaluate inline statement - mark a function for inlining optimization."""
        # Get the function to inline
        func_name = node.function_name
        if isinstance(func_name, Identifier):
            func_name = func_name.value

        func = env.get(func_name)
        if func is None:
            return EvaluationError(f"Function '{func_name}' not found for inlining")

        # Mark the function as inlined by setting a flag
        if hasattr(func, 'is_inlined') or isinstance(func, (Action, Builtin)):
            func.is_inlined = True
        else:
            # Try to set the attribute dynamically
            try:
                func.is_inlined = True
            except AttributeError:
                pass  # Function object doesn't support dynamic attributes

        return String(f"Function '{func_name}' marked for inlining")

    def eval_buffer_statement(self, node, env, stack_trace):
        """Evaluate buffer statement - direct memory access and manipulation."""
        try:
            buffer_name = node.buffer_name
            operation = node.operation
            arguments = node.arguments

            if operation == "allocate":
                # allocate(size) - allocate a buffer
                if len(arguments) != 1:
                    return EvaluationError(f"allocate expects 1 argument, got {len(arguments)}")

                size_val = self.eval_node(arguments[0], env, stack_trace)
                if is_error(size_val):
                    return size_val

                size = _zexus_to_python(size_val)
                try:
                    size = int(size)
                    # Create a byte array as a simple buffer representation
                    buf = bytearray(size)
                    env.set(buffer_name, _python_to_zexus(buf))
                    return String(f"Buffer '{buffer_name}' allocated with size {size}")
                except (ValueError, TypeError):
                    return EvaluationError(f"Invalid size for buffer allocation: {size}")

            elif operation == "read":
                # buffer.read(offset, length)
                if len(arguments) != 2:
                    return EvaluationError(f"read expects 2 arguments, got {len(arguments)}")

                offset_val = self.eval_node(arguments[0], env, stack_trace)
                length_val = self.eval_node(arguments[1], env, stack_trace)

                if is_error(offset_val) or is_error(length_val):
                    return offset_val if is_error(offset_val) else length_val

                buf = env.get(buffer_name)
                if buf is None:
                    return EvaluationError(f"Buffer '{buffer_name}' not found")

                offset = _zexus_to_python(offset_val)
                length = _zexus_to_python(length_val)

                try:
                    offset, length = int(offset), int(length)
                    buf_data = _zexus_to_python(buf)
                    data = buf_data[offset:offset + length]
                    return _python_to_zexus(list(data))
                except Exception as e:
                    return EvaluationError(f"Error reading from buffer: {str(e)}")

            elif operation == "write":
                # buffer.write(offset, data)
                if len(arguments) != 2:
                    return EvaluationError(f"write expects 2 arguments, got {len(arguments)}")

                offset_val = self.eval_node(arguments[0], env, stack_trace)
                data_val = self.eval_node(arguments[1], env, stack_trace)

                if is_error(offset_val) or is_error(data_val):
                    return offset_val if is_error(offset_val) else data_val

                buf = env.get(buffer_name)
                if buf is None:
                    return EvaluationError(f"Buffer '{buffer_name}' not found")

                offset = _zexus_to_python(offset_val)
                data = _zexus_to_python(data_val)

                try:
                    offset = int(offset)
                    buf_data = _zexus_to_python(buf)
                    if isinstance(buf_data, (bytearray, list)):
                        if isinstance(data, list):
                            for i, byte in enumerate(data):
                                buf_data[offset + i] = int(byte)
                        else:
                            buf_data[offset] = int(data)
                    else:
                        return EvaluationError("Buffer is not writable")
                    return String(f"Wrote {len(data) if isinstance(data, list) else 1} bytes at offset {offset}")
                except Exception as e:
                    return EvaluationError(f"Error writing to buffer: {str(e)}")

            elif operation == "free":
                # free() - deallocate the buffer
                buf = env.get(buffer_name)
                if buf is None:
                    return EvaluationError(f"Buffer '{buffer_name}' not found")

                env.delete(buffer_name)
                return String(f"Buffer '{buffer_name}' freed")

            else:
                return EvaluationError(f"Unknown buffer operation: {operation}")

        except Exception as e:
            return EvaluationError(f"Error in buffer statement: {str(e)}")

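# --- Illustrative sketch (not part of the package source) ---
# The buffer operations above reduce to plain bytearray indexing and
# slicing; this is the same read/write arithmetic outside the evaluator.
buf = bytearray(8)            # allocate(8): eight zero bytes
buf[2] = 0xFF                 # write(2, 255): one byte at offset 2
buf[4:7] = bytes([1, 2, 3])   # write(4, [1, 2, 3]): three bytes at offset 4
assert list(buf[4:4 + 3]) == [1, 2, 3]  # read(4, 3)
del buf                       # free: drop the only reference
# --- end sketch ---
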
    def eval_simd_statement(self, node, env, stack_trace):
        """Evaluate SIMD statement - vector operations using SIMD instructions."""
        try:
            import numpy as np  # presence check only; evaluation falls back without it

            # Evaluate the SIMD operation expression
            result = self.eval_node(node.operation, env, stack_trace)

            if is_error(result):
                return result

            # The result is already a Zexus object; no conversion needed
            return result

        except ImportError:
            # Fall back to pure Python evaluation if numpy is not available
            result = self.eval_node(node.operation, env, stack_trace)
            return result if not is_error(result) else EvaluationError("SIMD operations require numpy or fallback implementation")

        except Exception as e:
            return EvaluationError(f"Error in SIMD statement: {str(e)}")

    def _execute_deferred_cleanup(self, env, stack_trace):
        """Execute all deferred cleanup code in LIFO order (Last In, First Out)."""
        if not hasattr(env, '_deferred') or not env._deferred:
            return

        # Execute in reverse order (LIFO - like a stack)
        while env._deferred:
            deferred_block = env._deferred.pop()  # Remove and get the last item
            try:
                # Execute the deferred cleanup code
                self.eval_node(deferred_block, env, stack_trace)
            except Exception as e:
                # Deferred cleanup should not crash the program,
                # but we can log it for debugging
                debug_log(f"Error in deferred cleanup: {e}")

    def eval_defer_statement(self, node, env, stack_trace):
        """Evaluate defer statement - cleanup code execution."""
        # Store the deferred code for later execution (at end of scope/function)
        if not hasattr(env, '_deferred'):
            env._deferred = []

        env._deferred.append(node.code_block)
        return NULL  # Don't return a message, just silently register

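# --- Illustrative sketch (not part of the package source) ---
# defer registers blocks on env._deferred and _execute_deferred_cleanup
# pops them, so cleanup runs in reverse registration order:
deferred = []
deferred.append(lambda: print("opened first, closed last"))
deferred.append(lambda: print("opened last, closed first"))
while deferred:
    deferred.pop()()  # LIFO: prints the second message, then the first
# --- end sketch ---
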
    def eval_pattern_statement(self, node, env, stack_trace):
        """Evaluate pattern statement - pattern matching."""
        debug_log("eval_pattern_statement", f"Matching against {len(node.cases)} cases")

        # Evaluate the expression to match
        value = self.eval_node(node.expression, env, stack_trace)
        if is_error(value):
            return value

        debug_log("  Match value", f"{value.inspect() if hasattr(value, 'inspect') else value}")

        # Try each pattern case
        for i, case in enumerate(node.cases):
            debug_log(f"  Trying case {i}", f"pattern={case.pattern}")

            # Check if this is the default case
            if isinstance(case.pattern, str) and case.pattern == "default":
                debug_log("  ✅ Default case matched", "")
                action_result = self.eval_node(case.action, env, stack_trace)
                return action_result

            # Evaluate the pattern expression
            pattern_value = self.eval_node(case.pattern, env, stack_trace)
            if is_error(pattern_value):
                debug_log("  ❌ Pattern evaluation error", str(pattern_value))
                continue  # Skip invalid patterns

            debug_log("  Pattern value", f"{pattern_value.inspect() if hasattr(pattern_value, 'inspect') else pattern_value}")

            # Compare values
            matched = False
            if isinstance(value, Integer) and isinstance(pattern_value, Integer):
                matched = value.value == pattern_value.value
                debug_log("  Integer comparison", f"{value.value} == {pattern_value.value} = {matched}")
            elif isinstance(value, Float) and isinstance(pattern_value, Float):
                matched = value.value == pattern_value.value
            elif isinstance(value, String) and isinstance(pattern_value, String):
                matched = value.value == pattern_value.value
            elif isinstance(value, Boolean) and isinstance(pattern_value, Boolean):
                matched = value.value == pattern_value.value
            elif value == pattern_value:
                matched = True

            if matched:
                debug_log("  ✅ Pattern matched!", "Executing action")
                # Execute action
                action_result = self.eval_node(case.action, env, stack_trace)
                debug_log("  Action result", f"{action_result}")
                return action_result

        debug_log("  ❌ No pattern matched", "")
        # No match found
        return NULL

    def eval_enum_statement(self, node, env, stack_trace):
        """Evaluate enum statement - type-safe enumerations."""
        # Create an enum object
        enum_obj = Map({})

        for i, member in enumerate(node.members):
            # Use the provided value or auto-increment
            if member.value is not None:
                value = member.value
            else:
                value = i

            enum_obj.set(member.name, Integer(value) if isinstance(value, int) else String(value))

        # Store enum in environment
        env.set(node.name, enum_obj)
        return String(f"Enum '{node.name}' defined with {len(node.members)} members")

    def eval_stream_statement(self, node, env, stack_trace):
        """Evaluate stream statement - event streaming."""
        # Register stream handler
        if not hasattr(env, '_streams'):
            env._streams = {}

        # Store handler for stream
        env._streams[node.stream_name] = {
            'event_var': node.event_var,
            'handler': node.handler
        }

        return String(f"Stream '{node.stream_name}' handler registered")

    # === NEW SECURITY STATEMENT HANDLERS ===

    def eval_capability_statement(self, node, env, stack_trace):
        """Evaluate capability definition statement."""
        from ..capability_system import Capability, CapabilityLevel

        # Get capability name
        cap_name = node.name.value if hasattr(node.name, 'value') else str(node.name)

        # Extract definition details
        scope = ""
        level = CapabilityLevel.ALLOWED

        if node.definition and isinstance(node.definition, Map):
            # Extract from the map
            for key, val in node.definition.pairs:
                if hasattr(key, 'value'):
                    if key.value == "scope" and hasattr(val, 'value'):
                        scope = val.value

        # Create capability object
        cap = Capability(
            name=cap_name,
            level=level,
            reason=f"Defined with scope: {scope}"
        )

        # Store in the environment both as an identifier and in _capabilities
        if not hasattr(env, '_capabilities'):
            env._capabilities = {}
        env._capabilities[cap_name] = cap
        env.set(cap_name, cap)  # Also store as an identifier so it can be referenced

        debug_log("eval_capability_statement", f"Defined capability: {cap_name} ({scope})")
        return cap  # Return the capability object instead of just a string

    def eval_grant_statement(self, node, env, stack_trace):
        """Evaluate grant statement - grant capabilities to an entity."""
        from ..capability_system import get_capability_manager

        manager = get_capability_manager()

        # Get entity name
        entity_name = node.entity_name.value if hasattr(node.entity_name, 'value') else str(node.entity_name)

        # Extract capability names
        capability_names = []
        for cap in node.capabilities:
            if hasattr(cap, 'value'):
                capability_names.append(cap.value)
            elif hasattr(cap, 'function') and hasattr(cap.function, 'value'):
                # Function call style
                capability_names.append(cap.function.value)
            else:
                capability_names.append(str(cap))

        # Grant capabilities
        try:
            manager.grant_capabilities(entity_name, capability_names)
            debug_log("eval_grant_statement", f"Granted {len(capability_names)} capabilities to {entity_name}")
            return String(f"Granted {len(capability_names)} capabilities to '{entity_name}'")
        except Exception as e:
            return String(f"Error granting capabilities: {e}")

    def eval_revoke_statement(self, node, env, stack_trace):
        """Evaluate revoke statement - revoke capabilities from an entity."""
        from ..capability_system import get_capability_manager

        manager = get_capability_manager()

        # Get entity name
        entity_name = node.entity_name.value if hasattr(node.entity_name, 'value') else str(node.entity_name)

        # Extract capability names
        capability_names = []
        for cap in node.capabilities:
            if hasattr(cap, 'value'):
                capability_names.append(cap.value)
            elif hasattr(cap, 'function') and hasattr(cap.function, 'value'):
                capability_names.append(cap.function.value)
            else:
                capability_names.append(str(cap))

        # Revoke by removing from the granted set (simple implementation).
        # In production, this would use a proper revocation mechanism.
        try:
            # Access the manager's granted_capabilities
            if entity_name in manager.granted_capabilities:
                for cap_name in capability_names:
                    manager.granted_capabilities[entity_name].discard(cap_name)

            debug_log("eval_revoke_statement", f"Revoked {len(capability_names)} capabilities from {entity_name}")
            return String(f"Revoked {len(capability_names)} capabilities from '{entity_name}'")
        except Exception as e:
            return String(f"Error revoking capabilities: {e}")

    def eval_validate_statement(self, node, env, stack_trace):
        """Evaluate validate statement - validate data against a schema."""
        from ..validation_system import (
            get_validation_manager, ValidationError, StandardValidators
        )

        manager = get_validation_manager()

        # Evaluate the data expression
        data = self.eval_node(node.data, env, stack_trace)

        # Evaluate the schema
        schema = None
        if node.schema:
            if isinstance(node.schema, dict):
                schema = node.schema
            elif hasattr(node.schema, 'pairs'):  # Map object
                # Convert Map to dict
                schema = {}
                for key, val in node.schema.pairs:
                    key_str = key.value if hasattr(key, 'value') else str(key)
                    schema[key_str] = val
            else:
                schema = self.eval_node(node.schema, env, stack_trace)

        # Validate data
        try:
            if isinstance(data, String):
                # Validate string against a pattern or standard validator
                if isinstance(schema, String):
                    validator_name = schema.value
                    if hasattr(StandardValidators, validator_name.upper()):
                        validator = getattr(StandardValidators, validator_name.upper())
                        if validator.validate(data.value):
                            return String(f"Validation passed for {validator_name}")
                        else:
                            return String(f"Validation failed: {validator.get_error_message()}")

            # For complex validation, use the schema
            if schema and hasattr(data, '__dict__'):
                manager.validate_schema(vars(data), str(schema) if not isinstance(schema, dict) else "custom")

            debug_log("eval_validate_statement", "Validation passed")
            return String("Validation passed")

        except ValidationError as e:
            debug_log("eval_validate_statement", f"Validation error: {e}")
            return String(f"Validation failed: {e}")

    def eval_sanitize_statement(self, node, env, stack_trace):
        """Evaluate sanitize statement - sanitize untrusted input."""
        from ..validation_system import Sanitizer, Encoding

        # Evaluate the data to sanitize
        data = self.eval_node(node.data, env, stack_trace)

        # Convert to string
        if hasattr(data, 'value'):
            data_str = str(data.value)
        else:
            data_str = str(data)

        # Determine encoding
        encoding = Encoding.HTML  # Default
        if node.encoding:
            enc_val = self.eval_node(node.encoding, env, stack_trace)
            if hasattr(enc_val, 'value'):
                enc_name = enc_val.value.upper()
                try:
                    encoding = Encoding[enc_name]
                except KeyError:
                    encoding = Encoding.HTML

        # Sanitize
        try:
            sanitized = Sanitizer.sanitize_string(data_str, encoding)
            debug_log("eval_sanitize_statement", f"Sanitized {len(data_str)} chars with {encoding.value}")
            return String(sanitized)
        except Exception as e:
            debug_log("eval_sanitize_statement", f"Sanitization error: {e}")
            return String(data_str)  # Return the original if sanitization fails

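# --- Illustrative sketch (not part of the package source) ---
# Sanitizer.sanitize_string is not shown in this diff; for the default
# HTML encoding, the standard-library equivalent would be:
import html

assert html.escape('<b onclick="x()">') == '&lt;b onclick=&quot;x()&quot;&gt;'
# --- end sketch ---
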
    def eval_inject_statement(self, node, env, stack_trace):
        """Evaluate inject statement - full dependency injection with mode-aware resolution."""
        from ..dependency_injection import get_di_registry, ExecutionMode
        from ..object import String as StringObj, Null as NullObj

        # Get dependency name
        dep_name = node.dependency.value if hasattr(node.dependency, 'value') else str(node.dependency)

        debug_log("eval_inject_statement", f"Resolving dependency: {dep_name}")

        # Get the DI registry and current module context
        di_registry = get_di_registry()

        # Determine the module name from the environment context
        module_name = env.get("__module__")
        module_name = module_name.value if module_name and hasattr(module_name, 'value') else "__main__"

        # Get or create the container for this module
        container = di_registry.get_container(module_name)

        # Determine the execution mode from the environment, defaulting to PRODUCTION
        mode_obj = env.get("__execution_mode__")
        if mode_obj and hasattr(mode_obj, 'value'):
            mode_str = mode_obj.value.upper()
            try:
                execution_mode = ExecutionMode[mode_str]
            except KeyError:
                execution_mode = ExecutionMode.PRODUCTION
        else:
            execution_mode = ExecutionMode.PRODUCTION

        # Set the container's execution mode
        container.execution_mode = execution_mode

        try:
            # Attempt to resolve the dependency
            resolved = container.get(dep_name)

            if resolved is not None:
                # Successfully resolved - store in environment
                env.set(dep_name, resolved)
                debug_log("eval_inject_statement", f"✓ Injected {dep_name} from container (mode: {execution_mode.name})")
                return StringObj(f"Dependency '{dep_name}' injected ({execution_mode.name} mode)")
            else:
                # Dependency not registered - create a null placeholder
                debug_log("eval_inject_statement", f"⚠ Dependency {dep_name} not registered, using null")
                env.set(dep_name, NullObj())
                return StringObj(f"Warning: Dependency '{dep_name}' not registered")

        except Exception as e:
            # Error during resolution
            debug_log("eval_inject_statement", f"✗ Error injecting {dep_name}: {e}")
            env.set(dep_name, NullObj())
            return StringObj(f"Error: Could not inject '{dep_name}': {str(e)}")

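# --- Illustrative sketch (not part of the package source) ---
# The DI container's internals are not shown in this diff; a minimal
# mode-aware container consistent with the resolution above might be
# (all names here are assumptions, not the real dependency_injection API):
from enum import Enum

class Mode(Enum):
    PRODUCTION = 1
    TEST = 2

class MiniContainer:
    def __init__(self):
        self.execution_mode = Mode.PRODUCTION
        self._providers = {}  # (name, mode) -> factory

    def register(self, name, factory, mode=Mode.PRODUCTION):
        self._providers[(name, mode)] = factory

    def get(self, name):
        factory = self._providers.get((name, self.execution_mode))
        return factory() if factory else None

c = MiniContainer()
c.register("db", lambda: "real-db")
c.register("db", lambda: "fake-db", mode=Mode.TEST)
assert c.get("db") == "real-db"
c.execution_mode = Mode.TEST
assert c.get("db") == "fake-db"
# --- end sketch ---
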
    def eval_immutable_statement(self, node, env, stack_trace):
        """Evaluate immutable statement - declare a variable as immutable."""
        from ..purity_system import get_immutability_manager

        manager = get_immutability_manager()

        # Get variable name
        var_name = node.target.value if hasattr(node.target, 'value') else str(node.target)

        # Evaluate and assign the value if provided
        if node.value:
            value = self.eval_node(node.value, env, stack_trace)
            env.set(var_name, value)

            # Mark as immutable
            manager.mark_immutable(value)
            debug_log("eval_immutable_statement", f"Created immutable: {var_name}")
            return String(f"Immutable variable '{var_name}' created")
        else:
            # Mark an existing variable as immutable
            try:
                value = env.get(var_name)
                manager.mark_immutable(value)
                debug_log("eval_immutable_statement", f"Marked immutable: {var_name}")
                return String(f"Variable '{var_name}' marked as immutable")
            except Exception:
                return String(f"Error: Variable '{var_name}' not found")

    # === COMPLEXITY & LARGE PROJECT MANAGEMENT STATEMENT EVALUATORS ===

    def eval_interface_statement(self, node, env, stack_trace):
        """Evaluate interface statement - define a contract/interface."""
        from ..complexity_system import get_complexity_manager, Interface

        manager = get_complexity_manager()

        # Get interface name
        interface_name = node.name.value if hasattr(node.name, 'value') else str(node.name)

        # Create the interface from the AST node
        interface = Interface(
            name=interface_name,
            methods=node.methods if hasattr(node, 'methods') else [],
            properties=node.properties if hasattr(node, 'properties') else {}
        )

        # Register interface
        manager.register_interface(interface)
        debug_log("eval_interface_statement", f"Registered interface: {interface_name}")

        # Store in environment
        env.set(interface_name, interface)
        return String(f"Interface '{interface_name}' defined")

    def eval_type_alias_statement(self, node, env, stack_trace):
        """Evaluate type alias statement - create type name shortcuts."""
        from ..complexity_system import get_complexity_manager, TypeAlias

        manager = get_complexity_manager()

        # Get type alias name
        alias_name = node.name.value if hasattr(node.name, 'value') else str(node.name)

        # Get the base type (just the string name, don't evaluate it as an expression)
        base_type = node.base_type.value if hasattr(node.base_type, 'value') else str(node.base_type)

        # Create type alias
        alias = TypeAlias(
            name=alias_name,
            base_type=base_type
        )

        # Register type alias
        manager.register_type_alias(alias)
        debug_log("eval_type_alias_statement", f"Registered type alias: {alias_name} -> {base_type}")

        # Store in environment
        env.set(alias_name, alias)
        return String(f"Type alias '{alias_name}' defined")

    def eval_module_statement(self, node, env, stack_trace):
        """Evaluate module statement - create a namespaced module."""
        from ..complexity_system import Module, ModuleMember, Visibility

        # Get module name
        module_name = node.name.value if hasattr(node.name, 'value') else str(node.name)

        # Create module
        module = Module(name=module_name)

        # Execute the module body in a new environment
        module_env = Environment(outer=env)

        # Track the current module for export statement handling
        module_env.set('__current_module__', module)

        if hasattr(node, 'body') and node.body:
            self.eval_node(node.body, module_env, stack_trace)

        # Collect module members using AST modifiers when available
        seen = set()
        if hasattr(node, 'body') and getattr(node.body, 'statements', None):
            for stmt in node.body.statements:
                # Determine the declared name and modifiers if present
                declared_name = None
                modifiers = getattr(stmt, 'modifiers', []) or []

                # Function / Action declarations
                if type(stmt).__name__ in ('FunctionStatement', 'ActionStatement'):
                    if hasattr(stmt.name, 'value'):
                        declared_name = stmt.name.value
                    else:
                        declared_name = str(stmt.name)
                    member_type = 'function'

                # Let / Const declarations
                elif type(stmt).__name__ in ('LetStatement', 'ConstStatement'):
                    if hasattr(stmt.name, 'value'):
                        declared_name = stmt.name.value
                    else:
                        declared_name = str(stmt.name)
                    member_type = 'variable'

                else:
                    # Not a direct declaration we can extract; skip to the env-scan fallback
                    continue

                if declared_name:
                    seen.add(declared_name)
                    try:
                        value = module_env.get(declared_name)
                    except Exception:
                        value = None

                    # Map modifiers to visibility
                    vis = Visibility.PUBLIC
                    lower_mods = [m.lower() for m in modifiers]
                    if 'private' in lower_mods or 'internal' in lower_mods:
                        vis = Visibility.INTERNAL
                    elif 'protected' in lower_mods:
                        vis = Visibility.PROTECTED

                    member = ModuleMember(
                        name=declared_name,
                        member_type=member_type,
                        visibility=vis,
                        value=value
                    )
                    module.add_member(member)

        # Fallback: include any remaining env keys not discovered via the AST
        for key in module_env.store:
            if key.startswith('_') or key in seen:
                continue
            try:
                value = module_env.get(key)
            except Exception:
                value = None
            try:
                is_callable = callable(value)
            except Exception:
                is_callable = False
            member_type = 'function' if is_callable else 'variable'
            member = ModuleMember(
                name=key,
                member_type=member_type,
                visibility=Visibility.PUBLIC,
                value=value
            )
            module.add_member(member)

        # Note: the module is stored directly in the environment; manager integration can be enhanced later
        debug_log("eval_module_statement", f"Created module: {module_name}")
        debug_log("eval_module_statement", f"Module members: {list(module.members.keys())}")

        # Store in environment
        env.set(module_name, module)
        return String(f"Module '{module_name}' created")

    def eval_package_statement(self, node, env, stack_trace):
        """Evaluate package statement - create a package with hierarchical support."""
        from ..complexity_system import Package

        # Get package name (may be dotted like app.api.v1)
        package_name = node.name.value if hasattr(node.name, 'value') else str(node.name)

        # Parse hierarchical package names
        name_parts = package_name.split('.')

        # Create the leaf package with the body content
        leaf_package = Package(name=name_parts[-1])

        # Execute the package body in a new environment
        package_env = Environment(outer=env)

        if hasattr(node, 'body') and node.body:
            self.eval_node(node.body, package_env, stack_trace)

        # Collect package members from the package environment
        for key in package_env.store:
            if not key.startswith('_'):
                value = package_env.get(key)
                leaf_package.modules[key] = value

        debug_log("eval_package_statement", f"Created package: {package_name} with members: {list(leaf_package.modules.keys())}")

        # Handle the hierarchical package structure
        if len(name_parts) == 1:
            # Simple package (no hierarchy)
            env.set(package_name, leaf_package)
        else:
            # Hierarchical package - ensure all ancestors exist,
            # starting from the root and working down to the leaf
            root_name = name_parts[0]
            root_package = env.get(root_name)

            if root_package is None or not hasattr(root_package, 'modules'):
                # Create the root package if it doesn't exist
                root_package = Package(name=root_name)
                env.set(root_name, root_package)
                debug_log("eval_package_statement", f"Created root package: {root_name}")

            # Navigate/create intermediate packages
            current = root_package
            for i in range(1, len(name_parts)):
                part_name = name_parts[i]

                if i == len(name_parts) - 1:
                    # This is the leaf - add it
                    current.modules[part_name] = leaf_package
                    debug_log("eval_package_statement", f"Added {part_name} to {name_parts[i-1]}")
                else:
                    # This is an intermediate package
                    if part_name not in current.modules:
                        # Create the intermediate package
                        intermediate = Package(name=part_name)
                        current.modules[part_name] = intermediate
                        debug_log("eval_package_statement", f"Created intermediate package: {part_name}")
                    current = current.modules[part_name]

        return String(f"Package '{package_name}' created")

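# --- Illustrative sketch (not part of the package source) ---
# The dotted-name walk above is the usual nested construction; stripped
# of Package objects it is just a nested-dict setdefault walk:
def ensure_path(root, dotted):
    node = root
    for part in dotted.split('.'):
        node = node.setdefault(part, {})
    return node

tree = {}
ensure_path(tree, "app.api.v1")["handler"] = "..."
assert "v1" in tree["app"]["api"]
# --- end sketch ---
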
    def eval_using_statement(self, node, env, stack_trace):
        """Evaluate using statement - RAII pattern for resource management."""
        # Get resource name
        resource_name = node.resource_name.value if hasattr(node.resource_name, 'value') else str(node.resource_name)

        # Acquire resource
        resource = self.eval_node(node.resource_expr, env, stack_trace)

        # Store the resource in the environment
        env.set(resource_name, resource)

        try:
            debug_log("eval_using_statement", f"Acquired resource: {resource_name}")

            # Execute the body of the using block
            body_result = NULL
            if hasattr(node, 'body') and node.body:
                body_result = self.eval_node(node.body, env, stack_trace)

            return body_result

        finally:
            # Cleanup resource (RAII)
            if hasattr(resource, 'close'):
                try:
                    resource.close()
                    debug_log("eval_using_statement", f"Closed resource: {resource_name}")
                except Exception as e:
                    debug_log("eval_using_statement", f"Error closing resource: {e}")
            elif hasattr(resource, 'cleanup'):
                try:
                    resource.cleanup()
                    debug_log("eval_using_statement", f"Cleaned up resource: {resource_name}")
                except Exception as e:
                    debug_log("eval_using_statement", f"Error cleaning up resource: {e}")

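# --- Illustrative sketch (not part of the package source) ---
# The using-statement semantics above are Python's try/finally: the
# resource is released whether or not the body raises.
class Resource:
    def __init__(self):
        self.closed = False

    def close(self):
        self.closed = True

r = Resource()
try:
    pass  # body of the using block
finally:
    if hasattr(r, 'close'):
        r.close()
assert r.closed
# --- end sketch ---
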
    # === CONCURRENCY & PERFORMANCE STATEMENT EVALUATORS ===

    def eval_channel_statement(self, node, env, stack_trace):
        """Evaluate channel statement - declare a message-passing channel."""
        from ..concurrency_system import get_concurrency_manager

        manager = get_concurrency_manager()

        # Get channel name
        channel_name = node.name.value if hasattr(node.name, 'value') else str(node.name)

        # Get the element type if specified
        element_type = None
        if hasattr(node, 'element_type') and node.element_type:
            element_type = str(node.element_type)

        # Get the capacity if specified
        capacity = 0
        if hasattr(node, 'capacity') and node.capacity:
            capacity = self.eval_node(node.capacity, env, stack_trace)
            if isinstance(capacity, Integer):
                capacity = capacity.value
            else:
                capacity = 0

        # Create channel
        channel = manager.create_channel(channel_name, element_type, capacity)
        debug_log("eval_channel_statement", f"Created channel: {channel_name}")

        # Store in environment
        env.set(channel_name, channel)
        return String(f"Channel '{channel_name}' created")

    def eval_send_statement(self, node, env, stack_trace):
        """Evaluate send statement - send a value to a channel."""
        # Evaluate the channel expression
        channel = self.eval_node(node.channel_expr, env, stack_trace)

        # Evaluate the value to send
        value = self.eval_node(node.value_expr, env, stack_trace)

        # Send to the channel
        if hasattr(channel, 'send'):
            try:
                channel.send(value, timeout=5.0)
                debug_log("eval_send_statement", f"Sent to channel: {value}")
                return String("Value sent to channel")
            except Exception as e:
                return String(f"Error sending to channel: {e}")
        else:
            return String("Error: not a valid channel")

    def eval_receive_statement(self, node, env, stack_trace):
        """Evaluate receive statement - receive a value from a channel."""
        # Evaluate the channel expression
        channel = self.eval_node(node.channel_expr, env, stack_trace)

        # Receive from the channel
        if hasattr(channel, 'receive'):
            try:
                value = channel.receive(timeout=5.0)
                debug_log("eval_receive_statement", f"Received from channel: {value}")

                # Bind to the target if specified
                if hasattr(node, 'target') and node.target:
                    target_name = node.target.value if hasattr(node.target, 'value') else str(node.target)
                    env.set(target_name, value)

                return value if value is not None else NULL
            except Exception as e:
                return String(f"Error receiving from channel: {e}")
        else:
            return String("Error: not a valid channel")

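# --- Illustrative sketch (not part of the package source) ---
# The concurrency manager's channel type is not shown in this diff;
# queue.Queue already provides the blocking send/receive-with-timeout
# semantics used above (treating capacity 0 as unbounded is an
# assumption here, mirroring queue.Queue's convention).
import queue
import threading

chan = queue.Queue(maxsize=0)  # 0 = unbounded in queue.Queue

def producer():
    chan.put("hello", timeout=5.0)  # like channel.send(value, timeout=5.0)

t = threading.Thread(target=producer)
t.start()
value = chan.get(timeout=5.0)       # like channel.receive(timeout=5.0)
t.join()
assert value == "hello"
# --- end sketch ---
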
def eval_atomic_statement(self, node, env, stack_trace):
|
|
3554
|
+
"""Evaluate atomic statement - execute indivisible operation."""
|
|
3555
|
+
from ..concurrency_system import get_concurrency_manager
|
|
3556
|
+
|
|
3557
|
+
manager = get_concurrency_manager()
|
|
3558
|
+
|
|
3559
|
+
# Create/get atomic region
|
|
3560
|
+
atomic_id = f"atomic_{id(node)}"
|
|
3561
|
+
atomic = manager.create_atomic(atomic_id)
|
|
3562
|
+
|
|
3563
|
+
# Execute atomically
|
|
3564
|
+
def execute_block():
|
|
3565
|
+
if hasattr(node, 'body') and node.body:
|
|
3566
|
+
# Atomic block
|
|
3567
|
+
return self.eval_node(node.body, env, stack_trace)
|
|
3568
|
+
elif hasattr(node, 'expr') and node.expr:
|
|
3569
|
+
# Atomic expression
|
|
3570
|
+
return self.eval_node(node.expr, env, stack_trace)
|
|
3571
|
+
return NULL
|
|
3572
|
+
|
|
3573
|
+
result = atomic.execute(execute_block)
|
|
3574
|
+
debug_log("eval_atomic_statement", "Atomic operation completed")
|
|
3575
|
+
|
|
3576
|
+
return result if result is not NULL else NULL
|
|
3577
|
+
|
|
3578
|
+
# === BLOCKCHAIN STATEMENT EVALUATION ===
|
|
3579
|
+
|
|
3580
|
+
def eval_ledger_statement(self, node, env, stack_trace):
|
|
3581
|
+
"""Evaluate ledger statement - create immutable ledger variable.
|
|
3582
|
+
|
|
3583
|
+
ledger balances = {};
|
|
3584
|
+
ledger state_root;
|
|
3585
|
+
"""
|
|
3586
|
+
from ..blockchain import Ledger
|
|
3587
|
+
from ..blockchain.transaction import get_current_tx, create_tx_context
|
|
3588
|
+
|
|
3589
|
+
debug_log("eval_ledger_statement", f"ledger {node.name.value}")
|
|
3590
|
+
|
|
3591
|
+
# Ensure TX context exists
|
|
3592
|
+
tx = get_current_tx()
|
|
3593
|
+
if tx is None:
|
|
3594
|
+
tx = create_tx_context(caller="system", gas_limit=1000000)
|
|
3595
|
+
|
|
3596
|
+
# Evaluate initial value if provided
|
|
3597
|
+
initial_value = NULL
|
|
3598
|
+
if node.initial_value:
|
|
3599
|
+
initial_value = self.eval_node(node.initial_value, env, stack_trace)
|
|
3600
|
+
if is_error(initial_value):
|
|
3601
|
+
return initial_value
|
|
3602
|
+
|
|
3603
|
+
# Create ledger instance
|
|
3604
|
+
ledger_name = node.name.value
|
|
3605
|
+
ledger = Ledger(ledger_name)
|
|
3606
|
+
|
|
3607
|
+
# Write initial value if provided
|
|
3608
|
+
if initial_value != NULL:
|
|
3609
|
+
# Convert Zexus object to Python value for storage
|
|
3610
|
+
py_value = _zexus_to_python(initial_value)
|
|
3611
|
+
ledger.write(ledger_name, py_value, tx.block_hash)
|
|
3612
|
+
|
|
3613
|
+
# Store the value directly in environment (ledger is for tracking history)
|
|
3614
|
+
env.set(node.name.value, initial_value)
|
|
3615
|
+
|
|
3616
|
+
debug_log("eval_ledger_statement", f"Created ledger: {node.name.value}")
|
|
3617
|
+
return NULL
|
|
3618
|
+
|
|
3619
|
+
def eval_state_statement(self, node, env, stack_trace):
|
|
3620
|
+
"""Evaluate state statement - create mutable state variable.
|
|
3621
|
+
|
|
3622
|
+
state counter = 0;
|
|
3623
|
+
state owner = TX.caller;
|
|
3624
|
+
"""
|
|
3625
|
+
debug_log("eval_state_statement", f"state {node.name.value}")
|
|
3626
|
+
|
|
3627
|
+
# Evaluate initial value
|
|
3628
|
+
value = NULL
|
|
3629
|
+
if node.initial_value:
|
|
3630
|
+
value = self.eval_node(node.initial_value, env, stack_trace)
|
|
3631
|
+
if is_error(value):
|
|
3632
|
+
return value
|
|
3633
|
+
|
|
3634
|
+
# Store in environment (regular mutable variable)
|
|
3635
|
+
env.set(node.name.value, value)
|
|
3636
|
+
|
|
3637
|
+
debug_log("eval_state_statement", f"Created state: {node.name.value}")
|
|
3638
|
+
return NULL
|
|
3639
|
+
|
|
3640
|
+
+    def eval_require_statement(self, node, env, stack_trace):
+        """Evaluate require statement - prerequisites, dependencies, resources.
+
+        Basic:
+            require(balance >= amount);
+            require(TX.caller == owner, "Only owner");
+
+        With tolerance:
+            require balance >= 0.1 { tolerance_logic() }
+
+        File/Module dependencies:
+            require "file.zx" imported, "File required";
+            require module "db" available, "Database required";
+
+        Resource requirements:
+            require:balance amount >= minimum;
+            require:gas available >= needed;
+        """
+        debug_log("eval_require_statement", "Checking requirement")
+
+        # Handle file dependencies
+        if node.requirement_type == 'file' and node.file_path:
+            return self._eval_require_file(node, env, stack_trace)
+
+        # Handle module dependencies
+        if node.requirement_type == 'module' and node.module_name:
+            return self._eval_require_module(node, env, stack_trace)
+
+        # Handle resource requirements
+        if node.requirement_type in ['balance', 'gas', 'prereq']:
+            return self._eval_require_resource(node, env, stack_trace)
+
+        # Standard condition requirement
+        if node.condition:
+            # Evaluate condition
+            condition = self.eval_node(node.condition, env, stack_trace)
+            if is_error(condition):
+                return condition
+
+            # Check if condition is true
+            if not is_truthy(condition):
+                # Execute tolerance block if provided (for conditional allowances)
+                if node.tolerance_block:
+                    debug_log("eval_require_statement", "Condition failed - executing tolerance logic")
+                    tolerance_result = self.eval_node(node.tolerance_block, env, stack_trace)
+
+                    # Check if tolerance logic allows proceeding
+                    if is_error(tolerance_result):
+                        return tolerance_result
+
+                    # Unwrap ReturnValue if present
+                    from ..object import ReturnValue
+                    if isinstance(tolerance_result, ReturnValue):
+                        tolerance_result = tolerance_result.value
+                        debug_log("eval_require_statement", f"Unwrapped tolerance result: {tolerance_result}")
+
+                    # If the tolerance block returns a truthy value, allow it
+                    if is_truthy(tolerance_result):
+                        debug_log("eval_require_statement", "Tolerance logic approved - allowing requirement")
+                        return NULL
+
+                    # Tolerance block returned falsy: requirement still fails;
+                    # fall through to the error below
+                    debug_log("eval_require_statement", "Tolerance logic rejected - requirement fails")
+
+                # Evaluate error message
+                message = "Requirement not met"
+                if node.message:
+                    msg_val = self.eval_node(node.message, env, stack_trace)
+                    if isinstance(msg_val, String):
+                        message = msg_val.value
+                    elif not is_error(msg_val):
+                        message = str(msg_val.inspect() if hasattr(msg_val, 'inspect') else msg_val)
+
+                # Trigger revert
+                debug_log("eval_require_statement", f"REVERT: {message}")
+                return EvaluationError(f"Requirement failed: {message}", stack_trace=stack_trace)
+
+            debug_log("eval_require_statement", "Requirement satisfied")
+            return NULL
+
+        # No condition or special type
+        return EvaluationError("Invalid require statement: missing condition")
+
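The tolerance flow above reduces to a three-way decision: pass, rescue, or revert. A distilled sketch of that control flow, with plain booleans standing in for evaluated Zexus objects:

    def require_outcome(condition_ok, has_tolerance, tolerance_ok):
        """Mirrors eval_require_statement: a tolerance block can rescue a failed check."""
        if condition_ok:
            return "satisfied"
        if has_tolerance and tolerance_ok:
            return "satisfied"   # tolerance logic approved
        return "revert"          # falls through to the EvaluationError path

    assert require_outcome(True, False, False) == "satisfied"
    assert require_outcome(False, True, True) == "satisfied"
    assert require_outcome(False, True, False) == "revert"
    assert require_outcome(False, False, False) == "revert"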
+    def _eval_require_file(self, node, env, stack_trace):
+        """Evaluate file dependency requirement."""
+        from ..object import String
+        import os
+
+        file_path = node.file_path
+        debug_log("_eval_require_file", f"Checking if {file_path} is imported")
+
+        # Check if file was imported: look for it in the imported-modules set
+        imported_files = env.get('__imported_files__') if hasattr(env, 'get') else set()
+
+        # Also check if the file exists on disk
+        file_exists = os.path.exists(file_path)
+        file_imported = file_path in imported_files if isinstance(imported_files, set) else False
+
+        if not file_imported and not file_exists:
+            message = f"Required file '{file_path}' not imported"
+            if node.message:
+                msg_val = self.eval_node(node.message, env, stack_trace)
+                if isinstance(msg_val, String):
+                    message = msg_val.value
+
+            return EvaluationError(f"File dependency: {message}", stack_trace=stack_trace)
+
+        debug_log("_eval_require_file", f"File {file_path} available")
+        return NULL
+
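The check above passes if the file was either recorded as imported or merely exists on disk; the two-part test in isolation:

    import os

    def file_requirement_met(file_path, imported_files):
        # Same test as _eval_require_file: imported OR present on disk
        imported = file_path in imported_files if isinstance(imported_files, set) else False
        return imported or os.path.exists(file_path)

    print(file_requirement_met("utils.zx", {"utils.zx"}))  # True: recorded as imported
    print(file_requirement_met("missing.zx", set()))       # False, unless the file exists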
+    def _eval_require_module(self, node, env, stack_trace):
+        """Evaluate module dependency requirement."""
+        from ..object import String
+
+        module_name = node.module_name
+        debug_log("_eval_require_module", f"Checking if module '{module_name}' is available")
+
+        # Check if module is loaded/available: look in the environment first
+        module_available = False
+        if hasattr(env, 'get'):
+            module_obj = env.get(module_name)
+            module_available = module_obj is not None
+
+        # Also check Python sys.modules for Python modules
+        if not module_available:
+            import sys
+            module_available = module_name in sys.modules
+
+        if not module_available:
+            message = f"Required module '{module_name}' not available"
+            if node.message:
+                msg_val = self.eval_node(node.message, env, stack_trace)
+                if isinstance(msg_val, String):
+                    message = msg_val.value
+
+            return EvaluationError(f"Module dependency: {message}", stack_trace=stack_trace)
+
+        debug_log("_eval_require_module", f"Module '{module_name}' available")
+        return NULL
+
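The lookup order above (interpreter environment first, then Python's own module table) in isolation; `env_lookup` here is a hypothetical stand-in for `env.get`:

    import sys

    def module_is_available(name, env_lookup=lambda n: None):
        # Same order as _eval_require_module: environment, then sys.modules
        if env_lookup(name) is not None:
            return True
        return name in sys.modules

    print(module_is_available("sys"))                 # True: always in sys.modules
    print(module_is_available("no_such_module_xyz"))  # False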
+    def _eval_require_resource(self, node, env, stack_trace):
+        """Evaluate resource requirement (balance, gas, prerequisites)."""
+        from ..object import String
+
+        req_type = node.requirement_type
+        debug_log("_eval_require_resource", f"Checking {req_type} requirement")
+
+        # Evaluate condition
+        condition = self.eval_node(node.condition, env, stack_trace)
+        if is_error(condition):
+            return condition
+
+        # Check condition
+        if not is_truthy(condition):
+            # Execute tolerance block if provided
+            if node.tolerance_block:
+                debug_log("_eval_require_resource", f"{req_type} requirement not met - checking tolerance")
+                tolerance_result = self.eval_node(node.tolerance_block, env, stack_trace)
+
+                if is_error(tolerance_result):
+                    return tolerance_result
+
+                if is_truthy(tolerance_result):
+                    debug_log("_eval_require_resource", f"Tolerance approved for {req_type}")
+                    return NULL
+
+            # Requirement not met
+            message = f"{req_type.capitalize()} requirement not met"
+            if node.message:
+                msg_val = self.eval_node(node.message, env, stack_trace)
+                if isinstance(msg_val, String):
+                    message = msg_val.value
+
+            return EvaluationError(f"Resource requirement: {message}", stack_trace=stack_trace)
+
+        debug_log("_eval_require_resource", f"{req_type} requirement satisfied")
+        return NULL
+
+    def eval_revert_statement(self, node, env, stack_trace):
+        """Evaluate revert statement - rollback transaction.
+
+        revert();
+        revert("Unauthorized");
+        """
+        debug_log("eval_revert_statement", "Reverting transaction")
+
+        # Evaluate revert reason if provided
+        reason = "Transaction reverted"
+        if node.reason:
+            reason_val = self.eval_node(node.reason, env, stack_trace)
+            if isinstance(reason_val, String):
+                reason = reason_val.value
+            elif not is_error(reason_val):
+                reason = str(reason_val.inspect() if hasattr(reason_val, 'inspect') else reason_val)
+
+        debug_log("eval_revert_statement", f"REVERT: {reason}")
+        return EvaluationError(f"Transaction reverted: {reason}", stack_trace=stack_trace)
+
+    def eval_limit_statement(self, node, env, stack_trace):
+        """Evaluate limit statement - set gas limit.
+
+        limit(5000);
+        """
+        from ..blockchain import get_current_tx
+
+        debug_log("eval_limit_statement", "Setting gas limit")
+
+        # Evaluate gas limit amount
+        limit_val = self.eval_node(node.amount, env, stack_trace)
+        if is_error(limit_val):
+            return limit_val
+
+        # Extract numeric value
+        if isinstance(limit_val, Integer):
+            limit_amount = limit_val.value
+        else:
+            return EvaluationError(f"Gas limit must be an integer, got {type(limit_val).__name__}")
+
+        # Get current transaction context
+        tx = get_current_tx()
+        if tx:
+            tx.gas_limit = limit_amount
+            debug_log("eval_limit_statement", f"Set gas limit to {limit_amount}")
+        else:
+            debug_log("eval_limit_statement", "No active TX context, limit statement ignored")
+
+        return NULL
+
+    # === BLOCKCHAIN EXPRESSION EVALUATION ===
+
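limit(), TX.*, and gas.* all read or write the same transaction-context fields (gas_limit, gas_used, gas_remaining). A minimal sketch of such a context, assuming gas_remaining is derived from the other two (the package's real TX context may compute it differently):

    class ToyTxContext:
        """Hypothetical TX context with the fields the evaluators above touch."""
        def __init__(self, caller, gas_limit):
            self.caller = caller
            self.gas_limit = gas_limit
            self.gas_used = 0

        @property
        def gas_remaining(self):
            return self.gas_limit - self.gas_used

    tx = ToyTxContext(caller="system", gas_limit=1000000)
    tx.gas_limit = 5000      # what limit(5000) does to the live context
    tx.gas_used = 1200
    print(tx.gas_remaining)  # 3800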
+    def eval_tx_expression(self, node, env, stack_trace):
+        """Evaluate TX expression - access transaction context.
+
+        TX.caller
+        TX.timestamp
+        TX.gas_remaining
+        """
+        from ..blockchain import get_current_tx
+
+        tx = get_current_tx()
+        if not tx:
+            debug_log("eval_tx_expression", "No active TX context")
+            return NULL
+
+        # If no property specified, return the TX object itself
+        if not node.property_name:
+            # Wrap TX context as Zexus object
+            return _python_to_zexus(tx)
+
+        # Access specific property
+        prop = node.property_name
+        if prop == "caller":
+            return String(tx.caller)
+        elif prop == "timestamp":
+            return Integer(tx.timestamp)
+        elif prop == "block_hash":
+            return String(tx.block_hash)
+        elif prop == "gas_limit":
+            return Integer(tx.gas_limit)
+        elif prop == "gas_used":
+            return Integer(tx.gas_used)
+        elif prop == "gas_remaining":
+            return Integer(tx.gas_remaining)
+        else:
+            return EvaluationError(f"Unknown TX property: {prop}")
+
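A design note: the if/elif chain above could equally be table-driven, which keeps the property list in one place and makes the "unknown property" case fall out naturally. A sketch of that alternative (not how the package does it):

    TX_PROPS = {
        "caller":        lambda tx: tx.caller,
        "timestamp":     lambda tx: tx.timestamp,
        "block_hash":    lambda tx: tx.block_hash,
        "gas_limit":     lambda tx: tx.gas_limit,
        "gas_used":      lambda tx: tx.gas_used,
        "gas_remaining": lambda tx: tx.gas_remaining,
    }

    def read_tx_property(tx, prop):
        getter = TX_PROPS.get(prop)
        if getter is None:
            raise KeyError(f"Unknown TX property: {prop}")
        return getter(tx)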
+    def eval_hash_expression(self, node, env, stack_trace):
+        """Evaluate hash expression - cryptographic hashing.
+
+        hash(data, "SHA256")
+        hash(message, "KECCAK256")
+        """
+        from ..blockchain.crypto import CryptoPlugin
+
+        # Evaluate data to hash
+        data_val = self.eval_node(node.data, env, stack_trace)
+        if is_error(data_val):
+            return data_val
+
+        # Evaluate algorithm
+        algorithm_val = self.eval_node(node.algorithm, env, stack_trace)
+        if is_error(algorithm_val):
+            return algorithm_val
+
+        # Convert to string values
+        if isinstance(data_val, String):
+            data = data_val.value
+        else:
+            data = str(data_val.inspect() if hasattr(data_val, 'inspect') else data_val)
+
+        if isinstance(algorithm_val, String):
+            algorithm = algorithm_val.value
+        else:
+            algorithm = str(algorithm_val)
+
+        # Perform hashing
+        try:
+            hash_result = CryptoPlugin.hash_data(data, algorithm)
+            return String(hash_result)
+        except Exception as e:
+            return EvaluationError(f"Hash error: {str(e)}")
+
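CryptoPlugin.hash_data lives in the package's blockchain.crypto module and its exact output format is not visible in this diff; for a SHA256 backend one would typically expect a hex digest, e.g.:

    import hashlib

    def sha256_hex(data: str) -> str:
        # Typical hex-digest hashing; the actual CryptoPlugin backend may differ
        return hashlib.sha256(data.encode("utf-8")).hexdigest()

    print(sha256_hex("hello"))
    # 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824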
+    def eval_signature_expression(self, node, env, stack_trace):
+        """Evaluate signature expression - create digital signature.
+
+        signature(data, private_key, "ECDSA")
+        """
+        from ..blockchain.crypto import CryptoPlugin
+
+        # Evaluate arguments
+        data_val = self.eval_node(node.data, env, stack_trace)
+        if is_error(data_val):
+            return data_val
+
+        key_val = self.eval_node(node.private_key, env, stack_trace)
+        if is_error(key_val):
+            return key_val
+
+        algorithm_val = self.eval_node(node.algorithm, env, stack_trace) if node.algorithm else String("ECDSA")
+        if is_error(algorithm_val):
+            return algorithm_val
+
+        # Convert to string values
+        data = data_val.value if isinstance(data_val, String) else str(data_val)
+        private_key = key_val.value if isinstance(key_val, String) else str(key_val)
+        algorithm = algorithm_val.value if isinstance(algorithm_val, String) else str(algorithm_val)
+
+        # Create signature
+        try:
+            signature = CryptoPlugin.sign_data(data, private_key, algorithm)
+            return String(signature)
+        except Exception as e:
+            return EvaluationError(f"Signature error: {str(e)}")
+
+    def eval_verify_signature_expression(self, node, env, stack_trace):
+        """Evaluate verify_sig expression - verify digital signature.
+
+        verify_sig(data, signature, public_key, "ECDSA")
+        """
+        from ..blockchain.crypto import CryptoPlugin
+
+        # Evaluate arguments
+        data_val = self.eval_node(node.data, env, stack_trace)
+        if is_error(data_val):
+            return data_val
+
+        sig_val = self.eval_node(node.signature, env, stack_trace)
+        if is_error(sig_val):
+            return sig_val
+
+        key_val = self.eval_node(node.public_key, env, stack_trace)
+        if is_error(key_val):
+            return key_val
+
+        algorithm_val = self.eval_node(node.algorithm, env, stack_trace) if node.algorithm else String("ECDSA")
+        if is_error(algorithm_val):
+            return algorithm_val
+
+        # Convert to string values
+        data = data_val.value if isinstance(data_val, String) else str(data_val)
+        signature = sig_val.value if isinstance(sig_val, String) else str(sig_val)
+        public_key = key_val.value if isinstance(key_val, String) else str(key_val)
+        algorithm = algorithm_val.value if isinstance(algorithm_val, String) else str(algorithm_val)
+
+        # Verify signature
+        try:
+            is_valid = CryptoPlugin.verify_signature(data, signature, public_key, algorithm)
+            return TRUE if is_valid else FALSE
+        except Exception as e:
+            return EvaluationError(f"Signature verification error: {str(e)}")
+
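sign_data and verify_signature form a round trip: a signature produced over some data verifies against the same data and key, and fails otherwise. A toy illustration of that round-trip shape using HMAC-SHA256 as a stand-in (note: HMAC is a symmetric scheme, unlike the ECDSA named above, so this only demonstrates the flow, not the package's cryptography):

    import hashlib
    import hmac

    def toy_sign(data: str, key: str) -> str:
        return hmac.new(key.encode(), data.encode(), hashlib.sha256).hexdigest()

    def toy_verify(data: str, signature: str, key: str) -> bool:
        return hmac.compare_digest(toy_sign(data, key), signature)

    sig = toy_sign("transfer:100", "secret")
    print(toy_verify("transfer:100", sig, "secret"))  # True: data and key match
    print(toy_verify("transfer:999", sig, "secret"))  # False: tampered data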
+    def eval_gas_expression(self, node, env, stack_trace):
+        """Evaluate gas expression - access gas tracking.
+
+        gas.used
+        gas.remaining
+        gas.limit
+        """
+        from ..blockchain import get_current_tx
+
+        tx = get_current_tx()
+        if not tx:
+            debug_log("eval_gas_expression", "No active TX context")
+            return NULL
+
+        # If no property specified, return gas info as object
+        if not node.property_name:
+            gas_info = {
+                "limit": Integer(tx.gas_limit),
+                "used": Integer(tx.gas_used),
+                "remaining": Integer(tx.gas_remaining)
+            }
+            return Map(gas_info)
+
+        # Access specific property
+        prop = node.property_name
+        if prop == "limit":
+            return Integer(tx.gas_limit)
+        elif prop == "used":
+            return Integer(tx.gas_used)
+        elif prop == "remaining":
+            return Integer(tx.gas_remaining)
+        else:
+            return EvaluationError(f"Unknown gas property: {prop}")
+
+    def eval_protocol_statement(self, node, env, stack_trace):
+        """Evaluate PROTOCOL statement - define an interface/trait.
+
+        protocol Transferable {
+            action transfer(to, amount)
+            action balance() -> int
+        }
+        """
+        from ..object import String as StringObj
+
+        # Create protocol definition (similar to entity but for interfaces)
+        protocol_name = node.name.value if hasattr(node.name, 'value') else str(node.name)
+
+        # Store method signatures
+        methods = {}
+        for method in node.methods:
+            method_name = method.name.value if hasattr(method.name, 'value') else str(method.name)
+            methods[method_name] = {
+                'params': method.parameters if hasattr(method, 'parameters') else [],
+                'return_type': method.return_type if hasattr(method, 'return_type') else None
+            }
+
+        # Create protocol object
+        protocol_def = {
+            'type': 'protocol',
+            'name': protocol_name,
+            'methods': methods
+        }
+
+        # Store in environment
+        env.set(protocol_name, protocol_def)
+
+        return StringObj(f"Protocol '{protocol_name}' defined with {len(methods)} methods")
+
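The protocol definition is stored as a plain dict keyed by method name. A hypothetical conformance check against that stored shape (the package's actual entity/protocol matching, if any, lives elsewhere):

    def satisfies_protocol(object_methods, protocol_def):
        # An object conforms if it offers every method the protocol names
        return all(name in object_methods for name in protocol_def["methods"])

    transferable = {
        "type": "protocol",
        "name": "Transferable",
        "methods": {
            "transfer": {"params": ["to", "amount"], "return_type": None},
            "balance":  {"params": [], "return_type": "int"},
        },
    }

    print(satisfies_protocol({"transfer", "balance", "mint"}, transferable))  # True
    print(satisfies_protocol({"transfer"}, transferable))                     # False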
+    def eval_persistent_statement(self, node, env, stack_trace):
+        """Evaluate PERSISTENT statement - declare persistent storage in contracts.
+
+        persistent storage balances: map
+        persistent storage owner: string = "0x0"
+        """
+        from ..object import NULL
+
+        # Get variable name
+        var_name = node.name.value if hasattr(node.name, 'value') else str(node.name)
+
+        # Evaluate initial value if provided
+        value = NULL
+        if node.initial_value:
+            value = self.eval_node(node.initial_value, env, stack_trace)
+            if is_error(value):
+                return value
+
+        # Mark as persistent in environment (special marker)
+        env.set(f"__persistent_{var_name}__", True)
+
+        # Store the actual value
+        env.set(var_name, value)
+
+        return NULL
+
+    def eval_this_expression(self, node, env, stack_trace):
+        """Evaluate THIS expression - reference to the current contract instance,
+        data method instance, or entity instance.
+
+        this.balances[account]
+        this.owner = TX.caller
+        this.width           # in data method
+        this.logger.log()    # in entity method
+        """
+        from ..object import EvaluationError
+
+        # Look for current contract instance in environment
+        contract_instance = env.get("__contract_instance__")
+        if contract_instance is not None:
+            return contract_instance
+
+        # For data methods and entity methods, look for 'this' binding
+        instance = env.get("this")
+        if instance is not None:
+            return instance
+
+        return EvaluationError("'this' can only be used inside a contract, data method, or entity method")
+
+    def eval_emit_statement(self, node, env, stack_trace):
+        """Evaluate EMIT statement - emit an event.
+
+        emit Transfer(from, to, amount);
+        emit StateChange("balance_updated", new_balance);
+        """
+        from ..object import NULL
+
+        # Get event name
+        event_name = node.event_name.value if hasattr(node.event_name, 'value') else str(node.event_name)
+
+        # Evaluate arguments
+        args = []
+        for arg in node.arguments:
+            val = self.eval_node(arg, env, stack_trace)
+            if is_error(val):
+                return val
+            args.append(val)
+
+        # Get or create event log in environment
+        event_log = env.get("__event_log__")
+        if event_log is None:
+            event_log = []
+            env.set("__event_log__", event_log)
+
+        # Add event to log
+        event_data = {
+            "event": event_name,
+            "args": args
+        }
+        event_log.append(event_data)
+
+        # Print event for debugging (optional)
+        args_str = ", ".join(str(arg.inspect() if hasattr(arg, 'inspect') else arg) for arg in args)
+        print(f"📢 Event: {event_name}({args_str})")
+
+        return NULL
+
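Events accumulate as plain dicts in a per-environment `__event_log__` list; the record shape in isolation:

    event_log = []

    def emit(event_name, *args):
        # Same record shape as eval_emit_statement stores
        event_log.append({"event": event_name, "args": list(args)})

    emit("Transfer", "alice", "bob", 100)
    emit("StateChange", "balance_updated", 250)
    print(event_log[0])    # {'event': 'Transfer', 'args': ['alice', 'bob', 100]}
    print(len(event_log))  # 2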
+    def eval_modifier_declaration(self, node, env, stack_trace):
+        """Evaluate MODIFIER declaration - store modifier for later use.
+
+        modifier onlyOwner {
+            require(TX.caller == owner, "Not owner");
+        }
+        """
+        from ..object import Modifier, NULL
+
+        # Get modifier name
+        modifier_name = node.name.value if hasattr(node.name, 'value') else str(node.name)
+
+        # Create modifier object
+        modifier = Modifier(
+            name=modifier_name,
+            parameters=node.parameters,
+            body=node.body,
+            env=env
+        )
+
+        # Store modifier in environment
+        env.set(modifier_name, modifier)
+
+        return NULL
+
+    # === CONCURRENCY STATEMENT EVALUATORS ===
+
+    def eval_channel_statement(self, node, env, stack_trace):
+        """Evaluate channel declaration: channel<T> name [= capacity]
+
+        Examples:
+            channel<integer> numbers
+            channel<string> messages = 10
+        """
+        from ..concurrency_system import Channel
+
+        # Get channel name
+        channel_name = node.name.value if hasattr(node.name, 'value') else str(node.name)
+
+        # Get element type (optional)
+        element_type = node.element_type if hasattr(node, 'element_type') else None
+        if element_type and hasattr(element_type, 'value'):
+            element_type = element_type.value
+
+        # Get capacity (optional, default 0 = unbuffered)
+        capacity = 0
+        if hasattr(node, 'capacity') and node.capacity:
+            cap_val = self.eval_node(node.capacity, env, stack_trace)
+            if is_error(cap_val):
+                return cap_val
+            if isinstance(cap_val, Integer):
+                capacity = cap_val.value
+
+        # Create channel object
+        channel = Channel(
+            name=channel_name,
+            element_type=element_type,
+            capacity=capacity
+        )
+
+        # Store in environment
+        env.set(channel_name, channel)
+
+        debug_log("eval_channel_statement", f"Created channel '{channel_name}' (capacity={capacity})")
+        return NULL
+
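A buffered channel like `channel<string> messages = 10` maps naturally onto a bounded queue; a sketch using Python's queue.Queue (the package's Channel adds a name and optional element typing on top):

    import queue
    import threading

    messages = queue.Queue(maxsize=10)  # capacity=10, like "= 10" above

    def producer():
        messages.put("hello", timeout=5.0)  # analogous to send(messages, "hello")

    t = threading.Thread(target=producer)
    t.start()
    print(messages.get(timeout=5.0))  # analogous to receive(messages) -> "hello"
    t.join()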
+    def eval_send_statement(self, node, env, stack_trace):
+        """Evaluate send statement: send(channel, value)
+
+        This is for the statement form, not the builtin function.
+        """
+        # Evaluate channel
+        channel = self.eval_node(node.channel_expr, env, stack_trace)
+        if is_error(channel):
+            return channel
+
+        # Evaluate value
+        value = self.eval_node(node.value_expr, env, stack_trace)
+        if is_error(value):
+            return value
+
+        # Send to channel
+        if not hasattr(channel, 'send'):
+            return EvaluationError(f"send target is not a channel: {type(channel).__name__}")
+
+        try:
+            channel.send(value, timeout=5.0)
+            return NULL
+        except Exception as e:
+            return EvaluationError(f"send error: {str(e)}")
+
+    def eval_receive_statement(self, node, env, stack_trace):
+        """Evaluate receive statement: value = receive(channel)
+
+        This is for the statement form, not the builtin function.
+        """
+        # Evaluate channel
+        channel = self.eval_node(node.channel_expr, env, stack_trace)
+        if is_error(channel):
+            return channel
+
+        # Receive from channel
+        if not hasattr(channel, 'receive'):
+            return EvaluationError(f"receive target is not a channel: {type(channel).__name__}")
+
+        try:
+            value = channel.receive(timeout=5.0)
+            return value if value is not None else NULL
+        except Exception as e:
+            return EvaluationError(f"receive error: {str(e)}")
+
+    def eval_atomic_statement(self, node, env, stack_trace):
+        """Evaluate atomic block: atomic { statements }
+
+        Ensures all statements execute atomically (thread-safe).
+        Uses a global lock to serialize atomic blocks.
+        """
+        from threading import Lock
+
+        # Global atomic lock (class-level to share across all evaluators)
+        if not hasattr(self.__class__, '_atomic_lock'):
+            self.__class__._atomic_lock = Lock()
+
+        # Execute block under lock
+        with self.__class__._atomic_lock:
+            result = self.eval_node(node.body, env, stack_trace)
+
+        # Errors and ReturnValue wrappers propagate to the caller unchanged
+        return result
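The single class-level lock means every `atomic { ... }` block in the process is serialized against every other one: simple and correct, if coarse-grained. The guarantee it buys, in miniature:

    import threading

    _atomic_lock = threading.Lock()  # one lock shared by all "atomic" blocks
    counter = 0

    def add_one():
        global counter
        with _atomic_lock:  # serializes the read-modify-write
            counter += 1

    threads = [threading.Thread(target=add_one) for _ in range(100)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(counter)  # 100, always, because increments cannot interleave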