tensorify-runtime 0.1.0 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. tensorify_runtime-0.1.0/MANIFEST.in +36 -0
  2. tensorify_runtime-0.1.0/PKG-INFO +16 -0
  3. tensorify_runtime-0.1.0/pyproject.toml +36 -0
  4. tensorify_runtime-0.1.0/setup.cfg +4 -0
  5. tensorify_runtime-0.1.0/src/tensorify/plugins/discord_send.py +36 -0
  6. tensorify_runtime-0.1.0/src/tensorify/plugins/discord_trigger.py +14 -0
  7. tensorify_runtime-0.1.0/src/tensorify/plugins/file_writer.py +22 -0
  8. tensorify_runtime-0.1.0/src/tensorify/plugins/http_request.py +49 -0
  9. tensorify_runtime-0.1.0/src/tensorify/plugins/http_response.py +14 -0
  10. tensorify_runtime-0.1.0/src/tensorify/plugins/json_transform.py +46 -0
  11. tensorify_runtime-0.1.0/src/tensorify/plugins/python_script.py +54 -0
  12. tensorify_runtime-0.1.0/src/tensorify/plugins/telegram_send.py +32 -0
  13. tensorify_runtime-0.1.0/src/tensorify/plugins/telegram_trigger.py +13 -0
  14. tensorify_runtime-0.1.0/src/tensorify/runtime/__init__.py +31 -0
  15. tensorify_runtime-0.1.0/src/tensorify/runtime/core.py +556 -0
  16. tensorify_runtime-0.1.0/src/tensorify/runtime/expression.py +55 -0
  17. tensorify_runtime-0.1.0/src/tensorify/runtime/state.py +106 -0
  18. tensorify_runtime-0.1.0/src/tensorify/runtime/subworkflow.py +115 -0
  19. tensorify_runtime-0.1.0/src/tensorify_runtime.egg-info/PKG-INFO +16 -0
  20. tensorify_runtime-0.1.0/src/tensorify_runtime.egg-info/SOURCES.txt +21 -0
  21. tensorify_runtime-0.1.0/src/tensorify_runtime.egg-info/dependency_links.txt +1 -0
  22. tensorify_runtime-0.1.0/src/tensorify_runtime.egg-info/requires.txt +9 -0
  23. tensorify_runtime-0.1.0/src/tensorify_runtime.egg-info/top_level.txt +1 -0
@@ -0,0 +1,36 @@
+ # MANIFEST.in for tensorify-runtime Python package
+ # Controls what files are included in the published PyPI package
+
+ # Include Python source
+ include src/tensorify/runtime/*.py
+ include src/tensorify/runtime/**/*.py
+
+ # Include metadata
+ include pyproject.toml
+ include README.md
+
+ # Exclude development artifacts
+ exclude *.pyc
+ exclude *.pyo
+ exclude __pycache__
+ exclude **/__pycache__
+ exclude **/*.pyc
+ exclude **/*.pyo
+
+ # Exclude build artifacts
+ exclude build/
+ exclude dist/
+ exclude *.egg-info/
+ exclude *.egg
+
+ # Exclude development files
+ exclude .git/
+ exclude .gitignore
+ exclude .pytest_cache/
+ exclude .mypy_cache/
+
+ # Exclude tests
+ exclude test/
+ exclude tests/
+ exclude **/test_*.py
+ exclude **/*_test.py
@@ -0,0 +1,16 @@
+ Metadata-Version: 2.4
+ Name: tensorify-runtime
+ Version: 0.1.0
+ Summary: Runtime engine for Tensorify workflows (Universal Code Editor)
+ Author-email: Tensorify Team <dev@tensorify.io>
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.9
+ Requires-Dist: RestrictedPython>=6.0
+ Requires-Dist: pydantic>=2.0
+ Requires-Dist: redis>=5.0
+ Requires-Dist: aiohttp>=3.9
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
@@ -0,0 +1,36 @@
+ [build-system]
+ requires = ["setuptools>=61.0", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "tensorify-runtime"
+ version = "0.1.0"
+ description = "Runtime engine for Tensorify workflows (Universal Code Editor)"
+ authors = [
+     { name = "Tensorify Team", email = "dev@tensorify.io" },
+ ]
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "Operating System :: OS Independent",
+ ]
+ requires-python = ">=3.9"
+ dependencies = [
+     "RestrictedPython>=6.0",
+     "pydantic>=2.0",
+     "redis>=5.0",
+     "aiohttp>=3.9",
+ ]
+
+ [project.optional-dependencies]
+ dev = [
+     "pytest",
+     "black",
+     "mypy"
+ ]
+
+ [tool.setuptools.package-dir]
+ "" = "src"
+
+ [tool.setuptools.packages.find]
+ where = ["src"]
+ namespaces = true
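
Assuming the package is published to PyPI under the name declared above, it would install with "pip install tensorify-runtime", or "pip install tensorify-runtime[dev]" to pull in the pytest/black/mypy extras defined in [project.optional-dependencies].
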
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
@@ -0,0 +1,36 @@
+
+ import aiohttp
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class DiscordSendPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         token = ctx.get_secret("DISCORD_TOKEN")
+         if not token:
+             return PluginOutput(status="error", error="Missing DISCORD_TOKEN secret")
+
+         channel_id = inputs.get("channel_id")
+         content = inputs.get("message_content")
+
+         if not channel_id or not content:
+             return PluginOutput(status="error", error="Missing channel_id or message_content")
+
+         if token == "MOCK_TOKEN":
+             print(f"[Discord Mock] Sending to {channel_id}: {content}")
+             return PluginOutput(status="success", data={"id": "mock-message-id", "content": content})
+
+         url = f"https://discord.com/api/v10/channels/{channel_id}/messages"
+         headers = {
+             "Authorization": f"Bot {token}",
+             "Content-Type": "application/json"
+         }
+         payload = {"content": str(content)}
+
+         async with aiohttp.ClientSession() as session:
+             async with session.post(url, headers=headers, json=payload) as resp:
+                 if resp.status in (200, 201):
+                     data = await resp.json()
+                     return PluginOutput(status="success", data=data)
+                 else:
+                     text = await resp.text()
+                     return PluginOutput(status="error", error=f"Discord API Error {resp.status}: {text}")
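
As a usage sketch (not part of the package): the plugin above can be driven directly with the ExecutionContext defined later in this diff in src/tensorify/runtime/core.py, assuming the bundled plugin modules are importable as tensorify.plugins.*. The MOCK_TOKEN value routes through the mock branch, so no Discord API call is made; the workflow/execution IDs are placeholders.

    import asyncio
    from tensorify.runtime.core import ExecutionContext
    from tensorify.plugins.discord_send import DiscordSendPlugin

    async def main():
        ctx = ExecutionContext(
            workflow_id="wf-demo",  # placeholder IDs
            execution_id="run-1",
            secrets={"DISCORD_TOKEN": "MOCK_TOKEN"},  # triggers the mock branch
        )
        out = await DiscordSendPlugin().run(ctx, {"channel_id": "123", "message_content": "hi"})
        print(out.status, out.data)

    asyncio.run(main())
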
@@ -0,0 +1,14 @@
+
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class DiscordTriggerPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         # In a real scenario, the Runtime passes the trigger event in ctx.inputs
+         trigger_data = ctx.inputs.get("discord_event")
+
+         if not trigger_data:
+             # Fallback for testing or if invoked manually
+             return PluginOutput(status="error", error="No discord_event found in inputs")
+
+         return PluginOutput(status="success", data=trigger_data)
@@ -0,0 +1,22 @@
+
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class FileWriterPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         file_path = inputs.get("file_path", "output.txt")
+         mode = inputs.get("mode", "w")
+         content = inputs.get("content", "")
+
+         # Convert content to string if it is not one already
+         if not isinstance(content, str):
+             content = str(content)
+
+         try:
+             with open(file_path, mode) as f:
+                 f.write(content)
+
+             print(f"[File Writer] Wrote to {file_path}")
+             return PluginOutput(status="success", data=file_path)
+         except Exception as e:
+             return PluginOutput(status="error", error=str(e))
@@ -0,0 +1,49 @@
+
+ import aiohttp
+ import json
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class HttpRequestPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         url = inputs.get("url")
+         method = inputs.get("method", "GET").upper()
+         headers_str = inputs.get("headers", "{}")
+         body = inputs.get("body")
+
+         if not url:
+             return PluginOutput(status="error", error="Missing URL")
+
+         try:
+             headers = json.loads(headers_str) if isinstance(headers_str, str) else headers_str
+         except Exception:
+             return PluginOutput(status="error", error="Invalid Headers JSON")
+
+         # Mock Mode for Testing
+         if url == "MOCK_URL":
+             print(f"[HTTP Mock] {method} {url}")
+             return PluginOutput(status="success", data={"status": 200, "data": {"mock": "result"}, "headers": {}})
+
+         async with aiohttp.ClientSession() as session:
+             try:
+                 async with session.request(method, url, headers=headers, json=body) as resp:
+                     try:
+                         data = await resp.json()
+                     except Exception:
+                         data = await resp.text()
+
+                     result_obj = {
+                         "status": resp.status,
+                         "data": data,
+                         "headers": dict(resp.headers)
+                     }
+
+                     # Non-2xx responses are still returned as success: the V3
+                     # philosophy is to let the workflow branch on `status` via
+                     # an If condition rather than abort here.
+                     return PluginOutput(status="success", data=result_obj)
+
+             except Exception as e:
+                 return PluginOutput(status="error", error=str(e))
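
For reference, a hypothetical inputs payload for this plugin: headers may be either a JSON string or a dict, and the MOCK_URL sentinel short-circuits before any network I/O.

    inputs = {
        "url": "MOCK_URL",              # sentinel: returns a canned response
        "method": "POST",
        "headers": '{"X-Trace": "1"}',  # JSON string, parsed by the plugin
        "body": {"hello": "world"},
    }
    # result = await HttpRequestPlugin().run(ctx, inputs)
    # result.data -> {"status": 200, "data": {"mock": "result"}, "headers": {}}
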
@@ -0,0 +1,14 @@
+
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class HttpResponsePlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         body = inputs.get("body")
+         status = inputs.get("status", 200)
+         headers = inputs.get("headers", {})
+
+         ctx.set_return_value(body, int(status), headers)
+
+         print(f"[HTTP Response] Set workflow output: Status {status}")
+         return PluginOutput(status="success", data={"returned": True})
@@ -0,0 +1,46 @@
+
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+ from tensorify.runtime.expression import safe_eval
+
+ class JsonTransformPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         data = inputs.get("data")
+         expr = inputs.get("expression", "data")
+
+         # ctx.eval builds its evaluation context from the context's own member
+         # variables and does not accept extra locals, so we cannot inject
+         # `data` through it. Instead we call safe_eval directly (the same
+         # restricted evaluator core.py uses) with a copy of the runtime
+         # variables plus `data` bound for the expression. Copying avoids
+         # mutating shared context state from inside a plugin.
+         eval_ctx = ctx._variables.copy()
+         eval_ctx["data"] = data
+
+         try:
+             print(f"[JsonTransform] Data: {data}, Expr: {expr}")
+             result = safe_eval(expr, eval_ctx)
+             print(f"[JsonTransform] Result: {result}")
+             return PluginOutput(status="success", data=result)
+         except Exception as e:
+             print(f"[JsonTransform] Error: {e}")
+             return PluginOutput(status="error", error=f"Transform error: {str(e)}")
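
A usage sketch with illustrative values: the incoming value is bound to the name data inside the restricted evaluator, so reshaping is a single expression.

    inputs = {
        "data": {"user": {"name": "Ada"}, "items": [1, 2, 3]},
        "expression": "{'name': data['user']['name'], 'count': len(data['items'])}",
    }
    # await JsonTransformPlugin().run(ctx, inputs)
    # -> PluginOutput(status="success", data={"name": "Ada", "count": 3})
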
@@ -0,0 +1,54 @@
+
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class PythonScriptPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         code_str = inputs.get("code", "")
+         script_params = inputs.get("script_params", {})
+
+         # Prepare globals.
+         # For the "Universal" script we use standard builtins to allow imports.
+         # In a real production environment this should be sandboxed via Docker/WASM,
+         # but for the V3 MVP "Local Execution" we trust the user code (or warn them).
+         script_globals = {
+             "__builtins__": __builtins__,
+             "__name__": "universal_script",
+             "ctx": ctx,
+             "print": print
+         }
+
+         # Locals will contain inputs and collect outputs
+         loc = {
+             "params": script_params,
+             "result": None,
+             "ctx": ctx  # Expose Context for V3 Variable Access (Temporary/Advanced)
+         }
+
+         # Indent the user code so it becomes the body of the wrapper function
+         indented_code = "\n".join(["    " + line for line in code_str.split("\n")])
+
+         # Wrapper function: gives the script access to ctx/params and allows top-level await
+         wrapped_code = f"""
+ async def _user_script(ctx, params):
+     # Standard globals
+ {indented_code}
+ """
+
+         try:
+             # Compile/exec with standard (unsandboxed) semantics for now
+             exec(wrapped_code, script_globals, loc)
+
+             # Execute the async function
+             if "_user_script" in loc:
+                 await loc["_user_script"](ctx, script_params)
+
+             # Note: standard print goes straight to stdout; we would only need
+             # to capture a 'printed' variable if we redirected stdout.
+
+             return PluginOutput(status="success", data=loc.get("result"))
+         except Exception as e:
+             print(f"[Script Error] {str(e)}")
+             return PluginOutput(status="error", error=f"Script Error: {str(e)}")
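
A hypothetical script body as this plugin would receive it: the code is indented into the async wrapper, so params and ctx are in scope and top-level await is legal.

    code = '''
    total = sum(params["numbers"])
    ctx.set_var("script_total", total)  # publish via the runtime data bus
    print("total =", total)
    '''
    inputs = {"code": code, "script_params": {"numbers": [1, 2, 3]}}
    # await PythonScriptPlugin().run(ctx, inputs)
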
@@ -0,0 +1,32 @@
+
+ import aiohttp
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class TelegramSendPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         token = ctx.get_secret("TELEGRAM_BOT_TOKEN")
+         if not token:
+             return PluginOutput(status="error", error="Missing TELEGRAM_BOT_TOKEN")
+
+         chat_id = inputs.get("chat_id")
+         text = inputs.get("text")
+
+         if not chat_id or not text:
+             return PluginOutput(status="error", error="Missing chat_id or text")
+
+         # Mock Support
+         if token == "MOCK_TOKEN":
+             print(f"[Telegram Mock] Sending to {chat_id}: {text}")
+             return PluginOutput(status="success", data={"ok": True, "result": {"message_id": 123, "text": text}})
+
+         url = f"https://api.telegram.org/bot{token}/sendMessage"
+         payload = {"chat_id": chat_id, "text": str(text)}
+
+         async with aiohttp.ClientSession() as session:
+             async with session.post(url, json=payload) as resp:
+                 data = await resp.json()
+                 if resp.status == 200 and data.get("ok"):
+                     return PluginOutput(status="success", data=data)
+                 else:
+                     return PluginOutput(status="error", error=f"Telegram API Error: {data}")
@@ -0,0 +1,13 @@
+
+ from typing import Any, Dict
+ from tensorify.runtime.core import ExecutionContext, PluginOutput
+
+ class TelegramTriggerPlugin:
+     async def run(self, ctx: ExecutionContext, inputs: Dict[str, Any]) -> PluginOutput:
+         # The Runtime extracts the payload from the webhook/polling source and puts it in inputs
+         tg_event = ctx.inputs.get("telegram_event")
+
+         if not tg_event:
+             return PluginOutput(status="error", error="No telegram_event found in inputs")
+
+         return PluginOutput(status="success", data=tg_event)
@@ -0,0 +1,31 @@
+ from .core import (
+     ExecutionContext,
+     PluginOutput,
+     ActionPluginV3,
+     EventSource,
+     run_plugin,
+     SnapshotCapturer,
+     WorkflowReturnSignal
+ )
+ from .subworkflow import SubworkflowLoader
+ from .state import (
+     StateManager,
+     SQLiteStateManager
+ )
+ from .expression import safe_eval
+
+ __all__ = [
+     "ExecutionContext",
+     "PluginOutput",
+     "ActionPluginV3",
+     "EventSource",
+     "StateManager",
+     "SQLiteStateManager",
+     "safe_eval",
+     "run_plugin",
+     "SubworkflowLoader",
+     "SnapshotCapturer",
+     "WorkflowReturnSignal"
+ ]
+
+ # Version info
+ __version__ = "0.1.0"
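
Downstream code would import from the package surface re-exported above, e.g.:

    from tensorify.runtime import ExecutionContext, SQLiteStateManager, run_plugin, safe_eval
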
@@ -0,0 +1,556 @@
+ import os
+ import re
+ # asyncio/pathlib/urllib are used by SnapshotCapturer and run_plugin below
+ import asyncio
+ import pathlib
+ import urllib.error
+ import urllib.request
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from typing import Any, Dict, Optional, Protocol, runtime_checkable
+ import uuid
+ import datetime
+
+ from .state import StateManager, SQLiteStateManager
+ from .expression import safe_eval
+ import json
+ import importlib.util
+ import sys
+ import aiohttp
+
+ class WorkflowReturnSignal(Exception):
+     """Sentinel exception used to propagate a subworkflow result."""
+     def __init__(self, value: Any):
+         self.value = value
+
+ @dataclass
+ class PluginOutput:
+     """
+     Standard output from any plugin execution.
+     """
+     status: str = "success"  # "success" | "error"
+     data: Optional[Any] = None  # plugins may return dicts, lists, or scalars
+     error: Optional[str] = None
+
+ @runtime_checkable
+ class ActionPluginV3(Protocol):
+     """
+     Protocol for V3 Action Plugins.
+     All plugins must implement a 'run' function matching this signature.
+     """
+     async def run(self, ctx: "ExecutionContext", inputs: Dict[str, Any]) -> PluginOutput:
+         pass
+
+ class EventSource(ABC):
+     """
+     Base class for Trigger Plugins.
+     """
+     def __init__(self):
+         self._handlers = []
+
+     def on_event(self, handler):
+         """Register an async handler function."""
+         self._handlers.append(handler)
+
+     async def emit(self, event_data: Any) -> list:
+         """Call all registered handlers with event data and return their results."""
+         results = []
+         for h in self._handlers:
+             result = await h(event_data)
+             results.append(result)
+         return results
+
+     @abstractmethod
+     async def start(self):
+         """Start the event loop / listener."""
+         pass
+
+     @abstractmethod
+     async def stop(self):
+         """Stop the listener."""
+         pass
+
+ class ExecutionContext:
+     """
+     The Runtime Context passed to every node execution.
+     Provides safe access to:
+     - Inputs (from Trigger)
+     - Secrets (Env/Vault)
+     - State (Persistence)
+     - Variables (Runtime Data Flow)
+     """
+
+     def __init__(
+         self,
+         workflow_id: str,
+         execution_id: str,
+         inputs: Dict[str, Any] = None,
+         secrets: Optional[Dict[str, str]] = None,
+         state_manager: Optional[StateManager] = None,
+     ):
+         self.workflow_id = workflow_id
+         # The compiler still emits placeholder execution IDs like "local" or "event"
+         # for generated entrypoints. Prefer the runner-provided run ID when available
+         # so snapshots and job audit trails attach to the actual execution record.
+         self.execution_id = os.environ.get("TENSORIFY_RUN_ID") or execution_id
+         self.inputs = inputs or {}
+
+         # Security: Use provided secrets dict or fall back to os.environ
+         # In production, this should be restricted.
+         self._secrets = secrets if secrets is not None else os.environ
+
+         self._state = state_manager or SQLiteStateManager()
+
+         # Runtime Variables (The "Universal Editor" Data Bus)
+         # Key = Node ID or Variable Name, Value = Any
+         self._variables: Dict[str, Any] = {}
+
+         # Workflow Return Value (for Webhooks/Sync executions)
+         self._return_value = None
+         self._is_subworkflow = False
+
+     # === Secrets ===
+     def get_secret(self, key: str) -> Optional[str]:
+         """Securely retrieve a secret."""
+         # TODO: Add audit logging here (Who accessed what?)
+         return self._secrets.get(key)
+
+     # === State (Persistent) ===
+     def set_state(self, key: str, value: Any):
+         """Store data that survives workflow restarts."""
+         self._state.set(self.workflow_id, key, value)
+
+     def get_state(self, key: str, default: Any = None) -> Any:
+         """Retrieve persistent data."""
+         return self._state.get(self.workflow_id, key, default)
+
+     # === Variables (Transient / Data Flow) ===
+     def set_var(self, key: str, value: Any):
+         """
+         Store a runtime variable.
+         Used by the Compiler to store Node Outputs.
+         """
+         # DEBUG: Print variable update
+         val_str = str(value)
+         truncated_val = val_str[:150] + "..." if len(val_str) > 150 else val_str
+         print(f"[Runtime] [VAR] Setting {key} = {truncated_val}", flush=True)
+
+         self._variables[key] = value
+         print(f"[Runtime] [CTX] Available Variables: {list(self._variables.keys())}", flush=True)
+         # Auto-persist interaction outputs for "Time Travel" / "IntelliSense"
+         # We only persist if it's JSON-serializable (which most plugin outputs are)
+         try:
+             self._state.set(self.workflow_id, f"var_{key}", value)
+
+             # HYBRID TEST MODE SNAPSHOT
+             execution_mode = os.environ.get("TENSORIFY_EXECUTION_MODE", "serve")
+             if os.environ.get("TENSORIFY_TEST_MODE") == "true" or execution_mode == "invoke":
+                 current_node_id = getattr(self, "_current_node_id", None)
+                 current_inputs = getattr(self, "_current_node_inputs", None)
+                 # Inherit inputs if it's the node's primary output OR a friendly alias set during its execution
+                 snapshot_inputs = current_inputs if (current_node_id == key or current_node_id is not None) else None
+                 SnapshotCapturer.capture_async(self, key, value, snapshot_inputs)
+
+         except Exception as e:
+             # Ignore serialization errors for non-persistable objects
+             print(f"[Runtime] State Error/Snapshot Failed: {e}", flush=True)
+
+     def get_var(self, key: str, default: Any = None) -> Any:
+         """
+         Retrieve a runtime variable.
+         Used by Nodes to access upstream data.
+         """
+         return self._variables.get(key, default)
+
+     # === Expressions ===
+     def eval(self, expression: str) -> Any:
+         """
+         Safely evaluate a TSL expression.
+         Context includes: inputs, vars, state.
+         """
+         eval_context = {
+             "input": self.inputs,
+             "inputs": self.inputs,
+             "vars": self._variables,
+             "state": self._state.get_all(self.workflow_id)
+         }
+         # Flatten variables for direct access (e.g. loop iterators)
+         eval_context.update(self._variables)
+
+         # Helper for accessing variables with invalid python identifiers (e.g. UUIDs)
+         eval_context["get"] = self.get_var
+
+         # Check if this is a template string (contains {{ }})
+         if isinstance(expression, str) and "{{" in expression and "}}" in expression:
+             # Count how many template sections we have
+             template_count = len(re.findall(r"\{\{.+?\}\}", expression))
+
+             def replace_template(match):
+                 inner = match.group(1).strip()
+                 resolved = safe_eval(inner, eval_context)
+                 return str(resolved)  # Return string representation, not repr()
+
+             # Perform template substitution
+             result = re.sub(r"\{\{(.+?)\}\}", replace_template, expression)
+
+             # If it's a single template with nothing else, eval to preserve type
+             # Otherwise return the substituted string directly
+             if template_count == 1 and expression.strip().startswith("{{") and expression.strip().endswith("}}"):
+                 try:
+                     return safe_eval(result.strip("'\""), eval_context)
+                 except Exception:
+                     return result
+             else:
+                 # Multiple templates or mixed content - return string as-is
+                 return result
+
+         try:
+             return safe_eval(expression, eval_context)
+         except Exception as e:
+             # Provide helpful debugging
+             available_keys = list(self._variables.keys())
+             print(f"[Runtime] Eval Error: {e}")
+             print(f"[Runtime] Available variables: {available_keys}")
+             raise e
+
+     def resolve(self, value: Any) -> Any:
+         """
+         Resolve a value that might contain template strings {{ var }}.
+         If value is a string with {{ }}, it evaluates the expression.
+         If value is not a string, it is returned as-is.
+         """
+         if not isinstance(value, str):
+             return value
+
+         # Check if this is a template string
+         if "{{" in value and "}}" in value:
+             # Count how many template sections we have
+             template_count = len(re.findall(r"\{\{.+?\}\}", value))
+
+             # Perform template substitution
+             def replace(m):
+                 res = self.eval(m.group(1).strip())
+                 return str(res)
+
+             result = re.sub(r"\{\{(.+?)\}\}", replace, value)
+
+             # If it's a single template with nothing else, eval to preserve type
+             # Check that it starts with {{ and ends with }}
+             stripped = value.strip()
+             if template_count == 1 and stripped.startswith("{{") and stripped.endswith("}}"):
+                 try:
+                     return self.eval(stripped[2:-2].strip())
+                 except Exception:
+                     return result
+             else:
+                 # Multiple templates or mixed content - return substituted string
+                 print(f"[Runtime] [Template] Resolved: {value[:100]}... → {result[:100]}", flush=True)
+                 return result
+
+         return value
+
+     def set_return_value(self, value: Any, status: int = 200, headers: Dict = None):
+         """Set the final output of the workflow."""
+         self._return_value = {"body": value, "status": status, "headers": headers or {}}
+
+     def get_return_value(self):
+         return self._return_value
+
+
+ class SnapshotCapturer:
+     _pending_tasks = set()
+
+     @staticmethod
+     def capture_async(ctx: "ExecutionContext", node_id: str, data: Any, inputs: Any = None, status: str = "success"):
+         """Fire-and-forget snapshot upload."""
+         mode = os.environ.get("TENSORIFY_TEST_MODE")
+         print(f"[Runtime] Snapshot Capture Triggered for {node_id}. Mode: {mode}", flush=True)
+         try:
+             loop = asyncio.get_event_loop()
+             if loop.is_running():
+                 task = loop.create_task(SnapshotCapturer._upload(ctx, node_id, data, inputs, status))
+                 SnapshotCapturer._pending_tasks.add(task)
+                 task.add_done_callback(SnapshotCapturer._pending_tasks.discard)
+             else:
+                 SnapshotCapturer._upload_sync(ctx, node_id, data, inputs, status)
+         except RuntimeError:
+             # Fallback if no event loop exists (e.g. strict exec environments)
+             print(f"[Runtime] Falling back to synchronous snapshot upload for {node_id}")
+             SnapshotCapturer._upload_sync(ctx, node_id, data, inputs, status)
+         except Exception as e:
+             print(f"[Runtime] Snapshot Error: {e}")
+
+     @staticmethod
+     async def wait_all():
+         if SnapshotCapturer._pending_tasks:
+             await asyncio.gather(*SnapshotCapturer._pending_tasks, return_exceptions=True)
+             SnapshotCapturer._pending_tasks.clear()
+
+     @staticmethod
+     async def _upload(ctx: "ExecutionContext", node_id: str, data: Any, inputs: Any = None, status: str = "success"):
+         # Imported locally so redis stays a soft dependency at call time
+         import redis
+
+         try:
+             # 1. Mask Secrets
+             masked_inputs = SnapshotCapturer._mask_recursive(inputs, ctx._secrets.values())
+             masked_data = SnapshotCapturer._mask_recursive(data, ctx._secrets.values())
+
+             # 2. Prepare Payload
+             payload = {
+                 "id": str(uuid.uuid4()),
+                 "workflowId": ctx.workflow_id,
+                 "nodeId": node_id,
+                 "runId": ctx.execution_id,
+                 "status": status,
+                 "inputs": masked_inputs,
+                 "output": masked_data,
+                 "capturedAt": datetime.datetime.now(datetime.timezone.utc).isoformat()
+             }
+
+             # 3. Publish to Redis for real-time WebSocket streaming
+             try:
+                 # Support REDIS_URL connection string (like DATABASE_URL)
+                 # Format: redis://localhost:6379/0 or rediss:// for TLS
+                 redis_url = os.environ.get("REDIS_URL")
+                 if redis_url:
+                     redis_client = redis.from_url(redis_url, decode_responses=True)
+                 else:
+                     # Fallback to individual env vars for backward compatibility
+                     redis_client = redis.Redis(
+                         host=os.environ.get("REDIS_HOST", "localhost"),
+                         port=int(os.environ.get("REDIS_PORT", 6379)),
+                         db=int(os.environ.get("REDIS_DB", 0)),
+                         decode_responses=True
+                     )
+
+                 channel = f"workflow:{ctx.workflow_id}:snapshots"
+                 redis_client.publish(channel, json.dumps(payload))
+                 print(f"[Runtime] 📤 Published snapshot to Redis: {channel}", flush=True)
+
+             except Exception as redis_err:
+                 print(f"[Runtime] ⚠️ Redis publish failed: {redis_err}", flush=True)
+
+             # 4. Also upload via HTTP for database persistence (fallback)
+             api_url = os.environ.get("TENSORIFY_API_URL", "http://localhost:3000")
+             token = os.environ.get("TENSORIFY_API_KEY")
+             auth_token = os.environ.get("TENSORIFY_AUTH_TOKEN", token)
+
+             url = f"{api_url}/api/v1/snapshots"
+             headers = {
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {auth_token}"
+             }
+
+             timeout = aiohttp.ClientTimeout(total=2)
+             async with aiohttp.ClientSession(timeout=timeout) as session:
+                 async with session.post(url, json=payload, headers=headers) as resp:
+                     if resp.status != 200:
+                         print(f"[Runtime] Snapshot HTTP Upload Failed: {await resp.text()}", flush=True)
+
+         except Exception as e:
+             print(f"[Runtime] Snapshot Upload Exception: {e}")
+
+     @staticmethod
+     def _upload_sync(ctx: "ExecutionContext", node_id: str, data: Any, inputs: Any = None, status: str = "success"):
+         """Synchronous fallback for snapshot uploads when no async event loop is available."""
+         try:
+             masked_inputs = SnapshotCapturer._mask_recursive(inputs, ctx._secrets.values())
+             masked_data = SnapshotCapturer._mask_recursive(data, ctx._secrets.values())
+
+             payload = {
+                 "workflowId": ctx.workflow_id,
+                 "nodeId": node_id,
+                 "runId": ctx.execution_id,
+                 "status": status,
+                 "inputs": masked_inputs,
+                 "output": masked_data
+             }
+
+             api_url = os.environ.get("TENSORIFY_API_URL", "http://localhost:3000")
+             token = os.environ.get("TENSORIFY_API_KEY")
+             auth_token = os.environ.get("TENSORIFY_AUTH_TOKEN", token)
+
+             url = f"{api_url}/api/v1/snapshots"
+             headers = {
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {auth_token}"
+             }
+
+             req = urllib.request.Request(
+                 url,
+                 data=json.dumps(payload).encode('utf-8'),
+                 headers=headers,
+                 method='POST'
+             )
+             try:
+                 with urllib.request.urlopen(req, timeout=2) as response:
+                     if response.status != 200:
+                         print(f"[Runtime] Sync Snapshot Upload Failed: HTTP {response.status}", flush=True)
+             except urllib.error.HTTPError as e:
+                 print(f"[Runtime] Sync Snapshot Upload HTTP Error: {e.code} - {e.reason}", flush=True)
+             except Exception as e:
+                 print(f"[Runtime] Sync Snapshot Upload Failed: {e}", flush=True)
+
+         except Exception as e:
+             print(f"[Runtime] Sync Snapshot Exception: {e}")
+
+     @staticmethod
+     def _mask_recursive(data: Any, secrets: Any) -> Any:
+         if isinstance(data, dict):
+             return {k: SnapshotCapturer._mask_recursive(v, secrets) for k, v in data.items()}
+         elif isinstance(data, list):
+             return [SnapshotCapturer._mask_recursive(i, secrets) for i in data]
+         elif isinstance(data, str):
+             for secret in secrets:
+                 if secret and len(secret) > 4 and secret in data:
+                     data = data.replace(secret, "******")
+             return data
+         else:
+             return data
+
+
+ async def run_plugin(ctx: ExecutionContext, plugin_key: str, inputs: Dict[str, Any]) -> Any:
+     """
+     Executes a plugin action.
+     """
+     # Local import to avoid circular dependency
+     # from ..plugins.discord_send import DiscordSendPlugin
+     # from ..plugins.discord_trigger import DiscordTriggerPlugin
+     # from ..plugins.telegram_send import TelegramSendPlugin
+     # from ..plugins.telegram_trigger import TelegramTriggerPlugin
+     # from ..plugins.python_script import PythonScriptPlugin
+     from ..plugins.http_request import HttpRequestPlugin
+     # from ..plugins.http_response import HttpResponsePlugin
+     # from ..plugins.file_writer import FileWriterPlugin
+     # from ..plugins.json_transform import JsonTransformPlugin
+
+     # Simple registry (Phase 5)
+     registry = {}
+
+     plugin = registry.get(plugin_key)
+
+     # --- Dynamic Discovery from Bundled Plugins ---
+     if not plugin:
+         try:
+             # Try to load as a flat module from the 'plugins' package first.
+             # Key heuristic: "@tensorify/webhook-trigger:3.0.0" -> "webhook_trigger",
+             # assuming main.py sits in the execution root with 'plugins' as a
+             # sibling package.
+             try:
+                 clean_name = plugin_key.split(":")[0].replace("@tensorify/", "").replace("-", "_")
+                 module_name = f"plugins.{clean_name}"
+
+                 print(f"[Runtime] Attempting import: {module_name}")
+                 module = importlib.import_module(module_name)
+
+                 # Find the plugin class by convention: any class whose name ends
+                 # with 'Plugin' (e.g. webhook_trigger -> WebhookTriggerPlugin)
+                 for attr_name in dir(module):
+                     attr = getattr(module, attr_name)
+                     if isinstance(attr, type) and attr_name.endswith("Plugin") and attr_name != "Plugin":
+                         print(f"[Runtime] Found plugin class {attr_name} in {module_name}")
+                         plugin_instance = attr()
+                         registry[plugin_key] = plugin_instance
+                         plugin = plugin_instance
+                         break
+
+             except ImportError:
+                 # Fallback to nested folder discovery (if manual bundle)
+                 pass
+             except Exception as e:
+                 print(f"[Runtime] Flat import failed for {plugin_key}: {e}")
+
+             if not plugin:
+                 # Look for a plugins directory in the execution root
+                 # (our bundle logic places a 'plugins' folder there)
+                 cwd = pathlib.Path.cwd()
+                 plugin_dir = cwd / "plugins" / plugin_key
+
+                 if plugin_dir.exists() and plugin_dir.is_dir():
+                     manifest_path = plugin_dir / "manifest.json"
+
+                     # Check python directory
+                     python_dir = plugin_dir / "python"
+
+                     print(f"[Runtime] Checking {plugin_dir}")
+
+                     if manifest_path.exists() and python_dir.exists():
+                         with open(manifest_path, "r") as f:
+                             manifest = json.load(f)
+
+                         class_name = manifest.get("entrypointClassName")
+
+                         if class_name:
+                             # Scan python files for the class
+                             found_class = False
+                             for py_file in python_dir.glob("*.py"):
+                                 # Dynamically load module
+                                 module_name = f"dynamic_plugins.{plugin_key.replace('/', '_').replace(':', '_').replace('@', '').replace('-', '_')}"
+                                 try:
+                                     spec = importlib.util.spec_from_file_location(module_name, py_file)
+                                     if spec and spec.loader:
+                                         module = importlib.util.module_from_spec(spec)
+                                         sys.modules[module_name] = module
+                                         spec.loader.exec_module(module)
+
+                                         if hasattr(module, class_name):
+                                             PluginClass = getattr(module, class_name)
+                                             # Instantiate
+                                             plugin_instance = PluginClass()
+
+                                             # Cache and use
+                                             registry[plugin_key] = plugin_instance
+                                             plugin = plugin_instance
+                                             print(f"[Runtime] Dynamically loaded {plugin_key} from {py_file}")
+                                             found_class = True
+                                             break
+                                 except Exception as e:
+                                     print(f"[Runtime] Error loading module {py_file}: {e}")
+
+                             if not found_class:
+                                 print(f"[Runtime] Class {class_name} not found in {python_dir}")
+                         else:
+                             print(f"[Runtime] entrypointClassName not found in manifest for {plugin_key}")
+                     else:
+                         print(f"[Runtime] Manifest or python dir missing in {plugin_dir}")
+                 else:
+                     print(f"[Runtime] Plugin dir not found: {plugin_dir} (CWD: {cwd})")
+         except Exception as e:
+             print(f"[Runtime] Dynamic discovery failed for {plugin_key}: {e}")
+
+     if not plugin:
+         print(f"[Runtime] Warning: Plugin {plugin_key} not found in registry. Using Mock.")
+         return PluginOutput(status="success", data={"message": f"Mock Output from {plugin_key}"})
+
+     print(f"[Runtime] Executing {plugin_key}")
+     try:
+         ctx._current_node_inputs = inputs
+
+         # Return the full PluginOutput object so the caller (generated code)
+         # can treat it consistently with direct class calls (accessing .data).
+         result = await plugin.run(ctx, inputs)
+
+         # LOGGING FOR USER INSPECTION
+         if result.status == "success":
+             try:
+                 print(f"[Runtime] Output from {plugin_key}: {json.dumps(result.data, default=str)}", flush=True)
+             except Exception:
+                 print(f"[Runtime] Output from {plugin_key}: {result.data} (Not JSON serializable)", flush=True)
+         else:
+             print(f"[Runtime] Error from {plugin_key}: {result.error}", flush=True)
+
+         return result
+     except Exception as e:
+         return PluginOutput(status="error", error=str(e))
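
To illustrate the template rules in eval/resolve above (a lone {{ ... }} preserves the value's type, mixed content produces a string, non-strings pass through), a small hypothetical session:

    ctx = ExecutionContext(workflow_id="wf", execution_id="run")
    ctx.set_var("count", 3)
    ctx.set_var("user", {"name": "Ada"})

    ctx.resolve("{{ count }}")                # -> 3 (single template: int preserved)
    ctx.resolve("Hello {{ user['name'] }}!")  # -> "Hello Ada!" (string substitution)
    ctx.resolve(42)                           # -> 42 (non-strings returned as-is)
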
@@ -0,0 +1,55 @@
+ from RestrictedPython import compile_restricted, safe_globals, utility_builtins
+ from typing import Any, Dict
+
+ def safe_eval(expression: str, context: Dict[str, Any]) -> Any:
+     """
+     Safely evaluate a Python expression within a context.
+
+     Security:
+     - No imports
+     - No disk access
+     - No dunder (__) access
+
+     Args:
+         expression: Python expression string (e.g. "a > 10")
+         context: Dictionary of variables available to the expression
+
+     Returns:
+         The result of the expression
+     """
+     if not expression or not expression.strip():
+         return None
+
+     try:
+         # Prepare restricted execution environment
+         loc = {}
+
+         # Helper for RestrictedPython subscript access
+         def _getitem_(obj, index):
+             return obj[index]
+
+         def _getattr_(obj, name):
+             if isinstance(obj, dict) and name in obj:
+                 return obj[name]
+             return getattr(obj, name)
+
+         def _getiter_(obj):
+             return iter(obj)
+
+         # Combine safe builtins with user context
+         glob = safe_globals.copy()
+         glob.update(utility_builtins)
+         glob["_getitem_"] = _getitem_
+         glob["_getattr_"] = _getattr_
+         glob["_getiter_"] = _getiter_
+         glob.update(context)
+
+         # Compile expression (eval mode)
+         code = compile_restricted(expression, '<string>', 'eval')
+
+         # Execute
+         result = eval(code, glob, loc)
+         return result
+
+     except Exception as e:
+         raise ValueError(f"Expression evaluation failed: {expression}. Error: {str(e)}")
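
A direct usage sketch of safe_eval; the last call raises because RestrictedPython rejects underscore-prefixed names at compile time, and the helper wraps that failure into a ValueError:

    from tensorify.runtime.expression import safe_eval

    safe_eval("a + b", {"a": 1, "b": 2})                          # -> 3
    safe_eval("user['name'].upper()", {"user": {"name": "ada"}})  # -> "ADA"
    safe_eval("__import__('os')", {})                             # raises ValueError
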
@@ -0,0 +1,106 @@
+ from abc import ABC, abstractmethod
+ import json
+ import os
+ import sqlite3
+ from typing import Any, Dict, Optional
+
+ class StateManager(ABC):
+     """
+     Interface for persistent state storage.
+     Used by workflows to store data that survives restarts/crashes.
+     """
+
+     @abstractmethod
+     def set(self, workflow_id: str, key: str, value: Any):
+         pass
+
+     @abstractmethod
+     def get(self, workflow_id: str, key: str, default: Any = None) -> Any:
+         pass
+
+     @abstractmethod
+     def delete(self, workflow_id: str, key: str):
+         pass
+
+     @abstractmethod
+     def get_all(self, workflow_id: str) -> Dict[str, Any]:
+         pass
+
+ class SQLiteStateManager(StateManager):
+     """
+     Production-ready local state storage using SQLite.
+     Stores values as JSON strings.
+     """
+
+     def __init__(self, db_path: str = ".tensorify/state.db"):
+         self.db_path = db_path
+         self._init_db()
+
+     def _init_db(self):
+         os.makedirs(os.path.dirname(self.db_path), exist_ok=True)
+         conn = sqlite3.connect(self.db_path)
+         conn.execute("""
+             CREATE TABLE IF NOT EXISTS state (
+                 workflow_id TEXT,
+                 key TEXT,
+                 value TEXT,
+                 updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                 PRIMARY KEY (workflow_id, key)
+             )
+         """)
+         conn.commit()
+         conn.close()
+
+     def set(self, workflow_id: str, key: str, value: Any):
+         conn = sqlite3.connect(self.db_path)
+         try:
+             serialized_value = json.dumps(value)
+             conn.execute(
+                 """
+                 INSERT OR REPLACE INTO state (workflow_id, key, value)
+                 VALUES (?, ?, ?)
+                 """,
+                 (workflow_id, key, serialized_value)
+             )
+             conn.commit()
+         finally:
+             conn.close()
+
+     def get(self, workflow_id: str, key: str, default: Any = None) -> Any:
+         conn = sqlite3.connect(self.db_path)
+         try:
+             cursor = conn.execute(
+                 "SELECT value FROM state WHERE workflow_id = ? AND key = ?",
+                 (workflow_id, key)
+             )
+             row = cursor.fetchone()
+             if row:
+                 return json.loads(row[0])
+             return default
+         finally:
+             conn.close()
+
+     def delete(self, workflow_id: str, key: str):
+         conn = sqlite3.connect(self.db_path)
+         try:
+             conn.execute(
+                 "DELETE FROM state WHERE workflow_id = ? AND key = ?",
+                 (workflow_id, key)
+             )
+             conn.commit()
+         finally:
+             conn.close()
+
+     def get_all(self, workflow_id: str) -> Dict[str, Any]:
+         conn = sqlite3.connect(self.db_path)
+         try:
+             cursor = conn.execute(
+                 "SELECT key, value FROM state WHERE workflow_id = ?",
+                 (workflow_id,)
+             )
+             result = {}
+             for key, value_json in cursor.fetchall():
+                 result[key] = json.loads(value_json)
+             return result
+         finally:
+             conn.close()
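
A quick sketch of the state store above; values round-trip through JSON and the database file defaults to .tensorify/state.db:

    from tensorify.runtime.state import SQLiteStateManager

    sm = SQLiteStateManager()
    sm.set("wf-1", "counter", {"n": 1})
    sm.get("wf-1", "counter")     # -> {"n": 1}
    sm.get_all("wf-1")            # -> {"counter": {"n": 1}}
    sm.delete("wf-1", "counter")
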
@@ -0,0 +1,115 @@
+ # Postponed annotation evaluation keeps `pathlib.Path | str` and `list[str]`
+ # annotations valid on Python 3.9 (PEP 604 syntax is otherwise 3.10+)
+ from __future__ import annotations
+
+ import importlib.util
+ import os
+ import pathlib
+ import sys
+ from typing import Any
+ from .core import ExecutionContext, WorkflowReturnSignal
+
+ class SubworkflowLoader:
+     """
+     Loads and executes other workflows as sub-routines.
+     This is the core engine for V3's modular architecture.
+     """
+     def __init__(self, execution_root: pathlib.Path | str = "workflows"):
+         root = pathlib.Path(execution_root)
+         if root.name == "workflows":
+             self.execution_root = root.parent if root.parent != pathlib.Path("") else pathlib.Path.cwd()
+             self.workflows_dir = root
+         else:
+             self.execution_root = root
+             self.workflows_dir = root / "workflows"
+         self._cache = {}
+         self._call_stack: list[str] = []
+
+     async def execute(self, workflow_id: str, parent_ctx: ExecutionContext, input_value: Any) -> Any:
+         """
+         Executes a subworkflow by ID.
+
+         Args:
+             workflow_id: The ID/filename of the subworkflow (w/o extension).
+             parent_ctx: The context of the caller (used to inherit config/secrets).
+             input_value: Value to pass to the child workflow trigger payload.
+
+         Returns:
+             Value returned by the child workflow's Return node, or None.
+         """
+         if workflow_id in self._call_stack:
+             raise ValueError(
+                 f"Circular subworkflow reference detected: {workflow_id} in stack {self._call_stack}"
+             )
+
+         self._call_stack.append(workflow_id)
+         try:
+             module = self._load_module(workflow_id)
+
+             child_payload = self._normalize_input_payload(input_value)
+
+             child_ctx = ExecutionContext(
+                 workflow_id=workflow_id,
+                 execution_id=parent_ctx.execution_id,
+                 inputs=child_payload,
+                 secrets=parent_ctx._secrets,
+                 state_manager=parent_ctx._state,
+             )
+             child_ctx._is_subworkflow = True
+
+             if not hasattr(module, "run"):
+                 raise ValueError(
+                     f"Subworkflow {workflow_id} does not export 'run(ctx, injected_payload=None)'"
+                 )
+
+             try:
+                 return await module.run(child_ctx, injected_payload=child_payload)
+             except WorkflowReturnSignal as signal:
+                 return signal.value
+         finally:
+             self._call_stack.pop()
+
+     def _normalize_input_payload(self, input_value: Any) -> dict[str, Any]:
+         if isinstance(input_value, dict):
+             envelope_keys = {"body", "headers", "query", "path", "method"}
+             if "body" in input_value and envelope_keys.issubset(input_value.keys()):
+                 return {
+                     "body": input_value.get("body"),
+                     "headers": input_value.get("headers") or {},
+                     "query": input_value.get("query") or {},
+                     "path": input_value.get("path") or "",
+                     "method": input_value.get("method") or "INTERNAL",
+                 }
+
+         return {
+             "body": input_value,
+             "headers": {},
+             "query": {},
+             "path": "",
+             "method": "INTERNAL",
+         }
+
+     def _load_module(self, workflow_id: str):
+         """Dynamic import of the generated python file."""
+         if workflow_id in self._cache:
+             return self._cache[workflow_id]
+
+         # Security Check: Prevent directory traversal
+         if ".." in workflow_id or "/" in workflow_id or "\\" in workflow_id:
+             raise ValueError(f"Invalid workflow ID: {workflow_id}")
+
+         file_path = os.path.join(str(self.workflows_dir), f"{workflow_id}.py")
+
+         if not os.path.exists(file_path):
+             # Try resolving the absolute path if the provided dir was relative
+             abs_path = os.path.abspath(file_path)
+             if not os.path.exists(abs_path):
+                 raise FileNotFoundError(f"Subworkflow file not found: {file_path}")
+             file_path = abs_path
+
+         spec = importlib.util.spec_from_file_location(f"subflow_{workflow_id}", file_path)
+         if spec is None or spec.loader is None:
+             raise ImportError(f"Could not load spec for {workflow_id}")
+
+         module = importlib.util.module_from_spec(spec)
+         sys.modules[f"subflow_{workflow_id}"] = module
+         spec.loader.exec_module(module)
+
+         self._cache[workflow_id] = module
+         return module
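
Calling a child workflow from generated (async) code would look roughly like this, assuming a compiled workflows/child_flow.py that exports run(ctx, injected_payload=None); the input value is normalized into the trigger envelope shown in _normalize_input_payload:

    loader = SubworkflowLoader("workflows")
    result = await loader.execute("child_flow", parent_ctx=ctx, input_value={"user_id": 7})
    # {"user_id": 7} arrives as {"body": {"user_id": 7}, "headers": {}, "query": {},
    #  "path": "", "method": "INTERNAL"} in the child's ctx.inputs
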
@@ -0,0 +1,16 @@
+ Metadata-Version: 2.4
+ Name: tensorify-runtime
+ Version: 0.1.0
+ Summary: Runtime engine for Tensorify workflows (Universal Code Editor)
+ Author-email: Tensorify Team <dev@tensorify.io>
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.9
+ Requires-Dist: RestrictedPython>=6.0
+ Requires-Dist: pydantic>=2.0
+ Requires-Dist: redis>=5.0
+ Requires-Dist: aiohttp>=3.9
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
@@ -0,0 +1,21 @@
+ MANIFEST.in
+ pyproject.toml
+ src/tensorify/plugins/discord_send.py
+ src/tensorify/plugins/discord_trigger.py
+ src/tensorify/plugins/file_writer.py
+ src/tensorify/plugins/http_request.py
+ src/tensorify/plugins/http_response.py
+ src/tensorify/plugins/json_transform.py
+ src/tensorify/plugins/python_script.py
+ src/tensorify/plugins/telegram_send.py
+ src/tensorify/plugins/telegram_trigger.py
+ src/tensorify/runtime/__init__.py
+ src/tensorify/runtime/core.py
+ src/tensorify/runtime/expression.py
+ src/tensorify/runtime/state.py
+ src/tensorify/runtime/subworkflow.py
+ src/tensorify_runtime.egg-info/PKG-INFO
+ src/tensorify_runtime.egg-info/SOURCES.txt
+ src/tensorify_runtime.egg-info/dependency_links.txt
+ src/tensorify_runtime.egg-info/requires.txt
+ src/tensorify_runtime.egg-info/top_level.txt
@@ -0,0 +1,9 @@
+ RestrictedPython>=6.0
+ pydantic>=2.0
+ redis>=5.0
+ aiohttp>=3.9
+
+ [dev]
+ pytest
+ black
+ mypy