qyro 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. qyro/__init__.py +17 -0
  2. qyro/adapters/__init__.py +4 -0
  3. qyro/adapters/language_adapters/__init__.py +4 -0
  4. qyro/adapters/language_adapters/c/__init__.py +4 -0
  5. qyro/adapters/language_adapters/python/__init__.py +4 -0
  6. qyro/adapters/language_adapters/python/python_adapter.py +584 -0
  7. qyro/cli/__init__.py +8 -0
  8. qyro/cli/__main__.py +5 -0
  9. qyro/cli/cli.py +392 -0
  10. qyro/cli/interactive.py +297 -0
  11. qyro/common/__init__.py +37 -0
  12. qyro/common/animation.py +82 -0
  13. qyro/common/builder.py +434 -0
  14. qyro/common/compiler.py +895 -0
  15. qyro/common/config.py +93 -0
  16. qyro/common/constants.py +99 -0
  17. qyro/common/errors.py +176 -0
  18. qyro/common/frontend.py +74 -0
  19. qyro/common/health.py +358 -0
  20. qyro/common/kafka_manager.py +192 -0
  21. qyro/common/logging.py +149 -0
  22. qyro/common/memory.py +147 -0
  23. qyro/common/metrics.py +301 -0
  24. qyro/common/monitoring.py +468 -0
  25. qyro/common/parser.py +91 -0
  26. qyro/common/platform.py +609 -0
  27. qyro/common/redis_memory.py +1108 -0
  28. qyro/common/rpc.py +287 -0
  29. qyro/common/sandbox.py +191 -0
  30. qyro/common/schema_loader.py +33 -0
  31. qyro/common/secure_sandbox.py +490 -0
  32. qyro/common/toolchain_validator.py +617 -0
  33. qyro/common/type_generator.py +176 -0
  34. qyro/common/validation.py +401 -0
  35. qyro/common/validator.py +204 -0
  36. qyro/gateway/__init__.py +8 -0
  37. qyro/gateway/gateway.py +303 -0
  38. qyro/orchestrator/__init__.py +8 -0
  39. qyro/orchestrator/orchestrator.py +1223 -0
  40. qyro-2.0.0.dist-info/METADATA +244 -0
  41. qyro-2.0.0.dist-info/RECORD +45 -0
  42. qyro-2.0.0.dist-info/WHEEL +5 -0
  43. qyro-2.0.0.dist-info/entry_points.txt +2 -0
  44. qyro-2.0.0.dist-info/licenses/LICENSE +21 -0
  45. qyro-2.0.0.dist-info/top_level.txt +1 -0
qyro/common/rpc.py ADDED
@@ -0,0 +1,287 @@
1
+ """
2
+ Nexus Cross-Language Function Calling System (Redis-Backed)
3
+ Enables any language to call functions registered in any other language.
4
+
5
+ Re-architected for Phase 0 Core Repair to use Redis Lists/PubSub.
6
+ """
7
+
8
+ import json
9
+ import time
10
+ import uuid
11
+ import threading
12
+ from typing import Dict, Any, Callable, Optional, List
13
+ from dataclasses import dataclass, field, asdict
14
+ from enum import IntEnum
15
+ from functools import wraps
16
+
17
+ from .logging import get_logger
18
+
19
+ logger = get_logger("nexus.rpc")
20
+
21
class CallStatus(IntEnum):
    """Lifecycle states of a single RPC call."""

    PENDING = 0      # queued, not yet picked up by a worker
    PROCESSING = 1   # a handler is currently running
    COMPLETED = 2    # handler finished successfully
    FAILED = 3       # handler raised, or function was not found
    TIMEOUT = 4      # caller gave up waiting for a result
27
+
28
@dataclass
class FunctionInfo:
    """Registry entry describing an exported function and its owning process."""

    name: str
    language: str
    process_id: str
    param_types: List[str] = field(default_factory=list)
    return_type: str = "any"

    def to_dict(self):
        """Serialize to a plain dict suitable for JSON encoding."""
        return asdict(self)

    @classmethod
    def from_dict(cls, d):
        """Rebuild an instance from a dict produced by to_dict()."""
        return cls(**d)
39
+
40
@dataclass
class FunctionCall:
    """One in-flight RPC invocation, serializable for queueing over Redis."""

    call_id: str
    function_name: str
    args: List[Any]
    kwargs: Dict[str, Any]
    caller_id: str
    timestamp: float
    status: CallStatus = CallStatus.PENDING
    result: Any = None
    error: Optional[str] = None

    def to_dict(self):
        """Serialize, flattening the status enum to its integer value."""
        payload = asdict(self)
        payload['status'] = int(self.status)
        return payload

    @classmethod
    def from_dict(cls, d):
        """Rebuild from a dict, restoring CallStatus (defaults to PENDING)."""
        d['status'] = CallStatus(d.get('status', 0))
        return cls(**d)
61
+
62
class NexusRPC:
    """
    Redis-backed RPC system.

    Routing model: each process owns a dedicated Redis List
    (``nexus:rpc:queue:<pid>``) acting as its task queue, and results are
    delivered back to the caller over a per-call Pub/Sub channel
    (``nexus:rpc:result:<call_id>``).
    """

    # Redis keys
    KEY_REGISTRY = "nexus:rpc:registry"
    KEY_QUEUE = "nexus:rpc:queue"
    KEY_RESULT_PREFIX = "nexus:rpc:result:"
    KEY_BROKER_LOCK = "nexus:rpc:broker_lock"

    def __init__(self, memory, mode: str = "event"):
        """
        Args:
            memory: Memory backend; expected to be a RedisNexusMemory
                exposing a ``_redis`` client attribute.
            mode: Operating mode label (stored but not otherwise used here).
        """
        self._memory = memory  # Expects RedisNexusMemory
        self._mode = mode
        self._local_handlers: Dict[str, Callable] = {}
        self._process_id = str(uuid.uuid4())[:8]
        self._running = False
        self._worker_thread: Optional[threading.Thread] = None

        # Without a Redis client, register() degrades to local-only and
        # call() raises.
        if not hasattr(memory, '_redis'):
            logger.warning("rpc_compatibility_mode", msg="Falling back to legacy memory (not supported)")
            self._redis = None
        else:
            self._redis = memory._redis

    def register(self, name: str, handler: Callable, **kwargs):
        """Register a function locally and publish it in the shared registry.

        Recognized kwargs: ``param_types`` (list of type names) and
        ``return_type`` (type name, defaults to "any").
        """
        self._local_handlers[name] = handler

        info = FunctionInfo(
            name=name,
            language="python",
            process_id=self._process_id,
            param_types=kwargs.get('param_types', []),
            return_type=kwargs.get('return_type', 'any')
        )

        if self._redis:
            self._redis.hset(self.KEY_REGISTRY, name, json.dumps(info.to_dict()))

        logger.info("rpc_registered", name=name)

    def unregister(self, name: str):
        """Remove *name* from the local handler table and the shared registry."""
        if name in self._local_handlers:
            del self._local_handlers[name]
        if self._redis:
            self._redis.hdel(self.KEY_REGISTRY, name)

    def export(self, name: str = None, **kwargs):
        """Decorator exporting a function via RPC under *name* (or its own name)."""
        def decorator(func: Callable):
            fn_name = name or func.__name__
            self.register(fn_name, func, **kwargs)

            @wraps(func)
            def wrapper(*args, **kw):
                return func(*args, **kw)
            return wrapper
        return decorator

    def call(self, function_name: str, *args, timeout: float = 10.0, **kwargs) -> Any:
        """
        Execute a remote function call and block until its result arrives.

        Args:
            function_name: Name the target was registered under.
            timeout: Seconds to wait for a result.

        Returns:
            The remote handler's return value.

        Raises:
            RuntimeError: No Redis connection, or the remote handler failed.
            ValueError: Function unknown, or registry entry has no owner.
            TimeoutError: No result arrived within *timeout* seconds.
        """
        if not self._redis:
            raise RuntimeError("RPC requires Redis connection")

        # 1. Look up the owning process in the shared registry.
        raw_info = self._redis.hget(self.KEY_REGISTRY, function_name)
        if not raw_info:
            raise ValueError(f"Function not found in registry: {function_name}")

        info = json.loads(raw_info)
        target_pid = info.get('process_id')

        if not target_pid:
            raise ValueError(f"Function {function_name} has no owner (orphaned registry entry?)")

        call_id = str(uuid.uuid4())
        call = FunctionCall(
            call_id=call_id,
            function_name=function_name,
            args=list(args),
            kwargs=kwargs,
            caller_id=self._process_id,
            timestamp=time.time()
        )

        result_channel = f"{self.KEY_RESULT_PREFIX}{call_id}"

        # 2. Subscribe BEFORE enqueueing the task. Subscribing afterwards
        #    (as the previous version did) can lose the result if a fast
        #    worker publishes before the subscription is active.
        pubsub = self._redis.pubsub()
        try:
            pubsub.subscribe(result_channel)

            # 3. Push onto the TARGET's dedicated queue (O(1) routing).
            target_queue = f"{self.KEY_QUEUE}:{target_pid}"
            self._redis.rpush(target_queue, json.dumps(call.to_dict()))

            # 4. Poll with get_message(); listen() blocks indefinitely,
            #    which made the old post-loop timeout check unreachable.
            deadline = time.time() + timeout
            while True:
                remaining = deadline - time.time()
                if remaining <= 0:
                    raise TimeoutError(f"RPC Timeout: {function_name} after {timeout}s")

                message = pubsub.get_message(timeout=min(1.0, remaining))
                if not message or message.get('type') != 'message':
                    continue  # subscribe confirmations / idle polls

                try:
                    res = FunctionCall.from_dict(json.loads(message['data']))
                except (json.JSONDecodeError, TypeError, KeyError) as e:
                    # Malformed payload on our channel; keep waiting.
                    logger.error("rpc_result_parse_error", call_id=call_id, error=str(e))
                    continue

                if res.status == CallStatus.FAILED:
                    raise RuntimeError(res.error)
                return res.result
        finally:
            # Best-effort close; failures here must not mask the outcome.
            try:
                pubsub.close()
            except Exception:
                pass

    def start_handler(self):
        """Start the background worker thread (idempotent)."""
        if self._running:
            return
        self._running = True
        self._worker_thread = threading.Thread(target=self._worker_loop, daemon=True)
        self._worker_thread.start()
        logger.info("rpc_worker_started", pid=self._process_id)

    def stop_handler(self):
        """Signal the worker to stop and wait briefly for it to exit."""
        self._running = False
        if self._worker_thread:
            self._worker_thread.join(timeout=1.0)

    def _worker_loop(self):
        """Continuously pop and execute calls from this process's queue."""
        my_queue = f"{self.KEY_QUEUE}:{self._process_id}"

        while self._running:
            try:
                if not self._redis:
                    time.sleep(1)
                    continue

                try:
                    # Blocking pop with a 1s timeout so self._running is
                    # re-checked regularly.
                    item = self._redis.blpop(my_queue, timeout=1)
                    if not item:
                        continue

                    _, data = item
                    call = FunctionCall.from_dict(json.loads(data))

                    # Anything in our queue is addressed to us; no
                    # ownership check is needed.
                    self._execute_call(call)

                except Exception as blpop_error:
                    logger.error("rpc_blpop_error", error=str(blpop_error))
                    # Brief pause to avoid a tight loop on connection errors.
                    time.sleep(0.1)
                    continue

            except Exception as e:
                logger.error("rpc_worker_error", error=str(e))
                time.sleep(1)

    def _execute_call(self, call: FunctionCall):
        """Execute *call* locally and publish the outcome on its result channel."""
        try:
            handler = self._local_handlers.get(call.function_name)
            if handler is not None:
                call.status = CallStatus.PROCESSING
                call.result = handler(*call.args, **call.kwargs)
                call.status = CallStatus.COMPLETED
            else:
                # Function not found in this process.
                call.status = CallStatus.FAILED
                call.error = f"Function '{call.function_name}' not found in this module"
                logger.error("rpc_function_not_found", function_name=call.function_name)
        except Exception as e:
            call.status = CallStatus.FAILED
            call.error = str(e)
            logger.error("rpc_execution_failed", fn=call.function_name, error=str(e))

        # Publish the result. PUBLISH does not create a key, so the old
        # EXPIRE on the channel name was a no-op and has been removed.
        result_channel = f"{self.KEY_RESULT_PREFIX}{call.call_id}"
        try:
            self._redis.publish(result_channel, json.dumps(call.to_dict()))
        except Exception as e:
            # e.g. non-JSON-serializable result; log rather than kill the worker.
            logger.error("rpc_result_publish_failed", call_id=call.call_id, error=str(e))

        logger.debug("rpc_call_executed", call_id=call.call_id, status=call.status.name)
278
+
279
+
280
+
281
# Dummy Registry class for backwards compatibility imports if needed
class FunctionRegistry:
    """No-op placeholder; the Redis-backed registry replaced this class."""

    def __init__(self, *args):
        # Accept and ignore any positional arguments legacy callers pass.
        pass
284
+
285
def nexus_export(name=None, **kwargs):
    """Backwards-compatible no-op decorator: returns the function unchanged."""
    def dec(f):
        return f
    return dec
qyro/common/sandbox.py ADDED
@@ -0,0 +1,191 @@
1
+ """
2
+ Nexus Secure Sandbox System
3
+ Provides containerized execution for untrusted code using Docker isolation.
4
+ """
5
+
6
+ import docker
7
+ import tempfile
8
+ import os
9
+ import json
10
+ from pathlib import Path
11
+ from typing import Dict, Any, Optional, List
12
+ import time
13
+ import signal
14
+ import subprocess
15
+
16
+
17
class NexusSandbox:
    """
    Secure execution environment for untrusted code using Docker containers.

    Features:
    - Isolated container execution
    - Resource limits (CPU, memory)
    - Network isolation
    - Read-only code mount
    - Timeout enforcement (via Container.wait)
    """

    def __init__(self):
        """Connect to the local Docker daemon; raise RuntimeError if unreachable."""
        try:
            self.client = docker.from_env()
            # Fail fast if the daemon is not actually reachable.
            self.client.ping()
        except Exception as e:
            raise RuntimeError(f"Docker is required for sandboxing: {e}")

    def execute_untrusted_code(
        self,
        code: str,
        language: str,
        timeout: int = 30,
        memory_limit: str = "128m",
        cpu_quota: int = 100000,  # microseconds per 100ms period; 100000 == 1 full CPU
        network_disabled: bool = True
    ) -> Dict[str, Any]:
        """
        Execute untrusted code in a secure Docker container.

        Args:
            code: Source code to execute
            language: Programming language ('python', 'javascript', 'go', etc.)
            timeout: Execution timeout in seconds
            memory_limit: Memory limit (e.g., '128m', '1g')
            cpu_quota: CPU quota in microseconds per period
            network_disabled: Whether to disable network access

        Returns:
            Dictionary with 'success', 'stdout', 'stderr', 'exit_code' and,
            on failure, an 'error' message.

        Raises:
            ValueError: If *language* is not supported.
        """
        # Map language to Docker image
        image_map = {
            'python': 'python:3.11-alpine',
            'js': 'node:18-alpine',
            'javascript': 'node:18-alpine',
            'go': 'golang:1.21-alpine',
            'rust': 'rust:1.75-slim',
            'java': 'openjdk:17-jdk-slim',
            'c': 'gcc:11.4-bookworm',
            'cpp': 'gcc:11.4-bookworm',
            'ruby': 'ruby:3.2-alpine',
            'php': 'php:8.2-cli-alpine',
            'lua': 'lua:5.4-alpine'
        }

        if language not in image_map:
            raise ValueError(f"Unsupported language for sandbox: {language}")

        image = image_map[language]

        # Write the code into a throwaway directory that is mounted read-only.
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            file_extensions = {
                'python': '.py',
                'js': '.js',
                'javascript': '.js',
                'go': '.go',
                'rust': '.rs',
                'java': '.java',
                'c': '.c',
                'cpp': '.cpp',
                'ruby': '.rb',
                'php': '.php',
                'lua': '.lua'
            }

            ext = file_extensions[language]
            code_file = temp_path / f"code{ext}"
            code_file.write_text(code)

            # Execution command per language (compiled languages build first).
            commands = {
                'python': ['python', '/code/code.py'],
                'js': ['node', '/code/code.js'],
                'javascript': ['node', '/code/code.js'],
                'go': ['sh', '-c', 'cd /code && go run *.go'],
                'rust': ['sh', '-c', 'rustc /code/code.rs -o /tmp/program && /tmp/program'],
                'java': ['sh', '-c', 'cd /code && javac *.java && java $(ls *.class | head -c -7)'],
                'c': ['sh', '-c', 'cd /code && gcc code.c -o program && ./program'],
                'cpp': ['sh', '-c', 'cd /code && g++ code.cpp -o program && ./program'],
                'ruby': ['ruby', '/code/code.rb'],
                'php': ['php', '/code/code.php'],
                'lua': ['lua', '/code/code.lua']
            }

            cmd = commands[language]

            container = None
            try:
                # Run detached: docker-py's containers.run() has no `timeout`
                # parameter (the old code passed one), and a detached
                # container lets us enforce the timeout via wait() below.
                container = self.client.containers.run(
                    image=image,
                    command=cmd,
                    volumes={str(temp_path): {'bind': '/code', 'mode': 'ro'}},  # read-only mount
                    network_mode='none' if network_disabled else None,
                    mem_limit=memory_limit,
                    cpu_quota=cpu_quota,
                    environment={
                        'HOME': '/tmp',
                        'PATH': '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
                    },
                    working_dir='/code',
                    detach=True,
                )

                # Raises (requests timeout error) if the deadline passes.
                status = container.wait(timeout=timeout)
                exit_code = status.get('StatusCode', -1)

                # With detach=True the streams can be fetched separately;
                # the old code indexed into a combined bytes blob, which
                # crashed (bytes[0] is an int, not decodable).
                stdout = container.logs(stdout=True, stderr=False).decode('utf-8', errors='replace')
                stderr = container.logs(stdout=False, stderr=True).decode('utf-8', errors='replace')

                return {
                    'success': exit_code == 0,
                    'stdout': stdout,
                    'stderr': stderr,
                    'exit_code': exit_code
                }

            except Exception as e:
                return {
                    'success': False,
                    'stdout': '',
                    'stderr': '',
                    'exit_code': -1,
                    'error': str(e)
                }
            finally:
                # Always reap the container (it may still be running after
                # a timeout); replaces the old remove=True auto-cleanup.
                if container is not None:
                    try:
                        container.remove(force=True)
                    except Exception:
                        pass

    def is_available(self) -> bool:
        """Check if Docker is available for sandboxing."""
        try:
            self.client.ping()
            return True
        except Exception:
            return False
176
+
177
+
178
# Lazily created module-level singleton
_nexus_sandbox = None


def get_sandbox() -> Optional[NexusSandbox]:
    """Return the shared sandbox instance, or None when Docker is unavailable."""
    global _nexus_sandbox
    if _nexus_sandbox is not None:
        return _nexus_sandbox
    try:
        _nexus_sandbox = NexusSandbox()
    except RuntimeError:
        # Docker not available
        return None
    return _nexus_sandbox
@@ -0,0 +1,33 @@
1
+ import json
2
+ from .type_generator import QyroTypeGenerator
3
+ import os
4
+
5
class QyroSchemaLoader:
    """Parses a JSON schema block and emits typed bindings per target language."""

    def __init__(self):
        pass

    def process_schema(self, schema_block: str, output_dir="nexus_generated"):
        """Parse *schema_block* and write generated type files into *output_dir*.

        Returns the parsed schema dict, or None when the block is not valid JSON.
        """
        try:
            schema = json.loads(schema_block)
        except json.JSONDecodeError as e:
            print(f"[NEXUS] Error parsing schema block: {e}")
            return None

        generator = QyroTypeGenerator(schema)
        os.makedirs(output_dir, exist_ok=True)

        # (output filename, renderer) pairs — one file per target language.
        outputs = (
            ("nexus_types.h", generator.generate_c_structs),
            ("nexus_types.rs", generator.generate_rust_structs),
            ("GlobalState.java", generator.generate_java_class),
            ("types.ts", generator.generate_ts_interface),
        )
        for filename, render in outputs:
            with open(os.path.join(output_dir, filename), "w") as f:
                f.write(render())

        print(f"[NEXUS] Schema processed. Types generated in {output_dir}/")
        return schema