qyro-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. qyro/__init__.py +17 -0
  2. qyro/adapters/__init__.py +4 -0
  3. qyro/adapters/language_adapters/__init__.py +4 -0
  4. qyro/adapters/language_adapters/c/__init__.py +4 -0
  5. qyro/adapters/language_adapters/python/__init__.py +4 -0
  6. qyro/adapters/language_adapters/python/python_adapter.py +584 -0
  7. qyro/cli/__init__.py +8 -0
  8. qyro/cli/__main__.py +5 -0
  9. qyro/cli/cli.py +392 -0
  10. qyro/cli/interactive.py +297 -0
  11. qyro/common/__init__.py +37 -0
  12. qyro/common/animation.py +82 -0
  13. qyro/common/builder.py +434 -0
  14. qyro/common/compiler.py +895 -0
  15. qyro/common/config.py +93 -0
  16. qyro/common/constants.py +99 -0
  17. qyro/common/errors.py +176 -0
  18. qyro/common/frontend.py +74 -0
  19. qyro/common/health.py +358 -0
  20. qyro/common/kafka_manager.py +192 -0
  21. qyro/common/logging.py +149 -0
  22. qyro/common/memory.py +147 -0
  23. qyro/common/metrics.py +301 -0
  24. qyro/common/monitoring.py +468 -0
  25. qyro/common/parser.py +91 -0
  26. qyro/common/platform.py +609 -0
  27. qyro/common/redis_memory.py +1108 -0
  28. qyro/common/rpc.py +287 -0
  29. qyro/common/sandbox.py +191 -0
  30. qyro/common/schema_loader.py +33 -0
  31. qyro/common/secure_sandbox.py +490 -0
  32. qyro/common/toolchain_validator.py +617 -0
  33. qyro/common/type_generator.py +176 -0
  34. qyro/common/validation.py +401 -0
  35. qyro/common/validator.py +204 -0
  36. qyro/gateway/__init__.py +8 -0
  37. qyro/gateway/gateway.py +303 -0
  38. qyro/orchestrator/__init__.py +8 -0
  39. qyro/orchestrator/orchestrator.py +1223 -0
  40. qyro-2.0.0.dist-info/METADATA +244 -0
  41. qyro-2.0.0.dist-info/RECORD +45 -0
  42. qyro-2.0.0.dist-info/WHEEL +5 -0
  43. qyro-2.0.0.dist-info/entry_points.txt +2 -0
  44. qyro-2.0.0.dist-info/licenses/LICENSE +21 -0
  45. qyro-2.0.0.dist-info/top_level.txt +1 -0
qyro/common/health.py ADDED
@@ -0,0 +1,358 @@
+ """
+ Nexus Health Check System
+ Provides health check endpoints and metrics for monitoring.
+ """
+
+ import time
+ import os
+ import psutil
+ from typing import Dict, Any, List, Optional
+ from dataclasses import dataclass, field
+ from enum import Enum
+
+ from .logging import get_logger
+ from .constants import PROTOCOL_VERSION, MEM_FILE
+
+ logger = get_logger("nexus.health")
+
+
+ class HealthStatus(Enum):
+     """Health status levels."""
+     HEALTHY = "healthy"
+     DEGRADED = "degraded"
+     UNHEALTHY = "unhealthy"
+     UNKNOWN = "unknown"
+
+
+ @dataclass
+ class ComponentHealth:
+     """Health status of a single component."""
+     name: str
+     status: HealthStatus
+     message: str = ""
+     latency_ms: float = 0.0
+     metadata: Dict[str, Any] = field(default_factory=dict)
+
+
+ @dataclass
+ class SystemHealth:
+     """Overall system health status."""
+     status: HealthStatus
+     version: str
+     uptime_seconds: float
+     components: List[ComponentHealth]
+     timestamp: float = field(default_factory=time.time)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "status": self.status.value,
+             "version": self.version,
+             "uptime_seconds": round(self.uptime_seconds, 2),
+             "timestamp": self.timestamp,
+             "components": [
+                 {
+                     "name": c.name,
+                     "status": c.status.value,
+                     "message": c.message,
+                     "latency_ms": round(c.latency_ms, 2),
+                     "metadata": c.metadata
+                 }
+                 for c in self.components
+             ]
+         }
+
+
+ class HealthChecker:
+     """
+     Health check system for Nexus runtime.
+
+     Checks:
+     - Memory availability and integrity
+     - Lock file accessibility
+     - Process status
+     - Resource usage
+     """
+
+     def __init__(self, memory=None, orchestrator=None):
+         self._memory = memory
+         self._orchestrator = orchestrator
+         self._start_time = time.time()
+         self._version = f"2.0.0-nbp{PROTOCOL_VERSION}"
+
+     def check(self) -> SystemHealth:
+         """Perform full health check."""
+         components = []
+
+         # Check memory
+         components.append(self._check_memory())
+
+         # Check lock file
+         components.append(self._check_lock())
+
+         # Check processes
+         if self._orchestrator:
+             components.append(self._check_processes())
+
+         # Check system resources
+         components.append(self._check_resources())
+
+         # Determine overall status
+         statuses = [c.status for c in components]
+         if all(s == HealthStatus.HEALTHY for s in statuses):
+             overall = HealthStatus.HEALTHY
+         elif any(s == HealthStatus.UNHEALTHY for s in statuses):
+             overall = HealthStatus.UNHEALTHY
+         else:
+             overall = HealthStatus.DEGRADED
+
+         return SystemHealth(
+             status=overall,
+             version=self._version,
+             uptime_seconds=time.time() - self._start_time,
+             components=components
+         )
+
+     def _check_memory(self) -> ComponentHealth:
+         """Check shared memory health."""
+         start = time.time()
+
+         try:
+             if not os.path.exists(MEM_FILE):
+                 return ComponentHealth(
+                     name="memory",
+                     status=HealthStatus.UNHEALTHY,
+                     message="Memory file not found"
+                 )
+
+             # Check file size
+             size = os.path.getsize(MEM_FILE)
+             if size < 1000:
+                 return ComponentHealth(
+                     name="memory",
+                     status=HealthStatus.DEGRADED,
+                     message=f"Memory file too small: {size} bytes"
+                 )
+
+             # Try to read if memory object available
+             if self._memory:
+                 try:
+                     stats = self._memory.get_stats()
+                     latency = (time.time() - start) * 1000
+
+                     return ComponentHealth(
+                         name="memory",
+                         status=HealthStatus.HEALTHY,
+                         message="Memory accessible",
+                         latency_ms=latency,
+                         metadata={
+                             "size_mb": round(stats.get("total_size", 0) / 1024 / 1024, 2),
+                             "utilization": round(stats.get("utilization", 0) * 100, 1),
+                             "sequence": stats.get("sequence", 0),
+                             "encrypted": stats.get("encrypted", False),
+                             "protocol": stats.get("protocol_version", 0)
+                         }
+                     )
+                 except Exception as e:
+                     return ComponentHealth(
+                         name="memory",
+                         status=HealthStatus.DEGRADED,
+                         message=f"Memory read error: {str(e)}"
+                     )
+
+             return ComponentHealth(
+                 name="memory",
+                 status=HealthStatus.HEALTHY,
+                 message=f"Memory file exists ({size} bytes)",
+                 latency_ms=(time.time() - start) * 1000
+             )
+
+         except Exception as e:
+             return ComponentHealth(
+                 name="memory",
+                 status=HealthStatus.UNHEALTHY,
+                 message=str(e)
+             )
+
+     def _check_lock(self) -> ComponentHealth:
+         """Check lock file accessibility."""
+         lock_file = ".nexus_global.lock"
+         start = time.time()
+
+         try:
+             # Try to open lock file
+             with open(lock_file, 'a') as f:
+                 pass
+
+             return ComponentHealth(
+                 name="lock",
+                 status=HealthStatus.HEALTHY,
+                 message="Lock file accessible",
+                 latency_ms=(time.time() - start) * 1000
+             )
+         except Exception as e:
+             return ComponentHealth(
+                 name="lock",
+                 status=HealthStatus.DEGRADED,
+                 message=f"Lock file issue: {str(e)}"
+             )
+
+     def _check_processes(self) -> ComponentHealth:
+         """Check supervised process health."""
+         try:
+             if not self._orchestrator:
+                 return ComponentHealth(
+                     name="processes",
+                     status=HealthStatus.UNKNOWN,
+                     message="No orchestrator"
+                 )
+
+             status = self._orchestrator.get_status()
+             processes = status.get("processes", [])
+
+             running = sum(1 for p in processes if p.get("status") == "running")
+             total = len(processes)
+
+             if running == total:
+                 return ComponentHealth(
+                     name="processes",
+                     status=HealthStatus.HEALTHY,
+                     message=f"All {total} processes running",
+                     metadata={"running": running, "total": total}
+                 )
+             elif running > 0:
+                 return ComponentHealth(
+                     name="processes",
+                     status=HealthStatus.DEGRADED,
+                     message=f"{running}/{total} processes running",
+                     metadata={"running": running, "total": total}
+                 )
+             else:
+                 return ComponentHealth(
+                     name="processes",
+                     status=HealthStatus.UNHEALTHY,
+                     message="No processes running",
+                     metadata={"running": 0, "total": total}
+                 )
+
+         except Exception as e:
+             return ComponentHealth(
+                 name="processes",
+                 status=HealthStatus.UNKNOWN,
+                 message=str(e)
+             )
+
+     def _check_resources(self) -> ComponentHealth:
+         """Check system resource usage."""
+         try:
+             cpu_percent = psutil.cpu_percent(interval=0.1)
+             memory = psutil.virtual_memory()
+             disk = psutil.disk_usage('.')
+
+             # Determine status based on usage
+             if memory.percent > 90 or disk.percent > 90:
+                 status = HealthStatus.UNHEALTHY
+                 message = "Critical resource usage"
+             elif memory.percent > 75 or disk.percent > 80:
+                 status = HealthStatus.DEGRADED
+                 message = "High resource usage"
+             else:
+                 status = HealthStatus.HEALTHY
+                 message = "Resources OK"
+
+             return ComponentHealth(
+                 name="resources",
+                 status=status,
+                 message=message,
+                 metadata={
+                     "cpu_percent": round(cpu_percent, 1),
+                     "memory_percent": round(memory.percent, 1),
+                     "memory_available_gb": round(memory.available / 1024 / 1024 / 1024, 2),
+                     "disk_percent": round(disk.percent, 1),
+                     "disk_free_gb": round(disk.free / 1024 / 1024 / 1024, 2)
+                 }
+             )
+
+         except Exception as e:
+             return ComponentHealth(
+                 name="resources",
+                 status=HealthStatus.UNKNOWN,
+                 message=str(e)
+             )
+
+     def liveness(self) -> bool:
+         """Simple liveness check (is the process alive)."""
+         return True
+
+     def readiness(self) -> bool:
+         """
+         Readiness check (is the service ready to handle requests).
+         Returns True if memory is accessible.
+         """
+         try:
+             return os.path.exists(MEM_FILE)
+         except Exception:
+             return False
+
+
+ # HTTP Health Endpoint Handler (for FastAPI/Flask integration)
+ def create_health_routes(app, health_checker: HealthChecker):
+     """
+     Add health check routes to a FastAPI or Flask app.
+
+     Usage (FastAPI):
+         from qyro.common.health import HealthChecker, create_health_routes
+         health = HealthChecker(memory)
+         create_health_routes(app, health)
+     """
+     try:
+         # Try FastAPI
+         from fastapi import APIRouter
+         router = APIRouter()
+
+         @router.get("/health")
+         def health():
+             return health_checker.check().to_dict()
+
+         @router.get("/health/live")
+         def liveness():
+             return {"status": "alive" if health_checker.liveness() else "dead"}
+
+         @router.get("/health/ready")
+         def readiness():
+             return {"status": "ready" if health_checker.readiness() else "not_ready"}
+
+         app.include_router(router)
+         return router
+
+     except ImportError:
+         pass
+
+     try:
+         # Try Flask
+         from flask import jsonify
+
+         @app.route("/health")
+         def health():
+             return jsonify(health_checker.check().to_dict())
+
+         @app.route("/health/live")
+         def liveness():
+             return jsonify({"status": "alive" if health_checker.liveness() else "dead"})
+
+         @app.route("/health/ready")
+         def readiness():
+             return jsonify({"status": "ready" if health_checker.readiness() else "not_ready"})
+
+     except ImportError:
+         logger.warning("no_web_framework", msg="Neither FastAPI nor Flask found")
+
+
+ # Global health checker instance
+ _health_checker: Optional[HealthChecker] = None
+
+ def get_health_checker(memory=None, orchestrator=None) -> HealthChecker:
+     """Get or create the global health checker."""
+     global _health_checker
+     if _health_checker is None:
+         _health_checker = HealthChecker(memory, orchestrator)
+     return _health_checker
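
For orientation, a minimal wiring sketch of this module, assuming a FastAPI app object; the import path follows the package layout above, but the wiring itself is illustrative, not shipped code:

# Hypothetical wiring sketch (not part of the package).
from fastapi import FastAPI
from qyro.common.health import get_health_checker, create_health_routes

app = FastAPI()
# Without memory/orchestrator objects, only the file and resource checks run.
health = get_health_checker()
create_health_routes(app, health)
# GET /health       -> full SystemHealth report as JSON
# GET /health/live  -> {"status": "alive"}
# GET /health/ready -> {"status": "ready"} once MEM_FILE exists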
qyro/common/kafka_manager.py ADDED
@@ -0,0 +1,192 @@
+ """
+ Kafka Manager for Nexus
+ Handles Kafka integration for inter-module communication.
+ """
+
+ import asyncio
+ import json
+ import time
+ from typing import Dict, Any, Callable, Optional
+ from dataclasses import dataclass
+ from aiokafka import AIOKafkaProducer, AIOKafkaConsumer
+
+ from .config import QyroConfig
+ from .logging import get_logger
+
+
+ logger = get_logger("nexus.kafka_manager")
+
+
+ @dataclass
+ class KafkaConfig:
+     """Configuration for Kafka integration."""
+     bootstrap_servers: str = "localhost:9092"
+     topic_prefix: str = "nexus_"
+     consumer_group: str = "nexus_group"
+     enable_auto_commit: bool = True
+     auto_offset_reset: str = "earliest"
+
+
+ class KafkaManager:
+     """Manages Kafka producers and consumers for Nexus modules."""
+
+     def __init__(self, config: QyroConfig):
+         self.config = config
+         self.producer: Optional[AIOKafkaProducer] = None
+         self.consumer: Optional[AIOKafkaConsumer] = None
+         self.running = False
+
+         # Initialize Kafka configuration
+         self.kafka_config = KafkaConfig(
+             bootstrap_servers=config.kafka_bootstrap_servers
+         )
+
+         logger.info(f"Kafka manager initialized with servers: {self.kafka_config.bootstrap_servers}")
+
+     async def start_producer(self):
+         """Start the Kafka producer."""
+         try:
+             self.producer = AIOKafkaProducer(
+                 bootstrap_servers=self.kafka_config.bootstrap_servers.split(','),
+                 value_serializer=lambda x: json.dumps(x).encode('utf-8'),
+                 acks='all'
+             )
+             await self.producer.start()
+             logger.info("Kafka producer started successfully")
+         except Exception as e:
+             logger.error(f"Failed to start Kafka producer: {e}")
+             raise
+
+     async def start_consumer(self, topics: list):
+         """Start the Kafka consumer."""
+         try:
+             self.consumer = AIOKafkaConsumer(
+                 *topics,
+                 bootstrap_servers=self.kafka_config.bootstrap_servers.split(','),
+                 value_deserializer=lambda x: json.loads(x.decode('utf-8')),
+                 group_id=self.kafka_config.consumer_group,
+                 enable_auto_commit=self.kafka_config.enable_auto_commit,
+                 auto_offset_reset=self.kafka_config.auto_offset_reset
+             )
+             await self.consumer.start()
+             logger.info(f"Kafka consumer started successfully for topics: {topics}")
+         except Exception as e:
+             logger.error(f"Failed to start Kafka consumer: {e}")
+             raise
+
+     async def stop_producer(self):
+         """Stop the Kafka producer."""
+         if self.producer:
+             await self.producer.stop()
+             logger.info("Kafka producer stopped")
+
+     async def stop_consumer(self):
+         """Stop the Kafka consumer."""
+         if self.consumer:
+             await self.consumer.stop()
+             logger.info("Kafka consumer stopped")
+
+     async def send_message(self, topic: str, message: Dict[str, Any], key: Optional[str] = None):
+         """Send a message to a Kafka topic."""
+         if not self.producer:
+             await self.start_producer()
+
+         try:
+             await self.producer.send_and_wait(topic, message, key=key.encode('utf-8') if key else None)
+             logger.debug(f"Message sent to topic '{topic}': {message}")
+         except Exception as e:
+             logger.error(f"Failed to send message to topic '{topic}': {e}")
+             raise
+
+     async def consume_messages(self, topic: str, callback: Callable[[Dict[str, Any]], None]):
+         """Consume messages from a Kafka topic."""
+         if not self.consumer:
+             await self.start_consumer([topic])
+
+         try:
+             async for msg in self.consumer:
+                 if msg.topic == topic:
+                     logger.debug(f"Received message from topic '{topic}': {msg.value}")
+                     callback(msg.value)
+         except Exception as e:
+             logger.error(f"Error consuming messages from topic '{topic}': {e}")
+             raise
+
+     def start(self):
+         """Start the Kafka manager in a background task."""
+         self.running = True
+         # Start producer in background
+         asyncio.create_task(self._run_producer())
+
+     def stop(self):
+         """Stop the Kafka manager."""
+         self.running = False
+
+     async def _run_producer(self):
+         """Internal method to run the producer."""
+         await self.start_producer()
+         while self.running:
+             await asyncio.sleep(0.1)  # Keep the task alive
+
+     async def publish_state_change(self, state_diff: Dict[str, Any], module_name: str):
+         """Publish a state change to the state change topic."""
+         topic = f"{self.kafka_config.topic_prefix}state_change"
+         message = {
+             "module": module_name,
+             "timestamp": time.time(),
+             "state_diff": state_diff
+         }
+         await self.send_message(topic, message)
+
+     async def subscribe_to_state_changes(self, callback: Callable[[Dict[str, Any]], None]):
+         """Subscribe to state change events."""
+         topic = f"{self.kafka_config.topic_prefix}state_change"
+         await self.consume_messages(topic, callback)
+
+     async def publish_module_event(self, event_type: str, module_name: str, data: Dict[str, Any]):
+         """Publish a module event to the events topic."""
+         topic = f"{self.kafka_config.topic_prefix}module_events"
+         message = {
+             "event_type": event_type,
+             "module": module_name,
+             "timestamp": time.time(),
+             "data": data
+         }
+         await self.send_message(topic, message)
+
+     async def publish_rpc_request(self, func_name: str, args: Dict[str, Any], request_id: str):
+         """Publish an RPC request to the appropriate topic."""
+         topic = f"{self.kafka_config.topic_prefix}rpc_requests"
+         message = {
+             "func_name": func_name,
+             "args": args,
+             "request_id": request_id,
+             "timestamp": time.time()
+         }
+         await self.send_message(topic, message)
+
+     async def publish_rpc_response(self, request_id: str, result: Any, error: Optional[str] = None):
+         """Publish an RPC response to the appropriate topic."""
+         topic = f"{self.kafka_config.topic_prefix}rpc_responses"
+         message = {
+             "request_id": request_id,
+             "result": result,
+             "error": error,
+             "timestamp": time.time()
+         }
+         await self.send_message(topic, message)
+
+     async def broadcast_message(self, message: Dict[str, Any], broadcast_id: str):
+         """Broadcast a message to all modules."""
+         topic = f"{self.kafka_config.topic_prefix}broadcast"
+         broadcast_msg = {
+             "broadcast_id": broadcast_id,
+             "message": message,
+             "timestamp": time.time()
+         }
+         await self.send_message(topic, broadcast_msg)
+
+     async def subscribe_to_broadcasts(self, callback: Callable[[Dict[str, Any]], None]):
+         """Subscribe to broadcast messages."""
+         topic = f"{self.kafka_config.topic_prefix}broadcast"
+         await self.consume_messages(topic, callback)
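
A minimal send-side sketch of the RPC topics this manager defines, assuming a QyroConfig that exposes kafka_bootstrap_servers (its exact constructor is not shown in this diff) and a reachable broker:

# Hypothetical usage sketch (not part of the package).
import asyncio
from qyro.common.config import QyroConfig
from qyro.common.kafka_manager import KafkaManager

async def main():
    config = QyroConfig()  # assumption: defaults include kafka_bootstrap_servers="localhost:9092"
    manager = KafkaManager(config)
    # Lands on "nexus_rpc_requests" (topic_prefix + "rpc_requests").
    await manager.publish_rpc_request("add", {"a": 1, "b": 2}, request_id="req-1")
    # A responder would publish the matching result on "nexus_rpc_responses".
    await manager.publish_rpc_response("req-1", result=3)
    await manager.stop_producer()

asyncio.run(main())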
qyro/common/logging.py ADDED
@@ -0,0 +1,149 @@
+ """
+ Qyro Structured Logging
+ JSON-based logging with severity levels for cross-language consistency.
+ """
+
+ import json
+ import sys
+ from datetime import datetime, timezone
+ from enum import IntEnum
+ from typing import Any, Dict, Optional
+ from pathlib import Path
+
+
+ class LogLevel(IntEnum):
+     DEBUG = 10
+     INFO = 20
+     WARNING = 30
+     ERROR = 40
+     CRITICAL = 50
+
+
+ class QyroLogger:
+     """
+     Structured JSON logger for Qyro system.
+     Outputs machine-readable logs that can be aggregated across languages.
+     """
+
+     _instances: Dict[str, 'QyroLogger'] = {}
+
+     def __init__(
+         self,
+         name: str = "qyro",
+         level: LogLevel = LogLevel.INFO,
+         output_file: Optional[str] = None,
+         json_output: bool = True
+     ):
+         self.name = name
+         self.level = level
+         self.json_output = json_output
+         self._file = None
+
+         if output_file:
+             Path(output_file).parent.mkdir(parents=True, exist_ok=True)
+             self._file = open(output_file, 'a', encoding='utf-8')
+
+     @classmethod
+     def get_logger(cls, name: str = "qyro") -> 'QyroLogger':
+         """Get or create a logger instance."""
+         if name not in cls._instances:
+             cls._instances[name] = cls(name)
+         return cls._instances[name]
+
+     def _format_message(
+         self,
+         level: LogLevel,
+         message: str,
+         **context
+     ) -> str:
+         """Format log message as JSON or plain text."""
+         timestamp = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
+
+         if self.json_output:
+             log_entry = {
+                 "timestamp": timestamp,
+                 "level": level.name,
+                 "logger": self.name,
+                 "message": message,
+                 **context
+             }
+             return json.dumps(log_entry, default=str)
+         else:
+             ctx_str = " ".join(f"{k}={v}" for k, v in context.items())
+             return f"[{timestamp}] [{level.name}] [{self.name}] {message} {ctx_str}".strip()
+
+     def _log(self, level: LogLevel, message: str, **context):
+         """Internal log method."""
+         if level < self.level:
+             return
+
+         formatted = self._format_message(level, message, **context)
+
+         # Output to stderr for errors, stdout for others
+         output = sys.stderr if level >= LogLevel.ERROR else sys.stdout
+         try:
+             print(formatted, file=output, flush=True)
+         except UnicodeEncodeError:
+             # Fallback for Windows consoles: use the stream's own encoding with replacements
+             encoding = getattr(output, 'encoding', 'utf-8') or 'utf-8'
+             safe_msg = formatted.encode(encoding, errors='replace').decode(encoding)
+             print(safe_msg, file=output, flush=True)
+
+         # Also write to file if configured
+         if self._file:
+             print(formatted, file=self._file, flush=True)
+
+     def debug(self, message: str, **context):
+         self._log(LogLevel.DEBUG, message, **context)
+
+     def info(self, message: str, **context):
+         self._log(LogLevel.INFO, message, **context)
+
+     def warning(self, message: str, **context):
+         self._log(LogLevel.WARNING, message, **context)
+
+     def error(self, message: str, **context):
+         self._log(LogLevel.ERROR, message, **context)
+
+     def critical(self, message: str, **context):
+         self._log(LogLevel.CRITICAL, message, **context)
+
+     def exception(self, message: str, exc: Exception, **context):
+         """Log exception with traceback info."""
+         import traceback
+         context['exception_type'] = type(exc).__name__
+         context['exception_message'] = str(exc)
+         context['traceback'] = traceback.format_exc()
+         self._log(LogLevel.ERROR, message, **context)
+
+     def close(self):
+         if self._file:
+             self._file.close()
+
+
+ def get_logger(name: str = "qyro") -> QyroLogger:
+     """Get a named logger instance."""
+     return QyroLogger.get_logger(name)
+
+
+ def set_log_level(level: LogLevel):
+     """Set the log level on the default "qyro" logger."""
+     get_logger().level = level
+
+
+ # Convenience functions
+ def debug(message: str, **context):
+     get_logger().debug(message, **context)
+
+ def info(message: str, **context):
+     get_logger().info(message, **context)
+
+ def warning(message: str, **context):
+     get_logger().warning(message, **context)
+
+ def error(message: str, **context):
+     get_logger().error(message, **context)
+
+ def critical(message: str, **context):
+     get_logger().critical(message, **context)
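
A short sketch of the logging API defined above; every name comes from this file, and the JSON line shown is the shape _format_message produces:

# Usage sketch (not part of the package).
from qyro.common.logging import get_logger, set_log_level, LogLevel

set_log_level(LogLevel.DEBUG)      # adjusts the default "qyro" logger only
log = get_logger("qyro.example")   # per-name singleton via QyroLogger._instances
log.info("module_started", module="example", pid=1234)
# -> {"timestamp": "...Z", "level": "INFO", "logger": "qyro.example",
#     "message": "module_started", "module": "example", "pid": 1234}
try:
    1 / 0
except ZeroDivisionError as e:
    log.exception("division_failed", e, module="example")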