netra-zen 1.0.10__py3-none-any.whl → 1.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,140 +1,140 @@
- #!/usr/bin/env python3
- """
- Verification script to prove JSONL logs are bundled in payload
- """
-
- import json
- import sys
- from pathlib import Path
-
- # Add parent directory to path
- sys.path.insert(0, str(Path(__file__).parent.parent))
-
- from scripts.agent_logs import collect_recent_logs
-
-
- def verify_log_bundling(log_path: str):
-     """
-     Verify that logs are properly collected and bundled
-
-     Args:
-         log_path: Path to JSONL file or directory
-     """
-     print("=" * 70)
-     print("JSONL LOG TRANSMISSION VERIFICATION")
-     print("=" * 70)
-     print()
-
-     # Step 1: Collect logs
-     print("Step 1: Collecting logs from file...")
-     result = collect_recent_logs(limit=1, base_path=log_path)
-
-     if not result:
-         print("❌ FAILED: No logs collected")
-         return False
-
-     logs, files_read, file_info = result
-     print(f"✓ Successfully collected {len(logs)} log entries from {files_read} file(s)")
-     print()
-
-     # Step 2: Show file details
-     print("Step 2: File details...")
-     for info in file_info:
-         print(f" File: {info['name']}")
-         print(f" Hash: {info['hash']}")
-         print(f" Entries: {info['entries']}")
-     print()
-
-     # Step 3: Simulate payload creation
-     print("Step 3: Simulating WebSocket payload creation...")
-     payload = {
-         "type": "message_create",
-         "run_id": "test-run-id",
-         "payload": {
-             "message": "Test message with logs",
-             "jsonl_logs": logs # This is where logs are added
-         }
-     }
-
-     print(f"✓ Payload created with 'jsonl_logs' key")
-     print(f" Payload keys: {list(payload['payload'].keys())}")
-     print()
-
-     # Step 4: Verify payload size
-     print("Step 4: Calculating payload size...")
-     payload_json = json.dumps(payload)
-     payload_size_bytes = len(payload_json.encode('utf-8'))
-     payload_size_kb = payload_size_bytes / 1024
-     payload_size_mb = payload_size_kb / 1024
-
-     if payload_size_mb >= 1:
-         size_str = f"{payload_size_mb:.2f} MB"
-     elif payload_size_kb >= 1:
-         size_str = f"{payload_size_kb:.2f} KB"
-     else:
-         size_str = f"{payload_size_bytes} bytes"
-
-     print(f"✓ Total payload size: {size_str}")
-     print()
-
-     # Step 5: Show sample log entries
-     print("Step 5: Sample log entries in payload...")
-     if logs:
-         print(f" First entry keys: {list(logs[0].keys())}")
-         print(f" First entry timestamp: {logs[0].get('timestamp', 'N/A')}")
-         print(f" Last entry timestamp: {logs[-1].get('timestamp', 'N/A')}")
-     print()
-
-     # Step 6: Verify transmission-ready
-     print("Step 6: Transmission verification...")
-     print(f"✓ Payload is valid JSON: {payload_json is not None}")
-     print(f"✓ Payload contains 'jsonl_logs': {'jsonl_logs' in payload['payload']}")
-     print(f"✓ Log count in payload: {len(payload['payload']['jsonl_logs'])}")
-     print()
-
-     print("=" * 70)
-     print("✅ VERIFICATION COMPLETE")
-     print("=" * 70)
-     print()
-     print("PROOF OF TRANSMISSION:")
-     print(f" • {len(logs)} JSONL log entries are bundled in the payload")
-     print(f" • Payload size: {size_str}")
-     print(f" • Ready for WebSocket transmission to backend")
-     print()
-
-     # Optional: Save proof file
-     proof_file = Path("/tmp/zen_transmission_proof.json")
-     proof_payload = {
-         "verification_timestamp": "verification_run",
-         "log_count": len(logs),
-         "files_read": files_read,
-         "file_info": file_info,
-         "payload_size": size_str,
-         "sample_first_entry": logs[0] if logs else None,
-         "sample_last_entry": logs[-1] if logs else None,
-         "payload_structure": {
-             "type": payload["type"],
-             "run_id": payload["run_id"],
-             "payload_keys": list(payload["payload"].keys()),
-             "jsonl_logs_present": "jsonl_logs" in payload["payload"],
-             "jsonl_logs_count": len(payload["payload"]["jsonl_logs"])
-         }
-     }
-
-     with open(proof_file, 'w') as f:
-         json.dump(proof_payload, f, indent=2)
-
-     print(f"📝 Detailed proof saved to: {proof_file}")
-     print()
-
-     return True
-
-
- if __name__ == "__main__":
-     if len(sys.argv) < 2:
-         print("Usage: python verify_log_transmission.py <path-to-jsonl-file>")
-         sys.exit(1)
-
-     log_path = sys.argv[1]
-     success = verify_log_bundling(log_path)
-     sys.exit(0 if success else 1)
+ #!/usr/bin/env python3
+ """
+ Verification script to prove JSONL logs are bundled in payload
+ """
+
+ import json
+ import sys
+ from pathlib import Path
+
+ # Add parent directory to path
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+ from scripts.agent_logs import collect_recent_logs
+
+
+ def verify_log_bundling(log_path: str):
+     """
+     Verify that logs are properly collected and bundled
+
+     Args:
+         log_path: Path to JSONL file or directory
+     """
+     print("=" * 70)
+     print("JSONL LOG TRANSMISSION VERIFICATION")
+     print("=" * 70)
+     print()
+
+     # Step 1: Collect logs
+     print("Step 1: Collecting logs from file...")
+     result = collect_recent_logs(limit=1, base_path=log_path)
+
+     if not result:
+         print("❌ FAILED: No logs collected")
+         return False
+
+     logs, files_read, file_info = result
+     print(f"✓ Successfully collected {len(logs)} log entries from {files_read} file(s)")
+     print()
+
+     # Step 2: Show file details
+     print("Step 2: File details...")
+     for info in file_info:
+         print(f" File: {info['name']}")
+         print(f" Hash: {info['hash']}")
+         print(f" Entries: {info['entries']}")
+     print()
+
+     # Step 3: Simulate payload creation
+     print("Step 3: Simulating WebSocket payload creation...")
+     payload = {
+         "type": "message_create",
+         "run_id": "test-run-id",
+         "payload": {
+             "message": "Test message with logs",
+             "jsonl_logs": logs # This is where logs are added
+         }
+     }
+
+     print(f"✓ Payload created with 'jsonl_logs' key")
+     print(f" Payload keys: {list(payload['payload'].keys())}")
+     print()
+
+     # Step 4: Verify payload size
+     print("Step 4: Calculating payload size...")
+     payload_json = json.dumps(payload)
+     payload_size_bytes = len(payload_json.encode('utf-8'))
+     payload_size_kb = payload_size_bytes / 1024
+     payload_size_mb = payload_size_kb / 1024
+
+     if payload_size_mb >= 1:
+         size_str = f"{payload_size_mb:.2f} MB"
+     elif payload_size_kb >= 1:
+         size_str = f"{payload_size_kb:.2f} KB"
+     else:
+         size_str = f"{payload_size_bytes} bytes"
+
+     print(f"✓ Total payload size: {size_str}")
+     print()
+
+     # Step 5: Show sample log entries
+     print("Step 5: Sample log entries in payload...")
+     if logs:
+         print(f" First entry keys: {list(logs[0].keys())}")
+         print(f" First entry timestamp: {logs[0].get('timestamp', 'N/A')}")
+         print(f" Last entry timestamp: {logs[-1].get('timestamp', 'N/A')}")
+     print()
+
+     # Step 6: Verify transmission-ready
+     print("Step 6: Transmission verification...")
+     print(f"✓ Payload is valid JSON: {payload_json is not None}")
+     print(f"✓ Payload contains 'jsonl_logs': {'jsonl_logs' in payload['payload']}")
+     print(f"✓ Log count in payload: {len(payload['payload']['jsonl_logs'])}")
+     print()
+
+     print("=" * 70)
+     print("✅ VERIFICATION COMPLETE")
+     print("=" * 70)
+     print()
+     print("PROOF OF TRANSMISSION:")
+     print(f" • {len(logs)} JSONL log entries are bundled in the payload")
+     print(f" • Payload size: {size_str}")
+     print(f" • Ready for WebSocket transmission to backend")
+     print()
+
+     # Optional: Save proof file
+     proof_file = Path("/tmp/zen_transmission_proof.json")
+     proof_payload = {
+         "verification_timestamp": "verification_run",
+         "log_count": len(logs),
+         "files_read": files_read,
+         "file_info": file_info,
+         "payload_size": size_str,
+         "sample_first_entry": logs[0] if logs else None,
+         "sample_last_entry": logs[-1] if logs else None,
+         "payload_structure": {
+             "type": payload["type"],
+             "run_id": payload["run_id"],
+             "payload_keys": list(payload["payload"].keys()),
+             "jsonl_logs_present": "jsonl_logs" in payload["payload"],
+             "jsonl_logs_count": len(payload["payload"]["jsonl_logs"])
+         }
+     }
+
+     with open(proof_file, 'w') as f:
+         json.dump(proof_payload, f, indent=2)
+
+     print(f"📝 Detailed proof saved to: {proof_file}")
+     print()
+
+     return True
+
+
+ if __name__ == "__main__":
+     if len(sys.argv) < 2:
+         print("Usage: python verify_log_transmission.py <path-to-jsonl-file>")
+         sys.exit(1)
+
+     log_path = sys.argv[1]
+     success = verify_log_bundling(log_path)
+     sys.exit(0 if success else 1)
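The script above (removed and re-added unchanged in this release) shows the exact shape the orchestrator uses when it attaches collected JSONL entries under the `jsonl_logs` key of a `message_create` WebSocket payload. A self-contained sketch of that bundling step, using a throwaway JSONL reader in place of `scripts.agent_logs.collect_recent_logs`; the reader helper and the log file name are illustrative, not part of the package:

```python
import json
from pathlib import Path


def load_jsonl_entries(path: str) -> list:
    """Illustrative stand-in for collect_recent_logs: parse one JSONL file into dicts."""
    entries = []
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        line = line.strip()
        if line:
            entries.append(json.loads(line))
    return entries


# Hypothetical log file; substitute a real JSONL agent log.
logs = load_jsonl_entries("agent_session.jsonl")

# Same payload shape the verification script builds before WebSocket transmission.
payload = {
    "type": "message_create",
    "run_id": "test-run-id",
    "payload": {
        "message": "Test message with logs",
        "jsonl_logs": logs,
    },
}

payload_json = json.dumps(payload)
print(f"{len(logs)} entries bundled, {len(payload_json.encode('utf-8'))} bytes serialized")
```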
zen/__init__.py CHANGED
@@ -1,7 +1,7 @@
- """Zen namespace package placeholder.
-
- This lightweight module exists so repository scripts can import
- `zen.telemetry` directly without requiring the full orchestrator package.
- """
-
- __all__: list[str] = []
+ """Zen namespace package placeholder.
+
+ This lightweight module exists so repository scripts can import
+ `zen.telemetry` directly without requiring the full orchestrator package.
+ """
+
+ __all__: list[str] = []
zen/__main__.py CHANGED
@@ -1,11 +1,11 @@
- """Module entry point to support `python -m zen` invocation."""
-
- from zen_orchestrator import run
-
-
- def main() -> None:
-     run()
-
-
- if __name__ == "__main__":
-     main()
+ """Module entry point to support `python -m zen` invocation."""
+
+ from zen_orchestrator import run
+
+
+ def main() -> None:
+     run()
+
+
+ if __name__ == "__main__":
+     main()
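Because `zen/__main__.py` only forwards to `zen_orchestrator.run()`, `python -m zen` behaves like the installed `zen` entry point. A minimal sketch of launching it from another Python process; it assumes `run()` reads its options from `sys.argv`, which this stub implies but does not show:

```python
import subprocess
import sys

# Run the orchestrator through the module entry point, the same path exercised by
# `python -m zen` on the command line. Any extra arguments appended to this list
# reach zen_orchestrator.run() through sys.argv.
result = subprocess.run([sys.executable, "-m", "zen"], check=False)
print(f"zen exited with code {result.returncode}")
```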
zen/telemetry/__init__.py CHANGED
@@ -1,11 +1,14 @@
- """Telemetry utilities exposed by the Zen package."""
-
- from .embedded_credentials import get_embedded_credentials, get_project_id
- from .manager import TelemetryManager, telemetry_manager
-
- __all__ = [
-     "TelemetryManager",
-     "telemetry_manager",
-     "get_embedded_credentials",
-     "get_project_id",
- ]
+ """Telemetry utilities exposed by the Zen package."""
+
+ from .embedded_credentials import get_embedded_credentials, get_project_id
+ from .manager import TelemetryManager, telemetry_manager
+ from .apex_telemetry import run_apex_with_telemetry, ApexTelemetryWrapper
+
+ __all__ = [
+     "TelemetryManager",
+     "telemetry_manager",
+     "get_embedded_credentials",
+     "get_project_id",
+     "run_apex_with_telemetry",
+     "ApexTelemetryWrapper",
+ ]
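The only functional change to `zen/telemetry/__init__.py` in this release is the re-export of the new apex wrapper, so callers can import it from the package root instead of the submodule. A quick sketch of the new surface; the `__all__` contents shown in the comment are the ones listed in the diff above:

```python
import zen.telemetry as telemetry

# The new exports sit alongside the existing manager and credential helpers:
# ['TelemetryManager', 'telemetry_manager', 'get_embedded_credentials',
#  'get_project_id', 'run_apex_with_telemetry', 'ApexTelemetryWrapper']
print(telemetry.__all__)

wrapper = telemetry.ApexTelemetryWrapper()  # same class as zen.telemetry.apex_telemetry exposes
```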
zen/telemetry/apex_telemetry.py ADDED
@@ -0,0 +1,259 @@
+ """Telemetry wrapper for apex instance tracking.
+
+ This module provides a lightweight wrapper around agent_cli.py subprocess calls
+ to emit OpenTelemetry spans for apex instances without modifying agent_cli.py.
+ """
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ import os
+ import subprocess
+ import sys
+ import time
+ from typing import Any, Dict, Optional
+
+ from .manager import telemetry_manager
+
+ logger = logging.getLogger(__name__)
+
+
+ class ApexTelemetryWrapper:
+     """Wrapper to track apex instance telemetry."""
+
+     def __init__(self):
+         self.start_time: Optional[float] = None
+         self.end_time: Optional[float] = None
+         self.exit_code: Optional[int] = None
+         self.message: Optional[str] = None
+         self.env: str = "staging"
+         self.stdout: str = ""
+         self.stderr: str = ""
+
+     def run_apex_with_telemetry(
+         self,
+         agent_cli_path: str,
+         filtered_argv: list,
+         env: Optional[Dict[str, str]] = None
+     ) -> int:
+         """Run agent_cli.py subprocess and emit telemetry span.
+
+         Args:
+             agent_cli_path: Path to agent_cli.py script
+             filtered_argv: Command-line arguments (without 'zen' and '--apex')
+             env: Environment variables to pass to subprocess
+
+         Returns:
+             Exit code from agent_cli subprocess
+         """
+         self.start_time = time.time()
+
+         # Extract message from argv for telemetry
+         self.message = self._extract_message(filtered_argv)
+         self.env = self._extract_env(filtered_argv)
+
+         # Build command
+         cmd = [sys.executable, agent_cli_path] + filtered_argv
+
+         try:
+             # Use Popen for real-time streaming while still capturing output for telemetry
+             process = subprocess.Popen(
+                 cmd,
+                 env=env,
+                 stdout=subprocess.PIPE,
+                 stderr=subprocess.PIPE,
+                 text=True,
+                 bufsize=1 # Line buffered for real-time output
+             )
+
+             # Collect output while streaming in real-time
+             stdout_lines = []
+             stderr_lines = []
+
+             # Stream stdout in real-time
+             if process.stdout:
+                 for line in iter(process.stdout.readline, ''):
+                     if line:
+                         print(line, end='') # Print immediately for real-time display
+                         stdout_lines.append(line)
+
+             # Wait for process to complete and get stderr
+             stderr_output = process.stderr.read() if process.stderr else ""
+             if stderr_output:
+                 print(stderr_output, end='', file=sys.stderr)
+                 stderr_lines.append(stderr_output)
+
+             # Wait for process to complete
+             self.exit_code = process.wait()
+
+             # Store captured output for telemetry parsing
+             self.stdout = ''.join(stdout_lines)
+             self.stderr = ''.join(stderr_lines)
+
+         except Exception as e:
+             logger.warning(f"Failed to run apex subprocess: {e}")
+             self.exit_code = 1
+             self.stderr = str(e)
+
+         finally:
+             self.end_time = time.time()
+             self._emit_telemetry()
+
+         return self.exit_code or 0
+
+     def _extract_message(self, argv: list) -> str:
+         """Extract message from command-line arguments."""
+         try:
+             if '--message' in argv:
+                 idx = argv.index('--message')
+                 if idx + 1 < len(argv):
+                     return argv[idx + 1]
+             elif '-m' in argv:
+                 idx = argv.index('-m')
+                 if idx + 1 < len(argv):
+                     return argv[idx + 1]
+         except (ValueError, IndexError):
+             pass
+         return "apex-instance"
+
+     def _extract_env(self, argv: list) -> str:
+         """Extract environment from command-line arguments."""
+         try:
+             if '--env' in argv:
+                 idx = argv.index('--env')
+                 if idx + 1 < len(argv):
+                     return argv[idx + 1]
+         except (ValueError, IndexError):
+             pass
+         return "staging"
+
+     def _emit_telemetry(self) -> None:
+         """Emit OpenTelemetry span for apex instance."""
+         if telemetry_manager is None or not hasattr(telemetry_manager, "is_enabled"):
+             logger.debug("Telemetry manager not available")
+             return
+
+         if not telemetry_manager.is_enabled():
+             logger.debug("Telemetry is not enabled")
+             return
+
+         # Calculate duration
+         duration_ms = 0
+         if self.start_time and self.end_time:
+             duration_ms = int((self.end_time - self.start_time) * 1000)
+
+         # Determine status
+         status = "completed" if self.exit_code == 0 else "failed"
+         success = self.exit_code == 0
+
+         # Build attributes for apex.instance span
+         attributes: Dict[str, Any] = {
+             "zen.instance.type": "apex",
+             "zen.instance.name": "apex.instance",
+             "zen.instance.status": status,
+             "zen.instance.success": success,
+             "zen.instance.duration_ms": duration_ms,
+             "zen.instance.exit_code": self.exit_code or 0,
+             "zen.apex.environment": self.env,
+             "zen.apex.message": self._truncate_message(self.message or ""),
+         }
+
+         # Parse JSON output if available (contains token/cost info)
+         json_output = self._parse_json_output()
+         if json_output:
+             self._add_json_metrics(attributes, json_output)
+
+         # Emit span using the telemetry manager's tracer (same way as regular zen instances)
+         try:
+             # Access the tracer the same way telemetry_manager.record_instance_span() does
+             if not hasattr(telemetry_manager, '_tracer') or telemetry_manager._tracer is None:
+                 logger.warning("Telemetry manager has no tracer configured")
+                 return
+
+             from opentelemetry.trace import SpanKind
+             from google.api_core.exceptions import GoogleAPICallError
+
+             with telemetry_manager._tracer.start_as_current_span(
+                 "apex.instance", kind=SpanKind.INTERNAL
+             ) as span:
+                 for key, value in attributes.items():
+                     span.set_attribute(key, value)
+
+             logger.info(f"✅ Emitted apex telemetry span with {len(attributes)} attributes")
+             logger.debug(f"Apex span attributes: {attributes}")
+
+             # Note: Removed force_flush to prevent blocking event streaming
+             # Spans will still be sent via the normal batch export process
+
+         except Exception as exc:
+             logger.error(f"❌ Failed to emit apex telemetry span: {exc}")
+             import traceback
+             logger.debug(f"Traceback: {traceback.format_exc()}")
+
+     def _truncate_message(self, message: str, max_length: int = 200) -> str:
+         """Truncate message for telemetry attributes."""
+         if len(message) <= max_length:
+             return message
+         return message[:max_length] + "..."
+
+     def _parse_json_output(self) -> Optional[Dict[str, Any]]:
+         """Parse JSON output from agent_cli stdout if available."""
+         if not self.stdout:
+             return None
+
+         # Try to find JSON in stdout
+         for line in self.stdout.split('\n'):
+             line = line.strip()
+             if line.startswith('{') and line.endswith('}'):
+                 try:
+                     return json.loads(line)
+                 except json.JSONDecodeError:
+                     continue
+
+         return None
+
+     def _add_json_metrics(self, attributes: Dict[str, Any], json_output: Dict[str, Any]) -> None:
+         """Add metrics from JSON output to telemetry attributes."""
+         # Extract token usage if available
+         if 'usage' in json_output:
+             usage = json_output['usage']
+             attributes['zen.tokens.total'] = usage.get('total_tokens', 0)
+             attributes['zen.tokens.input'] = usage.get('input_tokens', 0)
+             attributes['zen.tokens.output'] = usage.get('output_tokens', 0)
+             attributes['zen.tokens.cache.read'] = usage.get('cache_read_tokens', 0)
+             attributes['zen.tokens.cache.creation'] = usage.get('cache_creation_tokens', 0)
+
+         # Extract cost if available
+         if 'cost' in json_output:
+             cost = json_output['cost']
+             if 'total_usd' in cost:
+                 attributes['zen.cost.usd_total'] = round(float(cost['total_usd']), 6)
+
+         # Extract run_id if available
+         if 'run_id' in json_output:
+             attributes['zen.apex.run_id'] = json_output['run_id']
+
+         # Extract validation status
+         if 'validation' in json_output:
+             validation = json_output['validation']
+             attributes['zen.apex.validation.passed'] = validation.get('passed', False)
+
+
+ def run_apex_with_telemetry(
+     agent_cli_path: str,
+     filtered_argv: list,
+     env: Optional[Dict[str, str]] = None
+ ) -> int:
+     """Convenience function to run apex with telemetry tracking.
+
+     Args:
+         agent_cli_path: Path to agent_cli.py script
+         filtered_argv: Command-line arguments (without 'zen' and '--apex')
+         env: Environment variables to pass to subprocess
+
+     Returns:
+         Exit code from agent_cli subprocess
+     """
+     wrapper = ApexTelemetryWrapper()
+     return wrapper.run_apex_with_telemetry(agent_cli_path, filtered_argv, env)
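For context, a hedged usage sketch of the new wrapper, roughly as the orchestrator's `--apex` path would call it. The `agent_cli.py` location and the argument list are placeholders, while `--message` and `--env` are the flags the wrapper itself inspects when building span attributes:

```python
import sys

from zen.telemetry.apex_telemetry import ApexTelemetryWrapper

wrapper = ApexTelemetryWrapper()
exit_code = wrapper.run_apex_with_telemetry(
    agent_cli_path="scripts/agent_cli.py",  # assumed path; the orchestrator resolves the real one
    filtered_argv=["--message", "run smoke tests", "--env", "staging"],
    env=None,                               # None lets the subprocess inherit the current environment
)

# After the call, the wrapper holds the values the apex.instance span was built from.
print("exit code:", exit_code)
print("zen.apex.message:", wrapper.message)
print("zen.apex.environment:", wrapper.env)
print("captured stdout chars:", len(wrapper.stdout))
sys.exit(exit_code)
```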