vnai 2.1.7-py3-none-any.whl → 2.1.8-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
vnai/beam/metrics.py CHANGED
@@ -1,167 +1,218 @@
- import sys
- import time
- import threading
- from datetime import datetime
- import hashlib
- import json
-
- class Collector:
-     _instance = None
-     _lock = threading.Lock()
-
-     def __new__(cls):
-         with cls._lock:
-             if cls._instance is None:
-                 cls._instance = super(Collector, cls).__new__(cls)
-                 cls._instance._initialize()
-             return cls._instance
-
-     def _initialize(self):
-         self.metrics = {
-             "function": [],
-             "rate_limit": [],
-             "request": [],
-             "error": []
-         }
-         self.thresholds = {
-             "buffer_size": 50,
-             "error_threshold": 0.1,
-             "performance_threshold": 5.0
-         }
-         self.function_count = 0
-         self.colab_auth_triggered = False
-         self.max_metric_length = 200
-         self._last_record_time = {}
-         self.min_interval_per_type = 0.5
-         self._recent_hashes = []
-         self._sending_metrics = False
-
-     def record(self, metric_type, data, priority=None):
-         if not isinstance(data, dict):
-             data = {"value": str(data)}
-         if"timestamp" not in data:
-             data["timestamp"] = datetime.now().isoformat()
-         if metric_type !="system_info":
-             data.pop("system", None)
-             from vnai.scope.profile import inspector
-             data["machine_id"] = inspector.fingerprint()
-         now = time.time()
-         last_time = self._last_record_time.get(metric_type, 0)
-         if now - last_time < self.min_interval_per_type and priority !="high":
-             return
-         self._last_record_time[metric_type] = now
-         data_hash = hashlib.md5(json.dumps(data, sort_keys=True).encode()).hexdigest()
-         if data_hash in self._recent_hashes and priority !="high":
-             return
-         self._recent_hashes.append(data_hash)
-         if metric_type in self.metrics:
-             self.metrics[metric_type].append(data)
-             if len(self.metrics[metric_type]) > self.max_metric_length:
-                 self.metrics[metric_type] = self.metrics[metric_type][-self.max_metric_length:]
-         else:
-             self.metrics["function"].append(data)
-         if metric_type =="function":
-             self.function_count += 1
-             if self.function_count > 10 and not self.colab_auth_triggered and'google.colab' in sys.modules:
-                 self.colab_auth_triggered = True
-                 threading.Thread(target=self._trigger_colab_auth, daemon=True).start()
-         if sum(len(metric_list) for metric_list in self.metrics.values()) >= self.thresholds["buffer_size"]:
-             self._send_metrics()
-         if priority =="high" or metric_type =="error":
-             self._send_metrics()
-
-     def _trigger_colab_auth(self):
-         try:
-             from vnai.scope.profile import inspector
-             inspector.get_or_create_user_id()
-         except:
-             pass
-
-     def _send_metrics(self):
-         if self._sending_metrics:
-             return
-         self._sending_metrics = True
-         try:
-             from vnai.flow.relay import track_function_call, track_rate_limit, track_api_request
-         except ImportError:
-             for metric_type in self.metrics:
-                 self.metrics[metric_type] = []
-             self._sending_metrics = False
-             return
-         for metric_type, data_list in self.metrics.items():
-             if not data_list:
-                 continue
-             for data in data_list:
-                 try:
-                     if metric_type =="function":
-                         track_function_call(
-                             function_name=data.get("function","unknown"),
-                             source=data.get("source","vnai"),
-                             execution_time=data.get("execution_time", 0),
-                             success=data.get("success", True),
-                             error=data.get("error"),
-                             args=data.get("args")
-                         )
-                     elif metric_type =="rate_limit":
-                         track_rate_limit(
-                             source=data.get("source","vnai"),
-                             limit_type=data.get("limit_type","unknown"),
-                             limit_value=data.get("limit_value", 0),
-                             current_usage=data.get("current_usage", 0),
-                             is_exceeded=data.get("is_exceeded", False)
-                         )
-                     elif metric_type =="request":
-                         track_api_request(
-                             endpoint=data.get("endpoint","unknown"),
-                             source=data.get("source","vnai"),
-                             method=data.get("method","GET"),
-                             status_code=data.get("status_code", 200),
-                             execution_time=data.get("execution_time", 0),
-                             request_size=data.get("request_size", 0),
-                             response_size=data.get("response_size", 0)
-                         )
-                 except Exception as e:
-                     continue
-             self.metrics[metric_type] = []
-         self._sending_metrics = False
-
-     def get_metrics_summary(self):
-         return {
-             metric_type: len(data_list)
-             for metric_type, data_list in self.metrics.items()
-         }
- collector = Collector()
-
- def capture(module_type="function"):
-     def decorator(func):
-         def wrapper(*args, **kwargs):
-             start_time = time.time()
-             success = False
-             error = None
-             try:
-                 result = func(*args, **kwargs)
-                 success = True
-                 return result
-             except Exception as e:
-                 error = str(e)
-                 collector.record("error", {
-                     "function": func.__name__,
-                     "error": error,
-                     "args": str(args)[:100] if args else None
-                 })
-                 raise
-             finally:
-                 execution_time = time.time() - start_time
-                 collector.record(
-                     module_type,
-                     {
-                         "function": func.__name__,
-                         "execution_time": execution_time,
-                         "success": success,
-                         "error": error,
-                         "timestamp": datetime.now().isoformat(),
-                         "args": str(args)[:100] if args else None
-                     }
-                 )
-         return wrapper
-     return decorator
+ # vnai/beam/metrics.py
+
+ import sys
+ import time
+ import threading
+ from datetime import datetime
+ import hashlib
+ import json
+
+ class Collector:
+     """Collects operation metrics for system optimization"""
+
+     _instance = None
+     _lock = threading.Lock()
+
+     def __new__(cls):
+         with cls._lock:
+             if cls._instance is None:
+                 cls._instance = super(Collector, cls).__new__(cls)
+                 cls._instance._initialize()
+             return cls._instance
+
+     def _initialize(self):
+         """Initialize collector"""
+         # Initialize metrics storage
+         self.metrics = {
+             "function": [],
+             "rate_limit": [],
+             "request": [],
+             "error": []
+         }
+         # Configuration thresholds
+         self.thresholds = {
+             "buffer_size": 50,
+             "error_threshold": 0.1,
+             "performance_threshold": 5.0
+         }
+         # Tracking variables
+         self.function_count = 0
+         self.colab_auth_triggered = False
+         self.max_metric_length = 200 # Keep only the latest 200 entries
+         self._last_record_time = {} # Track last record time for throttling
+         self.min_interval_per_type = 0.5 # Min interval between same type records
+         self._recent_hashes = [] # Track recent hashes for deduplication
+         self._sending_metrics = False # Prevent reentrancy in _send_metrics
+
+     def record(self, metric_type, data, priority=None):
+         """Record operation metrics with deduplication and throttling"""
+
+         # Ensure data is a dictionary
+         if not isinstance(data, dict):
+             data = {"value": str(data)}
+
+         # Add timestamp if not present
+         if "timestamp" not in data:
+             data["timestamp"] = datetime.now().isoformat()
+
+         # For non-system info, simplify and tag machine
+         if metric_type != "system_info":
+             data.pop("system", None)
+             from vnai.scope.profile import inspector
+             data["machine_id"] = inspector.fingerprint()
+
+         # ==== THROTTLING ====
+         now = time.time()
+         last_time = self._last_record_time.get(metric_type, 0)
+         if now - last_time < self.min_interval_per_type and priority != "high":
+             return # Skip due to interval limit
+         self._last_record_time[metric_type] = now
+
+         # ==== DEDUPLICATION ====
+         data_hash = hashlib.md5(json.dumps(data, sort_keys=True).encode()).hexdigest()
+         if data_hash in self._recent_hashes and priority != "high":
+             return # Skip duplicate
+         self._recent_hashes.append(data_hash)
+
+         # ==== RECORD LOGIC ====
+         if metric_type in self.metrics:
+             self.metrics[metric_type].append(data)
+             # Prune oldest if too long
+             if len(self.metrics[metric_type]) > self.max_metric_length:
+                 self.metrics[metric_type] = self.metrics[metric_type][-self.max_metric_length:]
+         else:
+             self.metrics["function"].append(data)
+
+         # Function metric tracking (Colab trigger)
+         if metric_type == "function":
+             self.function_count += 1
+             if self.function_count > 10 and not self.colab_auth_triggered and 'google.colab' in sys.modules:
+                 self.colab_auth_triggered = True
+                 threading.Thread(target=self._trigger_colab_auth, daemon=True).start()
+
+         # Auto-send triggers
+         if sum(len(metric_list) for metric_list in self.metrics.values()) >= self.thresholds["buffer_size"]:
+             self._send_metrics()
+         if priority == "high" or metric_type == "error":
+             self._send_metrics()
+
+     def _trigger_colab_auth(self):
+         """Trigger Google Colab authentication in a background thread"""
+         try:
+             from vnai.scope.profile import inspector
+             inspector.get_or_create_user_id()
+         except:
+             pass # Silently fail if there's an issue
+
+     def _send_metrics(self):
+         """Send collected metrics to data relay"""
+         # Prevent reentrancy
+         if self._sending_metrics:
+             return
+
+         self._sending_metrics = True
+         try:
+             # Import here to avoid circular imports
+             from vnai.flow.relay import track_function_call, track_rate_limit, track_api_request
+         except ImportError:
+             # If relay module is not available, clear metrics and return
+             for metric_type in self.metrics:
+                 self.metrics[metric_type] = []
+             self._sending_metrics = False
+             return
+
+         # Process and send each type of metric using the appropriate tracking function
+         for metric_type, data_list in self.metrics.items():
+             if not data_list:
+                 continue
+
+             # Process each metric by type
+             for data in data_list:
+                 try:
+                     if metric_type == "function":
+                         # Use the track_function_call interface
+                         track_function_call(
+                             function_name=data.get("function", "unknown"),
+                             source=data.get("source", "vnai"),
+                             execution_time=data.get("execution_time", 0),
+                             success=data.get("success", True),
+                             error=data.get("error"),
+                             args=data.get("args")
+                         )
+                     elif metric_type == "rate_limit":
+                         # Use the track_rate_limit interface
+                         track_rate_limit(
+                             source=data.get("source", "vnai"),
+                             limit_type=data.get("limit_type", "unknown"),
+                             limit_value=data.get("limit_value", 0),
+                             current_usage=data.get("current_usage", 0),
+                             is_exceeded=data.get("is_exceeded", False)
+                         )
+                     elif metric_type == "request":
+                         # Use the track_api_request interface
+                         track_api_request(
+                             endpoint=data.get("endpoint", "unknown"),
+                             source=data.get("source", "vnai"),
+                             method=data.get("method", "GET"),
+                             status_code=data.get("status_code", 200),
+                             execution_time=data.get("execution_time", 0),
+                             request_size=data.get("request_size", 0),
+                             response_size=data.get("response_size", 0)
+                         )
+                 except Exception as e:
+                     # If tracking fails, just continue with the next item
+                     continue
+
+             # Clear the processed metrics
+             self.metrics[metric_type] = []
+
+         # Reset sending flag
+         self._sending_metrics = False
+
+     def get_metrics_summary(self):
+         """Get summary of collected metrics"""
+         return {
+             metric_type: len(data_list)
+             for metric_type, data_list in self.metrics.items()
+         }
+
+ # Create singleton instance
+ collector = Collector()
+
+ def capture(module_type="function"):
+     """Decorator to capture metrics for any function"""
+     def decorator(func):
+         def wrapper(*args, **kwargs):
+             start_time = time.time()
+             success = False
+             error = None
+
+             try:
+                 result = func(*args, **kwargs)
+                 success = True
+                 return result
+             except Exception as e:
+                 error = str(e)
+                 # Log the error to metrics before re-raising
+                 collector.record("error", {
+                     "function": func.__name__,
+                     "error": error,
+                     "args": str(args)[:100] if args else None
+                 })
+                 raise
+             finally:
+                 execution_time = time.time() - start_time
+
+                 collector.record(
+                     module_type,
+                     {
+                         "function": func.__name__,
+                         "execution_time": execution_time,
+                         "success": success,
+                         "error": error,
+                         "timestamp": datetime.now().isoformat(),
+                         "args": str(args)[:100] if args else None # Truncate for privacy
+                     }
+                 )
+         return wrapper
+     return decorator
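
Functionally, the metrics.py change is cosmetic: 2.1.8 restores the docstrings and comments that 2.1.7 shipped without, while the Collector logic (per-type throttling, hash-based deduplication, buffered flush) is unchanged. A minimal usage sketch, illustrative only and not part of the diff, assuming vnai 2.1.8 is installed and its vnai.scope.profile / vnai.flow.relay modules import cleanly in your environment:

# Illustrative usage sketch -- not part of the packaged code above.
from vnai.beam.metrics import capture, collector

@capture(module_type="function")
def fetch_quote(symbol):
    # Success/failure and execution time are recorded by the wrapper.
    return {"symbol": symbol, "price": 100.0}

fetch_quote("VNM")
fetch_quote("VNM")  # second call within 0.5 s is dropped by the per-type throttle
print(collector.get_metrics_summary())  # counts of buffered entries per metric type

# Direct recording; priority="high" bypasses throttling/deduplication and flushes immediately.
collector.record("request", {"endpoint": "/quote", "status_code": 200}, priority="high")

The design keeps everything buffered until 50 entries accumulate; only error records and priority="high" records trigger an immediate flush to the relay.
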
vnai/beam/pulse.py CHANGED
@@ -1,79 +1,108 @@
- import threading
- import time
- from datetime import datetime
-
- class Monitor:
-     _instance = None
-     _lock = threading.Lock()
-
-     def __new__(cls):
-         with cls._lock:
-             if cls._instance is None:
-                 cls._instance = super(Monitor, cls).__new__(cls)
-                 cls._instance._initialize()
-             return cls._instance
-
-     def _initialize(self):
-         self.health_status ="healthy"
-         self.last_check = time.time()
-         self.check_interval = 300
-         self.error_count = 0
-         self.warning_count = 0
-         self.status_history = []
-         self._start_background_check()
-
-     def _start_background_check(self):
-         def check_health():
-             while True:
-                 try:
-                     self.check_health()
-                 except:
-                     pass
-                 time.sleep(self.check_interval)
-         thread = threading.Thread(target=check_health, daemon=True)
-         thread.start()
-
-     def check_health(self):
-         from vnai.beam.metrics import collector
-         from vnai.beam.quota import guardian
-         self.last_check = time.time()
-         metrics_summary = collector.get_metrics_summary()
-         has_errors = metrics_summary.get("error", 0) > 0
-         resource_usage = guardian.usage()
-         high_usage = resource_usage > 80
-         if has_errors and high_usage:
-             self.health_status ="critical"
-             self.error_count += 1
-         elif has_errors or high_usage:
-             self.health_status ="warning"
-             self.warning_count += 1
-         else:
-             self.health_status ="healthy"
-         self.status_history.append({
-             "timestamp": datetime.now().isoformat(),
-             "status": self.health_status,
-             "metrics": metrics_summary,
-             "resource_usage": resource_usage
-         })
-         if len(self.status_history) > 10:
-             self.status_history = self.status_history[-10:]
-         return self.health_status
-
-     def report(self):
-         if time.time() - self.last_check > self.check_interval:
-             self.check_health()
-         return {
-             "status": self.health_status,
-             "last_check": datetime.fromtimestamp(self.last_check).isoformat(),
-             "error_count": self.error_count,
-             "warning_count": self.warning_count,
-             "history": self.status_history[-3:],
-         }
-
-     def reset(self):
-         self.health_status ="healthy"
-         self.error_count = 0
-         self.warning_count = 0
-         self.status_history = []
-         self.last_check = time.time()
- monitor = Monitor()
+ # vnai/beam/pulse.py
+
+ import threading
+ import time
+ from datetime import datetime
+
+ class Monitor:
+     """Monitors system health and performance"""
+
+     _instance = None
+     _lock = threading.Lock()
+
+     def __new__(cls):
+         with cls._lock:
+             if cls._instance is None:
+                 cls._instance = super(Monitor, cls).__new__(cls)
+                 cls._instance._initialize()
+             return cls._instance
+
+     def _initialize(self):
+         """Initialize monitor"""
+         self.health_status = "healthy"
+         self.last_check = time.time()
+         self.check_interval = 300 # seconds
+         self.error_count = 0
+         self.warning_count = 0
+         self.status_history = []
+
+         # Start background health check thread
+         self._start_background_check()
+
+     def _start_background_check(self):
+         """Start background health check thread"""
+         def check_health():
+             while True:
+                 try:
+                     self.check_health()
+                 except:
+                     pass # Don't let errors stop the monitor
+                 time.sleep(self.check_interval)
+
+         thread = threading.Thread(target=check_health, daemon=True)
+         thread.start()
+
+     def check_health(self):
+         """Check system health status"""
+         from vnai.beam.metrics import collector
+         from vnai.beam.quota import guardian
+
+         # Record check time
+         self.last_check = time.time()
+
+         # Check metrics collector health
+         metrics_summary = collector.get_metrics_summary()
+         has_errors = metrics_summary.get("error", 0) > 0
+
+         # Check resource usage
+         resource_usage = guardian.usage()
+         high_usage = resource_usage > 80 # Over 80% of rate limits
+
+         # Determine health status
+         if has_errors and high_usage:
+             self.health_status = "critical"
+             self.error_count += 1
+         elif has_errors or high_usage:
+             self.health_status = "warning"
+             self.warning_count += 1
+         else:
+             self.health_status = "healthy"
+
+         # Record health status
+         self.status_history.append({
+             "timestamp": datetime.now().isoformat(),
+             "status": self.health_status,
+             "metrics": metrics_summary,
+             "resource_usage": resource_usage
+         })
+
+         # Keep history limited to last 10 entries
+         if len(self.status_history) > 10:
+             self.status_history = self.status_history[-10:]
+
+         return self.health_status
+
+     def report(self):
+         """Get health report"""
+         # Ensure we have a fresh check if last one is old
+         if time.time() - self.last_check > self.check_interval:
+             self.check_health()
+
+         return {
+             "status": self.health_status,
+             "last_check": datetime.fromtimestamp(self.last_check).isoformat(),
+             "error_count": self.error_count,
+             "warning_count": self.warning_count,
+             "history": self.status_history[-3:], # Last 3 entries
+         }
+
+     def reset(self):
+         """Reset health monitor"""
+         self.health_status = "healthy"
+         self.error_count = 0
+         self.warning_count = 0
+         self.status_history = []
+         self.last_check = time.time()
+
+ # Create singleton instance
+ monitor = Monitor()
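
As with metrics.py, the pulse.py change only reintroduces docstrings and comments around unchanged logic. A short sketch of how the Monitor singleton is typically exercised, illustrative only, assuming the vnai.beam.metrics and vnai.beam.quota singletons initialise in the current environment:

# Illustrative usage sketch -- not part of the packaged code above.
from vnai.beam.pulse import monitor

status = monitor.check_health()   # "healthy", "warning", or "critical"
report = monitor.report()         # re-checks only if the last check is older than 300 s
print(report["status"], report["error_count"], report["history"])

monitor.reset()                   # clear counters and history, mark as healthy

Because a daemon thread repeats check_health() every 300 seconds, report() normally returns a recent status without forcing a fresh check.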