praisonaiagents 0.0.151__py3-none-any.whl → 0.0.153__py3-none-any.whl

This diff shows the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only and reflects the changes between the two released versions.
@@ -13,6 +13,7 @@ Features:
 - Easy integration with existing agents and workflows
 """
 
+import os
 import time
 import json
 import threading
@@ -32,6 +33,7 @@ except ImportError:
 logger = logging.getLogger(__name__)
 
 
+
 class PerformanceMonitor:
     """
     User-friendly performance monitoring for functions, API calls, and workflows.
@@ -50,6 +52,20 @@ class PerformanceMonitor:
         Args:
             max_entries: Maximum number of performance entries to keep in memory
         """
+        # Check if performance monitoring is disabled
+        from .telemetry import _is_monitoring_disabled
+        self._monitoring_disabled = _is_monitoring_disabled()
+
+        # If monitoring is disabled, use minimal initialization
+        if self._monitoring_disabled:
+            self.max_entries = 0
+            self._lock = None
+            self._function_stats = {}
+            self._api_calls = {}
+            self._function_flow = []
+            self._active_calls = {}
+            self._telemetry = None
+            return
         self.max_entries = max_entries
         self._lock = threading.RLock()
 
@@ -97,6 +113,12 @@ class PerformanceMonitor:
             def my_function():
                 return "result"
         """
+        # If monitoring is disabled, return unmodified function
+        if self._monitoring_disabled:
+            def decorator(func: Callable) -> Callable:
+                return func
+            return decorator
+
         def decorator(func: Callable) -> Callable:
             name = func_name or f"{func.__module__}.{func.__qualname__}"
 
@@ -170,6 +192,10 @@ class PerformanceMonitor:
             with performance_monitor.track_api_call("openai", "/v1/chat/completions"):
                 response = openai_client.chat.completions.create(...)
         """
+        # If monitoring is disabled, provide no-op context manager
+        if self._monitoring_disabled:
+            yield
+            return
         call_name = f"{api_name}:{endpoint}" if endpoint else api_name
         start_time = time.time()
 
@@ -189,6 +215,9 @@ class PerformanceMonitor:
     def _record_function_performance(self, func_name: str, execution_time: float,
                                      success: bool, error: Optional[str] = None):
         """Record function performance statistics."""
+        if self._monitoring_disabled:
+            return
+
         with self._lock:
             stats = self._function_stats[func_name]
             stats['call_count'] += 1
@@ -212,6 +241,9 @@ class PerformanceMonitor:
     def _record_api_call(self, api_name: str, execution_time: float,
                          success: bool, error: Optional[str] = None):
         """Record API call performance statistics."""
+        if self._monitoring_disabled:
+            return
+
         with self._lock:
             stats = self._api_calls[api_name]
             stats['call_count'] += 1
@@ -241,6 +273,10 @@ class PerformanceMonitor:
         Returns:
             Dictionary with performance statistics
         """
+        # If monitoring is disabled, return empty results
+        if self._monitoring_disabled:
+            return {}
+
         with self._lock:
             if func_name:
                 if func_name not in self._function_stats:
@@ -287,6 +323,10 @@ class PerformanceMonitor:
         Returns:
             Dictionary with API call performance statistics
         """
+        # If monitoring is disabled, return empty results
+        if self._monitoring_disabled:
+            return {}
+
         with self._lock:
             if api_name:
                 if api_name not in self._api_calls:
@@ -515,6 +555,9 @@ class PerformanceMonitor:
         Returns:
             Performance data in requested format
         """
+        if self._monitoring_disabled:
+            return {} if format == "dict" else "{}"
+
         data = {
             'functions': self.get_function_performance(),
             'api_calls': self.get_api_call_performance(),
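Usage sketch (illustrative, not part of the package diff): with monitoring now off by default, the tracking APIs only record data after an explicit opt-in. The import path below is an assumption; the diff itself only shows the global performance_monitor instance and its track_api_call / get_api_call_performance methods.

    import os
    # Must be set before the package is imported: the disabled check is cached at first use.
    os.environ["PRAISONAI_PERFORMANCE_ENABLED"] = "true"

    # Assumed import path for the global monitor instance
    from praisonaiagents.telemetry import performance_monitor

    with performance_monitor.track_api_call("openai", "/v1/chat/completions"):
        pass  # make the API call here; duration and success are recorded

    print(performance_monitor.get_api_call_performance())
    # With PRAISONAI_PERFORMANCE_ENABLED unset, the context manager is a no-op and this returns {}.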
@@ -526,6 +569,119 @@ class PerformanceMonitor:
         if format == "json":
             return json.dumps(data, indent=2)
         return data
+
+    def export_metrics_for_external_apm(self, service_name: str = "praisonai-agents") -> Dict[str, Any]:
+        """
+        Export lightweight metrics suitable for external APM tools like DataDog or New Relic.
+
+        This method provides a minimal overhead way to export key performance metrics
+        without the expensive flow analysis operations.
+
+        Args:
+            service_name: Name of the service for APM tagging
+
+        Returns:
+            Dictionary with lightweight metrics suitable for external monitoring
+        """
+        if self._monitoring_disabled:
+            return {}
+
+        timestamp = datetime.now().isoformat()
+        metrics = []
+
+        # Function performance metrics (lightweight)
+        for func_name, stats in self._function_stats.items():
+            if stats['call_count'] > 0:
+                avg_duration = stats['total_time'] / stats['call_count']
+                error_rate = stats['error_count'] / stats['call_count']
+
+                # Create metrics in a format suitable for external APM
+                metrics.append({
+                    'metric_name': 'function.execution.duration',
+                    'metric_type': 'gauge',
+                    'value': avg_duration,
+                    'timestamp': timestamp,
+                    'tags': {
+                        'service': service_name,
+                        'function_name': func_name,
+                        'unit': 'seconds'
+                    }
+                })
+
+                metrics.append({
+                    'metric_name': 'function.execution.count',
+                    'metric_type': 'counter',
+                    'value': stats['call_count'],
+                    'timestamp': timestamp,
+                    'tags': {
+                        'service': service_name,
+                        'function_name': func_name
+                    }
+                })
+
+                metrics.append({
+                    'metric_name': 'function.error.rate',
+                    'metric_type': 'gauge',
+                    'value': error_rate,
+                    'timestamp': timestamp,
+                    'tags': {
+                        'service': service_name,
+                        'function_name': func_name,
+                        'unit': 'ratio'
+                    }
+                })
+
+        # API call metrics (lightweight)
+        for api_name, stats in self._api_calls.items():
+            if stats['call_count'] > 0:
+                avg_duration = stats['total_time'] / stats['call_count']
+                success_rate = stats['success_count'] / stats['call_count']
+
+                metrics.append({
+                    'metric_name': 'api.call.duration',
+                    'metric_type': 'gauge',
+                    'value': avg_duration,
+                    'timestamp': timestamp,
+                    'tags': {
+                        'service': service_name,
+                        'api_name': api_name,
+                        'unit': 'seconds'
+                    }
+                })
+
+                metrics.append({
+                    'metric_name': 'api.call.count',
+                    'metric_type': 'counter',
+                    'value': stats['call_count'],
+                    'timestamp': timestamp,
+                    'tags': {
+                        'service': service_name,
+                        'api_name': api_name
+                    }
+                })
+
+                metrics.append({
+                    'metric_name': 'api.success.rate',
+                    'metric_type': 'gauge',
+                    'value': success_rate,
+                    'timestamp': timestamp,
+                    'tags': {
+                        'service': service_name,
+                        'api_name': api_name,
+                        'unit': 'ratio'
+                    }
+                })
+
+        return {
+            'service_name': service_name,
+            'timestamp': timestamp,
+            'metrics': metrics,
+            'metadata': {
+                'total_functions_monitored': len(self._function_stats),
+                'total_apis_monitored': len(self._api_calls),
+                'monitoring_enabled': not self._monitoring_disabled
+            }
+        }
 
 
 # Global performance monitor instance
@@ -570,4 +726,9 @@ def get_slowest_apis(limit: int = 10) -> List[Dict[str, Any]]:
 
 def clear_performance_data():
     """Clear all performance monitoring data."""
-    performance_monitor.clear_statistics()
+    performance_monitor.clear_statistics()
+
+
+def export_external_apm_metrics(service_name: str = "praisonai-agents") -> Dict[str, Any]:
+    """Export lightweight metrics for external APM tools."""
+    return performance_monitor.export_metrics_for_external_apm(service_name)
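Usage sketch (illustrative, not part of the package diff): the new module-level helper wraps PerformanceMonitor.export_metrics_for_external_apm; the import path is an assumption.

    from praisonaiagents.telemetry import export_external_apm_metrics  # assumed path

    payload = export_external_apm_metrics(service_name="my-service")
    # payload["metrics"] is a flat list of gauge/counter entries such as
    # 'function.execution.duration' (seconds) and 'api.call.count', each tagged with the service name.
    # Forwarding them to DataDog, New Relic, etc. is left to the caller; while monitoring is
    # disabled the call returns {}.
    for metric in payload.get("metrics", []):
        print(metric["metric_name"], metric["value"], metric["tags"])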
@@ -5,13 +5,14 @@ This module provides advanced analysis tools for function flow visualization,
 performance bottleneck identification, and comprehensive reporting.
 
 Features:
-- Function flow analysis and visualization
+- Function flow analysis and visualization (opt-in via PRAISONAI_FLOW_ANALYSIS_ENABLED)
 - Performance bottleneck detection
 - Execution path mapping
 - Performance trend analysis
 - Advanced reporting utilities
 """
 
+import os
 import json
 from collections import defaultdict
 from typing import Dict, Any, List, Optional
@@ -27,6 +28,9 @@ except ImportError:
 
 logger = logging.getLogger(__name__)
 
+# Check if expensive flow analysis should be enabled (opt-in only)
+_FLOW_ANALYSIS_ENABLED = os.environ.get('PRAISONAI_FLOW_ANALYSIS_ENABLED', '').lower() in ('true', '1', 'yes')
+
 # Performance analysis thresholds
 BOTTLENECK_THRESHOLD_AVERAGE = 1.0  # seconds - average duration to consider bottleneck
 BOTTLENECK_THRESHOLD_MAX = 5.0  # seconds - max duration to consider bottleneck
@@ -45,17 +49,28 @@ class FunctionFlowAnalyzer:
 
     def __init__(self):
         self.logger = logging.getLogger(__name__)
+
+        # Check if performance monitoring is disabled
+        from .telemetry import _is_monitoring_disabled
+        self._analysis_disabled = _is_monitoring_disabled()
 
     def analyze_execution_flow(self, flow_data: Optional[List[Dict]] = None) -> Dict[str, Any]:
         """
         Analyze function execution flow to identify patterns and bottlenecks.
 
+        Note: Expensive flow analysis operations are opt-in only via PRAISONAI_FLOW_ANALYSIS_ENABLED
+        environment variable to avoid performance overhead.
+
         Args:
             flow_data: Optional flow data, or None to use current monitor data
 
         Returns:
             Analysis results with flow patterns, bottlenecks, and statistics
         """
+        # Early exit if analysis is disabled
+        if self._analysis_disabled:
+            return {"message": "Flow analysis disabled via environment variables"}
+
         if flow_data is None:
             if not PERFORMANCE_MONITOR_AVAILABLE:
                 return {"error": "Performance monitor not available and no flow data provided"}
@@ -66,17 +81,24 @@ class FunctionFlowAnalyzer:
 
         analysis = {
             "total_events": len(flow_data),
-            "execution_patterns": self._analyze_patterns(flow_data),
-            "bottlenecks": self._identify_bottlenecks(flow_data),
-            "parallelism": self._analyze_parallelism(flow_data),
-            "call_chains": self._build_call_chains(flow_data),
             "statistics": self._calculate_flow_statistics(flow_data)
         }
 
+        # Only include expensive analysis if explicitly enabled
+        if _FLOW_ANALYSIS_ENABLED:
+            analysis.update({
+                "execution_patterns": self._analyze_patterns(flow_data),
+                "bottlenecks": self._identify_bottlenecks(flow_data),
+                "parallelism": self._analyze_parallelism(flow_data),
+                "call_chains": self._build_call_chains(flow_data),
+            })
+        else:
+            analysis["note"] = "Advanced flow analysis disabled. Set PRAISONAI_FLOW_ANALYSIS_ENABLED=true to enable expensive pattern detection."
+
         return analysis
 
     def _analyze_patterns(self, flow_data: List[Dict]) -> Dict[str, Any]:
-        """Analyze execution patterns in the flow data."""
+        """Analyze execution patterns in the flow data (optimized to avoid O(n²) complexity)."""
         patterns = {
             "most_frequent_sequences": [],
             "recursive_calls": [],
@@ -84,6 +106,12 @@ class FunctionFlowAnalyzer:
             "error_patterns": []
         }
 
+        # Limit analysis to reasonable data size to prevent performance issues
+        MAX_EVENTS_TO_ANALYZE = 1000
+        if len(flow_data) > MAX_EVENTS_TO_ANALYZE:
+            # Sample the most recent events instead of analyzing all (create copy to avoid modifying input)
+            flow_data = flow_data[-MAX_EVENTS_TO_ANALYZE:].copy()
+
         # Group events by function to find sequences
        function_sequences = defaultdict(list)
 
@@ -91,7 +119,7 @@
             func_name = event.get('function', 'unknown')
             function_sequences[func_name].append(event)
 
-        # Find most frequent function sequences
+        # Find most frequent function sequences (optimized - single pass)
         sequence_counts = defaultdict(int)
         for i in range(len(flow_data) - 1):
             current_func = flow_data[i].get('function')
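Usage sketch (illustrative, not part of the package diff): the expensive pattern detection is now double-gated, so both flags below matter. The import path is an assumption; only the FunctionFlowAnalyzer class name appears in the diff.

    import os
    # Both flags are read early (module level / cached), so set them before importing the package.
    os.environ["PRAISONAI_PERFORMANCE_ENABLED"] = "true"
    os.environ["PRAISONAI_FLOW_ANALYSIS_ENABLED"] = "true"

    from praisonaiagents.telemetry import FunctionFlowAnalyzer  # assumed path

    result = FunctionFlowAnalyzer().analyze_execution_flow()
    # Without PRAISONAI_FLOW_ANALYSIS_ENABLED the result holds only "total_events", "statistics"
    # and a "note"; with it, "execution_patterns", "bottlenecks", "parallelism" and "call_chains"
    # are added, and pattern analysis samples at most the 1000 most recent events.
    print(result.get("bottlenecks", []))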
@@ -13,20 +13,66 @@ import threading
 from typing import Dict, Any, Optional
 from datetime import datetime
 import logging
+from concurrent.futures import ThreadPoolExecutor
 
-# Try to import PostHog
-try:
-    from posthog import Posthog
-    POSTHOG_AVAILABLE = True
-except ImportError:
-    POSTHOG_AVAILABLE = False
+# Lazy imports - only import when needed
+_POSTHOG_AVAILABLE = None
+_POSTHOG_CLASS = None
 
-# Check for opt-out environment variables
-_TELEMETRY_DISABLED = any([
-    os.environ.get('PRAISONAI_TELEMETRY_DISABLED', '').lower() in ('true', '1', 'yes'),
-    os.environ.get('PRAISONAI_DISABLE_TELEMETRY', '').lower() in ('true', '1', 'yes'),
-    os.environ.get('DO_NOT_TRACK', '').lower() in ('true', '1', 'yes'),
-])
+def _get_posthog():
+    """Lazy import PostHog to avoid import overhead when disabled."""
+    global _POSTHOG_AVAILABLE, _POSTHOG_CLASS
+    if _POSTHOG_AVAILABLE is None:
+        try:
+            from posthog import Posthog
+            _POSTHOG_CLASS = Posthog
+            _POSTHOG_AVAILABLE = True
+        except ImportError:
+            _POSTHOG_AVAILABLE = False
+            _POSTHOG_CLASS = None
+    return _POSTHOG_CLASS if _POSTHOG_AVAILABLE else None
+
+# Cached result to avoid repeated environment variable checks
+_TELEMETRY_DISABLED_CACHE = None
+
+def _is_monitoring_disabled() -> bool:
+    """
+    Check if monitoring/telemetry is disabled via environment variables.
+
+    NEW BEHAVIOR: Performance monitoring is now DISABLED BY DEFAULT.
+    To enable monitoring, set PRAISONAI_PERFORMANCE_ENABLED=true.
+
+    The legacy disable flags still work for backward compatibility.
+
+    This function is cached to avoid repeated environment variable lookups.
+    """
+    global _TELEMETRY_DISABLED_CACHE
+
+    # Return cached result if available
+    if _TELEMETRY_DISABLED_CACHE is not None:
+        return _TELEMETRY_DISABLED_CACHE
+
+    # Check if explicitly disabled via legacy flags
+    explicitly_disabled = any([
+        os.environ.get('PRAISONAI_PERFORMANCE_DISABLED', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('PRAISONAI_TELEMETRY_DISABLED', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('PRAISONAI_DISABLE_TELEMETRY', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('DO_NOT_TRACK', '').lower() in ('true', '1', 'yes'),
+    ])
+
+    if explicitly_disabled:
+        _TELEMETRY_DISABLED_CACHE = True
+        return True
+
+    # NEW: Check if explicitly enabled (required for monitoring to be active)
+    explicitly_enabled = any([
+        os.environ.get('PRAISONAI_PERFORMANCE_ENABLED', '').lower() in ('true', '1', 'yes'),
+        os.environ.get('PRAISONAI_TELEMETRY_ENABLED', '').lower() in ('true', '1', 'yes'),
+    ])
+
+    # Disabled by default unless explicitly enabled
+    _TELEMETRY_DISABLED_CACHE = not explicitly_enabled
+    return _TELEMETRY_DISABLED_CACHE
 
 
 class MinimalTelemetry:
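Behavior sketch (illustrative, not part of the package diff): the new _is_monitoring_disabled() flips the default to off and gives the disable flags precedence over the enable flags; the dotted import path below is an assumption.

    import os
    os.environ["PRAISONAI_PERFORMANCE_ENABLED"] = "true"  # opt in to monitoring
    os.environ["DO_NOT_TRACK"] = "1"                      # legacy opt-out still wins

    from praisonaiagents.telemetry.telemetry import _is_monitoring_disabled  # assumed path

    print(_is_monitoring_disabled())  # True: any disable flag overrides the enable flags
    # The result is cached in _TELEMETRY_DISABLED_CACHE, so flags must be set before the first check.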
@@ -61,17 +107,32 @@
         if enabled is not None:
             self.enabled = enabled
         else:
-            self.enabled = not _TELEMETRY_DISABLED
+            self.enabled = not _is_monitoring_disabled()
+
+        # Fast path for disabled telemetry - minimal initialization
+        if not self.enabled:
+            self.logger = logging.getLogger(__name__)
+            self.logger.debug("Telemetry is disabled")
+            # Set minimal required attributes for disabled state
+            self._shutdown_complete = True
+            self._shutdown_lock = None
+            self._thread_pool = None
+            self._posthog = None
+            self._metrics = {}
+            self._metrics_lock = None
+            self.session_id = None
+            self._environment = {}
+            return
 
+        # Full initialization only when enabled
         self.logger = logging.getLogger(__name__)
 
         # Add shutdown tracking to prevent double shutdown
         self._shutdown_complete = False
         self._shutdown_lock = threading.Lock()
 
-        if not self.enabled:
-            self.logger.debug("Telemetry is disabled")
-            return
+        # Initialize thread pool for non-blocking telemetry operations
+        self._thread_pool = None
 
         # Generate anonymous session ID (not user ID)
         session_data = f"{datetime.now().isoformat()}-{os.getpid()}-{time.time()}"
@@ -96,28 +157,51 @@
 
         self.logger.debug(f"Telemetry enabled with session {self.session_id}")
 
-        # Initialize PostHog if available
-        if POSTHOG_AVAILABLE:
+        # Initialize thread pool for efficient telemetry operations
+        self._thread_pool = ThreadPoolExecutor(
+            max_workers=2,
+            thread_name_prefix="telemetry"
+        )
+
+        # Initialize PostHog lazily - only when needed
+        self._posthog = None
+        self._posthog_initialized = False
+
+    def _get_framework_version(self) -> str:
+        """Get the PraisonAI Agents version."""
+        try:
+            from .. import __version__
+            return __version__
+        except (ImportError, KeyError, AttributeError):
+            return "unknown"
+
+    def _get_posthog_client(self):
+        """Lazy initialization of PostHog client."""
+        if not self.enabled:
+            return None
+
+        if self._posthog_initialized:
+            return self._posthog
+
+        self._posthog_initialized = True
+        posthog_class = _get_posthog()
+
+        if posthog_class:
             try:
-                self._posthog = Posthog(
+                self._posthog = posthog_class(
                     project_api_key='phc_skZpl3eFLQJ4iYjsERNMbCO6jfeSJi2vyZlPahKgxZ7',
                     host='https://eu.i.posthog.com',
                     disable_geoip=True,
                     on_error=lambda e: self.logger.debug(f"PostHog error: {e}"),
                     sync_mode=False  # Use async mode to prevent blocking
                 )
-            except:
+            except Exception as e:
+                self.logger.debug(f"Failed to initialize PostHog: {e}")
                 self._posthog = None
         else:
             self._posthog = None
-
-    def _get_framework_version(self) -> str:
-        """Get the PraisonAI Agents version."""
-        try:
-            from .. import __version__
-            return __version__
-        except (ImportError, KeyError, AttributeError):
-            return "unknown"
+
+        return self._posthog
 
     def track_agent_execution(self, agent_name: str = None, success: bool = True, async_mode: bool = False):
         """
@@ -138,12 +222,13 @@
         self.logger.debug(f"Agent execution tracked: success={success}")
 
         # Send event to PostHog
-        if self._posthog:
+        posthog_client = self._get_posthog_client()
+        if posthog_client:
             if async_mode:
-                # Use a background thread to prevent blocking streaming responses
+                # Use thread pool for efficient background execution
                 def _async_capture():
                     try:
-                        self._posthog.capture(
+                        posthog_client.capture(
                             distinct_id=self.session_id,
                             event='agent_execution',
                             properties={
@@ -155,13 +240,17 @@
                         # Silently handle any telemetry errors to avoid disrupting user experience
                         self.logger.debug(f"Async PostHog capture error: {e}")
 
-                # Execute in background thread with daemon flag for clean shutdown
-                import threading
-                thread = threading.Thread(target=_async_capture, daemon=True)
-                thread.start()
+                # Use thread pool instead of creating new threads
+                if self._thread_pool:
+                    self._thread_pool.submit(_async_capture)
+                else:
+                    # Fallback to direct thread creation if pool not available
+                    import threading
+                    thread = threading.Thread(target=_async_capture, daemon=True)
+                    thread.start()
             else:
                 # Synchronous capture for backward compatibility
-                self._posthog.capture(
+                posthog_client.capture(
                     distinct_id=self.session_id,
                     event='agent_execution',
                     properties={
@@ -185,7 +274,8 @@
             self._metrics["task_completions"] += 1
 
         # Send event to PostHog
-        if self._posthog:
+        posthog_client = self._get_posthog_client()
+        if posthog_client:
             self._posthog.capture(
                 distinct_id=self.session_id,
                 event='task_completion',
@@ -229,7 +319,8 @@
                 timing_list[:] = timing_list[-self._max_timing_entries:]
 
         # Send event to PostHog
-        if self._posthog:
+        posthog_client = self._get_posthog_client()
+        if posthog_client:
             properties = {
                 'tool_name': tool_name,
                 'success': success,
@@ -266,7 +357,8 @@
             self._metrics["errors"] += 1
 
         # Send event to PostHog
-        if self._posthog:
+        posthog_client = self._get_posthog_client()
+        if posthog_client:
             self._posthog.capture(
                 distinct_id=self.session_id,
                 event='error',
@@ -290,7 +382,8 @@
             return
 
         # Send event to PostHog
-        if self._posthog:
+        posthog_client = self._get_posthog_client()
+        if posthog_client:
             self._posthog.capture(
                 distinct_id=self.session_id,
                 event='feature_usage',
@@ -336,10 +429,11 @@
         self.logger.debug(f"Telemetry flush: {metrics}")
 
         # Send to PostHog if available
-        if hasattr(self, '_posthog') and self._posthog:
+        posthog_client = self._get_posthog_client()
+        if posthog_client:
 
             try:
-                self._posthog.capture(
+                posthog_client.capture(
                     distinct_id='anonymous',
                     event='sdk_used',
                     properties={
@@ -376,6 +470,15 @@
         # Final flush
         self.flush()
 
+        # Shutdown thread pool
+        if self._thread_pool:
+            try:
+                self._thread_pool.shutdown(wait=True)  # Removed invalid timeout parameter
+            except Exception as e:
+                self.logger.debug(f"Thread pool shutdown error: {e}")
+            finally:
+                self._thread_pool = None
+
         # Shutdown PostHog if available
         posthog_client = getattr(self, '_posthog', None)
         if posthog_client: