praisonaiagents 0.0.151__py3-none-any.whl → 0.0.153__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry; it is provided for informational purposes only.
@@ -1,10 +1,16 @@
 """
-Simplified integration module for adding telemetry to core PraisonAI components.
+Performance-optimized integration module for adding telemetry to core PraisonAI components.
+Uses thread pools and async patterns to minimize performance overhead.
 """
 
 from typing import Any, Optional, TYPE_CHECKING
 from functools import wraps
 import time
+import threading
+import concurrent.futures
+import queue
+import asyncio
+from contextlib import contextmanager
 
 if TYPE_CHECKING:
     from .telemetry import MinimalTelemetry
@@ -12,15 +18,159 @@ if TYPE_CHECKING:
     from ..task.task import Task
     from ..agents.agents import PraisonAIAgents
 
+# Performance mode flag for auto-instrumentation (define early to avoid NameError)
+_performance_mode_enabled = False
 
-def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = None):
+# Shared thread pool for telemetry operations to avoid creating threads per call
+_telemetry_executor = None
+_telemetry_queue = None
+_queue_processor_running = False
+_queue_lock = threading.Lock()
+
+def _get_telemetry_executor():
+    """Get or create the shared telemetry thread pool executor."""
+    global _telemetry_executor
+    if _telemetry_executor is None:
+        # Use a small thread pool to avoid resource overhead
+        _telemetry_executor = concurrent.futures.ThreadPoolExecutor(
+            max_workers=2,
+            thread_name_prefix="telemetry"
+        )
+    return _telemetry_executor
+
+def _get_telemetry_queue():
+    """Get or create the shared telemetry event queue."""
+    global _telemetry_queue, _queue_processor_running
+    with _queue_lock:
+        # Hold the lock for both checks so concurrent callers cannot create
+        # duplicate queues or start duplicate processors
+        if _telemetry_queue is None:
+            _telemetry_queue = queue.Queue(maxsize=1000)  # Limit queue size to prevent memory issues
+        if not _queue_processor_running:
+            _queue_processor_running = True
+            executor = _get_telemetry_executor()
+            executor.submit(_process_telemetry_queue)
+
+    return _telemetry_queue
+
+def _process_telemetry_queue():
+    """Background processor for telemetry events to batch operations."""
+    global _telemetry_queue, _queue_processor_running
+    batch_size = 10
+    batch_timeout = 1.0  # Process batch every second
+
+    try:
+        while _queue_processor_running:
+            events = []
+            deadline = time.time() + batch_timeout
+
+            # Collect events until batch size or timeout
+            while len(events) < batch_size and time.time() < deadline:
+                try:
+                    event = _telemetry_queue.get(timeout=0.1)
+                    events.append(event)
+                    _telemetry_queue.task_done()
+                except queue.Empty:
+                    continue
+                except Exception:
+                    break
+
+            # Process batch if we have events
+            if events:
+                _process_event_batch(events)
+
+    except Exception as e:
+        # Log error for debugging while maintaining non-disruptive behavior
+        import logging
+        logging.debug(f"Telemetry queue processing error: {e}")
+    finally:
+        _queue_processor_running = False
+
+def _process_event_batch(events):
+    """Process a batch of telemetry events efficiently."""
+    try:
+        from .telemetry import get_telemetry
+        telemetry = get_telemetry()
+
+        if not telemetry or not telemetry.enabled:
+            return
+
+        # Process events by type for efficiency
+        for event in events:
+            event_type = event.get('type')
+            if event_type == 'agent_execution':
+                telemetry.track_agent_execution(
+                    event.get('agent_name'),
+                    success=event.get('success', True),
+                    async_mode=True  # Use async mode to prevent blocking
+                )
+            elif event_type == 'task_completion':
+                telemetry.track_task_completion(
+                    event.get('task_name'),
+                    success=event.get('success', True)
+                )
+            elif event_type == 'tool_usage':
+                telemetry.track_tool_usage(
+                    event.get('tool_name'),
+                    success=event.get('success', True),
+                    execution_time=event.get('execution_time')
+                )
+            elif event_type == 'error':
+                telemetry.track_error(event.get('error_type'))
+            elif event_type == 'feature_usage':
+                telemetry.track_feature_usage(event.get('feature_name'))
+    except Exception as e:
+        # Log error for debugging while maintaining non-disruptive behavior
+        import logging
+        logging.debug(f"Telemetry batch processing error: {e}")
+
+@contextmanager
+def _performance_mode_context():
+    """Context manager for performance-critical operations that minimizes telemetry overhead."""
+    # Store current performance mode state
+    global _performance_mode_enabled
+    original_state = _performance_mode_enabled
+
+    try:
+        # Temporarily enable performance mode for minimal overhead
+        _performance_mode_enabled = True
+        yield
+    finally:
+        # Restore original state
+        _performance_mode_enabled = original_state
+
+
+def _queue_telemetry_event(event_data):
+    """Queue a telemetry event for batch processing."""
+    try:
+        telemetry_queue = _get_telemetry_queue()
+        # Non-blocking put to avoid performance impact
+        telemetry_queue.put_nowait(event_data)
+    except queue.Full:
+        # Queue is full, drop the event to maintain performance
+        pass
+    except Exception:
+        # Silently handle any queue errors
+        pass
+
+def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = None, performance_mode: bool = False):
     """
-    Instrument an Agent instance with minimal telemetry.
+    Instrument an Agent instance with performance-optimized telemetry.
 
     Args:
         agent: The Agent instance to instrument
         telemetry: Optional telemetry instance (uses global if not provided)
+        performance_mode: If True, uses minimal overhead tracking
     """
+    # Early exit if telemetry is disabled by environment variables
+    from .telemetry import _is_monitoring_disabled
+    telemetry_disabled = _is_monitoring_disabled()
+
+    if telemetry_disabled:
+        return agent
+
     if not telemetry:
         from .telemetry import get_telemetry
         telemetry = get_telemetry()
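
The core of this change is the queue-and-batch pattern above: producers enqueue without blocking, and a single background worker drains events in batches of 10 or once per second, whichever comes first. Below is a minimal, self-contained sketch of that pattern; the names `emit` and `process_batches`, and the `print` handler, are illustrative stand-ins for `_queue_telemetry_event`, `_process_telemetry_queue`, and `_process_event_batch`, not package API.

```python
import queue
import threading
import time

events = queue.Queue(maxsize=1000)  # bounded, like _telemetry_queue
running = True

def emit(event):
    """Non-blocking enqueue: drop events rather than stall the caller."""
    try:
        events.put_nowait(event)
    except queue.Full:
        pass

def process_batches(handler, batch_size=10, batch_timeout=1.0):
    """Drain the queue in batches; mirrors _process_telemetry_queue."""
    while running:
        batch, deadline = [], time.time() + batch_timeout
        while len(batch) < batch_size and time.time() < deadline:
            try:
                batch.append(events.get(timeout=0.1))
                events.task_done()
            except queue.Empty:
                continue
        if batch:
            handler(batch)  # stand-in for _process_event_batch

worker = threading.Thread(target=process_batches, args=(print,), daemon=True)
worker.start()
emit({'type': 'agent_execution', 'agent_name': 'demo', 'success': True})
time.sleep(1.5)  # let one batch flush before the demo exits
running = False
```

The bounded queue plus `put_nowait` is the key trade-off: under load, telemetry events are dropped rather than ever blocking the instrumented call path.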
@@ -42,27 +192,28 @@ def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = N
     if original_chat:
         @wraps(original_chat)
         def instrumented_chat(*args, **kwargs):
-            import threading
-
             try:
                 result = original_chat(*args, **kwargs)
-                # Track success asynchronously to prevent blocking
-                def track_async():
-                    try:
-                        telemetry.track_agent_execution(agent.name, success=True)
-                    except:
-                        pass # Ignore telemetry errors
-                threading.Thread(target=track_async, daemon=True).start()
+                # Queue telemetry event for batch processing instead of creating threads
+                if not performance_mode:
+                    _queue_telemetry_event({
+                        'type': 'agent_execution',
+                        'agent_name': getattr(agent, 'name', 'unknown'),
+                        'success': True
+                    })
                 return result
             except Exception as e:
-                # Track error asynchronously
-                def track_error_async():
-                    try:
-                        telemetry.track_agent_execution(agent.name, success=False)
-                        telemetry.track_error(type(e).__name__)
-                    except:
-                        pass # Ignore telemetry errors
-                threading.Thread(target=track_error_async, daemon=True).start()
+                # Queue error event
+                if not performance_mode:
+                    _queue_telemetry_event({
+                        'type': 'agent_execution',
+                        'agent_name': getattr(agent, 'name', 'unknown'),
+                        'success': False
+                    })
+                    _queue_telemetry_event({
+                        'type': 'error',
+                        'error_type': type(e).__name__
+                    })
                 raise
 
         agent.chat = instrumented_chat
@@ -72,7 +223,6 @@ def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = N
         @wraps(original_start)
         def instrumented_start(*args, **kwargs):
             import types
-            import threading
 
             try:
                 result = original_start(*args, **kwargs)
@@ -85,39 +235,49 @@ def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = N
                             for chunk in result:
                                 yield chunk
                             # Track success only after streaming completes
-                            # Use a separate thread to make it truly non-blocking
-                            def track_async():
-                                try:
-                                    telemetry.track_agent_execution(agent.name, success=True)
-                                except:
-                                    pass # Ignore telemetry errors
-                            threading.Thread(target=track_async, daemon=True).start()
+                            if not performance_mode:
+                                _queue_telemetry_event({
+                                    'type': 'agent_execution',
+                                    'agent_name': getattr(agent, 'name', 'unknown'),
+                                    'success': True
+                                })
                         except Exception as e:
                             # Track error immediately
-                            threading.Thread(target=lambda: telemetry.track_agent_execution(agent.name, success=False), daemon=True).start()
-                            threading.Thread(target=lambda: telemetry.track_error(type(e).__name__), daemon=True).start()
+                            if not performance_mode:
+                                _queue_telemetry_event({
+                                    'type': 'agent_execution',
+                                    'agent_name': getattr(agent, 'name', 'unknown'),
+                                    'success': False
+                                })
+                                _queue_telemetry_event({
+                                    'type': 'error',
+                                    'error_type': type(e).__name__
+                                })
                             raise
 
                     return streaming_wrapper()
                 else:
-                    # For non-streaming, track immediately but asynchronously
-                    def track_async():
-                        try:
-                            telemetry.track_agent_execution(agent.name, success=True)
-                        except:
-                            pass # Ignore telemetry errors
-                    threading.Thread(target=track_async, daemon=True).start()
+                    # For non-streaming, track immediately via queue
+                    if not performance_mode:
+                        _queue_telemetry_event({
+                            'type': 'agent_execution',
+                            'agent_name': getattr(agent, 'name', 'unknown'),
+                            'success': True
+                        })
                     return result
 
             except Exception as e:
-                # Track error immediately but asynchronously
-                def track_error_async():
-                    try:
-                        telemetry.track_agent_execution(agent.name, success=False)
-                        telemetry.track_error(type(e).__name__)
-                    except:
-                        pass # Ignore telemetry errors
-                threading.Thread(target=track_error_async, daemon=True).start()
+                # Track error via queue
+                if not performance_mode:
+                    _queue_telemetry_event({
+                        'type': 'agent_execution',
+                        'agent_name': getattr(agent, 'name', 'unknown'),
+                        'success': False
+                    })
+                    _queue_telemetry_event({
+                        'type': 'error',
+                        'error_type': type(e).__name__
+                    })
                 raise
 
         agent.start = instrumented_start
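
The streaming branch above deserves a closer look: success may only be recorded after the caller has consumed the generator, so the wrapper re-yields each chunk and defers the telemetry call to the end. A stripped-down sketch of the idea follows; `wrap_stream` and the callbacks are hypothetical names, not package API.

```python
def wrap_stream(stream, on_success, on_error):
    """Re-yield chunks; record the outcome only when the stream finishes."""
    def wrapper():
        try:
            for chunk in stream:
                yield chunk
            on_success()   # reached only after full consumption
        except Exception as exc:
            on_error(exc)  # producer or consumer aborted mid-stream
            raise
    return wrapper()

chunks = wrap_stream(
    iter(["Hel", "lo"]),
    on_success=lambda: print("tracked: success"),
    on_error=lambda exc: print("tracked:", type(exc).__name__),
)
print("".join(chunks))  # consuming the generator fires the callback
```

Tracking inside the original function would fire as soon as the generator object is returned, before a single chunk has been produced; wrapping is what moves the measurement to stream completion.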
@@ -126,27 +286,28 @@ def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = N
     if original_run:
         @wraps(original_run)
         def instrumented_run(*args, **kwargs):
-            import threading
-
             try:
                 result = original_run(*args, **kwargs)
-                # Track success asynchronously to prevent blocking
-                def track_async():
-                    try:
-                        telemetry.track_agent_execution(agent.name, success=True)
-                    except:
-                        pass # Ignore telemetry errors
-                threading.Thread(target=track_async, daemon=True).start()
+                # Track success via queue
+                if not performance_mode:
+                    _queue_telemetry_event({
+                        'type': 'agent_execution',
+                        'agent_name': getattr(agent, 'name', 'unknown'),
+                        'success': True
+                    })
                 return result
             except Exception as e:
-                # Track error asynchronously
-                def track_error_async():
-                    try:
-                        telemetry.track_agent_execution(agent.name, success=False)
-                        telemetry.track_error(type(e).__name__)
-                    except:
-                        pass # Ignore telemetry errors
-                threading.Thread(target=track_error_async, daemon=True).start()
+                # Track error via queue
+                if not performance_mode:
+                    _queue_telemetry_event({
+                        'type': 'agent_execution',
+                        'agent_name': getattr(agent, 'name', 'unknown'),
+                        'success': False
+                    })
+                    _queue_telemetry_event({
+                        'type': 'error',
+                        'error_type': type(e).__name__
+                    })
                 raise
 
         agent.run = instrumented_run
@@ -155,13 +316,31 @@ def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = N
     if original_execute_tool:
         @wraps(original_execute_tool)
        def instrumented_execute_tool(tool_name: str, *args, **kwargs):
+            start_time = time.time() if not performance_mode else None
             try:
                 result = original_execute_tool(tool_name, *args, **kwargs)
-                telemetry.track_tool_usage(tool_name, success=True)
+                if not performance_mode:
+                    execution_time = time.time() - start_time if start_time else None
+                    _queue_telemetry_event({
+                        'type': 'tool_usage',
+                        'tool_name': tool_name,
+                        'success': True,
+                        'execution_time': execution_time
+                    })
                 return result
             except Exception as e:
-                telemetry.track_tool_usage(tool_name, success=False)
-                telemetry.track_error(type(e).__name__)
+                if not performance_mode:
+                    execution_time = time.time() - start_time if start_time else None
+                    _queue_telemetry_event({
+                        'type': 'tool_usage',
+                        'tool_name': tool_name,
+                        'success': False,
+                        'execution_time': execution_time
+                    })
+                    _queue_telemetry_event({
+                        'type': 'error',
+                        'error_type': type(e).__name__
+                    })
                 raise
 
         agent.execute_tool = instrumented_execute_tool
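
The `execute_tool` wrapper above combines `functools.wraps` with wall-clock timing taken only when telemetry is active. Here is the same pattern as a standalone decorator; the `record` sink is a hypothetical stand-in for `_queue_telemetry_event`.

```python
import time
from functools import wraps

def timed_tool(record):
    """Decorator factory: time a tool call and send events to `record`."""
    def decorator(func):
        @wraps(func)
        def wrapper(tool_name, *args, **kwargs):
            start = time.time()
            try:
                result = func(tool_name, *args, **kwargs)
                record({'type': 'tool_usage', 'tool_name': tool_name,
                        'success': True, 'execution_time': time.time() - start})
                return result
            except Exception as exc:
                record({'type': 'tool_usage', 'tool_name': tool_name,
                        'success': False, 'execution_time': time.time() - start})
                record({'type': 'error', 'error_type': type(exc).__name__})
                raise
        return wrapper
    return decorator

@timed_tool(print)
def execute_tool(tool_name):
    return f"ran {tool_name}"

execute_tool("search")  # prints one tool_usage event, then returns
```

`@wraps` preserves the wrapped function's name and docstring, which matters here because instrumentation replaces methods on live objects and should stay invisible to introspection.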
@@ -172,14 +351,22 @@ def instrument_agent(agent: 'Agent', telemetry: Optional['MinimalTelemetry'] = N
     return agent
 
 
-def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['MinimalTelemetry'] = None):
+def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['MinimalTelemetry'] = None, performance_mode: bool = False):
     """
-    Instrument a PraisonAIAgents workflow with minimal telemetry.
+    Instrument a PraisonAIAgents workflow with performance-optimized telemetry.
 
     Args:
         workflow: The PraisonAIAgents instance to instrument
         telemetry: Optional telemetry instance (uses global if not provided)
+        performance_mode: If True, uses minimal overhead tracking
     """
+    # Early exit if telemetry is disabled by environment variables
+    from .telemetry import _is_monitoring_disabled
+    telemetry_disabled = _is_monitoring_disabled()
+
+    if telemetry_disabled:
+        return workflow
+
     if not telemetry:
         from .telemetry import get_telemetry
         telemetry = get_telemetry()
@@ -191,13 +378,17 @@ def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['Minima
     if hasattr(workflow, '_telemetry_instrumented'):
         return workflow
 
-    # Track feature usage
-    telemetry.track_feature_usage(f"workflow_{workflow.process}" if hasattr(workflow, 'process') else "workflow")
+    # Track feature usage via queue
+    if not performance_mode:
+        _queue_telemetry_event({
+            'type': 'feature_usage',
+            'feature_name': f"workflow_{workflow.process}" if hasattr(workflow, 'process') else "workflow"
+        })
 
     # Instrument all agents in the workflow
     if hasattr(workflow, 'agents') and workflow.agents:
         for agent in workflow.agents:
-            instrument_agent(agent, telemetry)
+            instrument_agent(agent, telemetry, performance_mode)
 
     # Wrap the execute_task method to track task completions
     if hasattr(workflow, 'execute_task'):
@@ -213,16 +404,29 @@ def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['Minima
 
                 result = original_execute_task(task_id, *args, **kwargs)
 
-                # Track task completion
-                task_name = task.name if task and hasattr(task, 'name') else f"task_{task_id}"
-                telemetry.track_task_completion(task_name, success=True)
+                # Track task completion via queue
+                if not performance_mode:
+                    task_name = task.name if task and hasattr(task, 'name') else f"task_{task_id}"
+                    _queue_telemetry_event({
+                        'type': 'task_completion',
+                        'task_name': task_name,
+                        'success': True
+                    })
 
                 return result
             except Exception as e:
-                telemetry.track_error(type(e).__name__)
-                if task:
-                    task_name = task.name if hasattr(task, 'name') else f"task_{task_id}"
-                    telemetry.track_task_completion(task_name, success=False)
+                if not performance_mode:
+                    _queue_telemetry_event({
+                        'type': 'error',
+                        'error_type': type(e).__name__
+                    })
+                    if task:
+                        task_name = task.name if hasattr(task, 'name') else f"task_{task_id}"
+                        _queue_telemetry_event({
+                            'type': 'task_completion',
+                            'task_name': task_name,
+                            'success': False
+                        })
                 raise
 
         workflow.execute_task = instrumented_execute_task
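
For reference, these wrappers all emit plain dictionaries with a `type` discriminator. A failing task produces an `error` event followed by a `task_completion` event with `success=False`, while a succeeding task produces a single completion event. Sketched as a pure function (the function name is illustrative):

```python
def events_for_task(task_name, error=None):
    """Events the execute_task wrapper enqueues for one task outcome."""
    if error is None:
        return [{'type': 'task_completion', 'task_name': task_name,
                 'success': True}]
    return [
        {'type': 'error', 'error_type': type(error).__name__},
        {'type': 'task_completion', 'task_name': task_name,
         'success': False},
    ]

print(events_for_task("summarize"))
print(events_for_task("summarize", error=ValueError("bad input")))
```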
@@ -237,7 +441,11 @@ def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['Minima
             # Don't double-track here since agent.chat already tracks execution
             return result
         except Exception as e:
-            telemetry.track_error(type(e).__name__)
+            if not performance_mode:
+                _queue_telemetry_event({
+                    'type': 'error',
+                    'error_type': type(e).__name__
+                })
             raise
 
     workflow.start = instrumented_start
@@ -253,7 +461,11 @@ def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['Minima
             # Don't double-track here since agent.chat already tracks execution
             return result
         except Exception as e:
-            telemetry.track_error(type(e).__name__)
+            if not performance_mode:
+                _queue_telemetry_event({
+                    'type': 'error',
+                    'error_type': type(e).__name__
+                })
             raise
 
     workflow.astart = instrumented_astart
@@ -265,14 +477,46 @@ def instrument_workflow(workflow: 'PraisonAIAgents', telemetry: Optional['Minima
 
 
 # Auto-instrumentation helper
-def auto_instrument_all(telemetry: Optional['MinimalTelemetry'] = None):
+def auto_instrument_all(telemetry: Optional['MinimalTelemetry'] = None, performance_mode: bool = False):
     """
-    Automatically instrument all new instances of Agent and PraisonAIAgents.
+    Automatically instrument all new instances of Agent and PraisonAIAgents with optimized telemetry.
     This should be called after enabling telemetry.
 
     Args:
         telemetry: Optional telemetry instance (uses global if not provided)
+        performance_mode: If True, uses minimal overhead tracking
     """
+    # Early exit if telemetry is disabled by environment variables to avoid
+    # expensive class wrapping overhead
+    try:
+        from .telemetry import _is_monitoring_disabled
+        telemetry_disabled = _is_monitoring_disabled()
+    except ImportError:
+        # Fallback if import fails - use same logic as _is_monitoring_disabled
+        import os
+
+        # Check if explicitly disabled via legacy flags
+        explicitly_disabled = any([
+            os.environ.get('PRAISONAI_TELEMETRY_DISABLED', '').lower() in ('true', '1', 'yes'),
+            os.environ.get('PRAISONAI_DISABLE_TELEMETRY', '').lower() in ('true', '1', 'yes'),
+            os.environ.get('DO_NOT_TRACK', '').lower() in ('true', '1', 'yes'),
+        ])
+
+        if explicitly_disabled:
+            telemetry_disabled = True
+        else:
+            # NEW: Check if explicitly enabled (required for monitoring to be active)
+            explicitly_enabled = any([
+                os.environ.get('PRAISONAI_PERFORMANCE_ENABLED', '').lower() in ('true', '1', 'yes'),
+                os.environ.get('PRAISONAI_TELEMETRY_ENABLED', '').lower() in ('true', '1', 'yes'),
+            ])
+
+            # Disabled by default unless explicitly enabled
+            telemetry_disabled = not explicitly_enabled
+
+    if telemetry_disabled:
+        return
+
     if not telemetry:
         from .telemetry import get_telemetry
         telemetry = get_telemetry()
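
The fallback branch above encodes an opt-in policy: any disable flag wins, and otherwise telemetry stays off unless one of the enable flags is set. The same decision table as a pure function, with a few spot checks (the function name is illustrative; the flag names come straight from the diff):

```python
def monitoring_disabled(env):
    """Mirror of the fallback logic: opt-out flags win, then opt-in is required."""
    def truthy(key):
        return env.get(key, '').lower() in ('true', '1', 'yes')

    if any(map(truthy, ('PRAISONAI_TELEMETRY_DISABLED',
                        'PRAISONAI_DISABLE_TELEMETRY',
                        'DO_NOT_TRACK'))):
        return True
    return not any(map(truthy, ('PRAISONAI_PERFORMANCE_ENABLED',
                                'PRAISONAI_TELEMETRY_ENABLED')))

assert monitoring_disabled({}) is True                            # off by default
assert monitoring_disabled({'PRAISONAI_TELEMETRY_ENABLED': '1'}) is False
assert monitoring_disabled({'DO_NOT_TRACK': 'yes',
                            'PRAISONAI_TELEMETRY_ENABLED': '1'}) is True
```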
@@ -293,13 +537,13 @@ def auto_instrument_all(telemetry: Optional['MinimalTelemetry'] = None):
         @wraps(original_agent_init)
         def agent_init_wrapper(self, *args, **kwargs):
             original_agent_init(self, *args, **kwargs)
-            instrument_agent(self, telemetry)
+            instrument_agent(self, telemetry, performance_mode)
 
         # Wrap PraisonAIAgents.__init__
         @wraps(original_workflow_init)
         def workflow_init_wrapper(self, *args, **kwargs):
             original_workflow_init(self, *args, **kwargs)
-            instrument_workflow(self, telemetry)
+            instrument_workflow(self, telemetry, performance_mode)
 
         # Apply wrapped constructors
         Agent.__init__ = agent_init_wrapper
@@ -307,4 +551,61 @@
     except ImportError:
         # Classes not available, skip auto-instrumentation
-        pass
+        pass
+
+
+def enable_performance_mode():
+    """Enable performance mode for all new telemetry instrumentation."""
+    global _performance_mode_enabled
+    _performance_mode_enabled = True
+
+
+def disable_performance_mode():
+    """Disable performance mode for all new telemetry instrumentation."""
+    global _performance_mode_enabled
+    _performance_mode_enabled = False
+
+
+def cleanup_telemetry_resources():
+    """
+    Clean up telemetry resources including thread pools and queues.
+    Should be called during application shutdown.
+    """
+    global _telemetry_executor, _telemetry_queue, _queue_processor_running
+
+    # Stop queue processing
+    _queue_processor_running = False
+
+    # Wait for any remaining events to be processed
+    if _telemetry_queue:
+        try:
+            # Give queue processor time to finish current batch
+            time.sleep(1.1)  # Slightly longer than batch timeout
+
+            # Clear any remaining events
+            while not _telemetry_queue.empty():
+                try:
+                    _telemetry_queue.get_nowait()
+                    _telemetry_queue.task_done()
+                except queue.Empty:
+                    break
+        except Exception:
+            pass
+
+    # Shut down the shared thread pool; ThreadPoolExecutor.shutdown() accepts
+    # no timeout argument, so this waits for in-flight telemetry to finish
+    if _telemetry_executor:
+        try:
+            _telemetry_executor.shutdown(wait=True)
+        except Exception as e:
+            import logging
+            logging.debug(f"Telemetry executor shutdown error: {e}")
+        _telemetry_executor = None
+
+    _telemetry_queue = None
+
+
+# Performance mode flag moved to top of file to avoid NameError
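
Taken together, the new public helpers suggest an application lifecycle like the following. This is hypothetical wiring: the module path `praisonaiagents.telemetry.integration` is inferred from the relative imports in this diff and should be verified against the installed package.

```python
import atexit

# Module path inferred from this diff's relative imports; verify before use.
from praisonaiagents.telemetry.integration import (
    auto_instrument_all,
    cleanup_telemetry_resources,
    enable_performance_mode,
)

enable_performance_mode()                     # new wrappers skip event queueing
auto_instrument_all(performance_mode=True)    # wrap Agent / PraisonAIAgents inits
atexit.register(cleanup_telemetry_resources)  # drain the queue, stop the executor
```

Registering the cleanup with `atexit` matters because the shared executor and queue are module-level singletons; without it, queued events from the final second of execution may be dropped at interpreter exit.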