praisonaiagents-0.0.142-py3-none-any.whl → praisonaiagents-0.0.143-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,6 +9,7 @@ import os
 import time
 import platform
 import hashlib
+import threading
 from typing import Dict, Any, Optional
 from datetime import datetime
 import logging
@@ -40,6 +41,15 @@ class MinimalTelemetry:
     - Can be disabled via environment variables
     """
 
+    # Common error phrases that indicate interpreter shutdown
+    _SHUTDOWN_ERROR_PHRASES = [
+        'cannot schedule new futures',
+        'interpreter shutdown',
+        'atexit after shutdown',
+        'event loop closed',
+        'runtime is shutting down'
+    ]
+
     def __init__(self, enabled: bool = None):
         """
         Initialize the minimal telemetry collector.
@@ -55,6 +65,10 @@ class MinimalTelemetry:
 
         self.logger = logging.getLogger(__name__)
 
+        # Add shutdown tracking to prevent double shutdown
+        self._shutdown_complete = False
+        self._shutdown_lock = threading.Lock()
+
         if not self.enabled:
             self.logger.debug("Telemetry is disabled")
             return
@@ -70,6 +84,8 @@ class MinimalTelemetry:
             "tool_calls": 0,
             "errors": 0,
         }
+        self._metrics_lock = threading.Lock()
+        self._max_timing_entries = 1000  # Limit to prevent memory leaks
 
         # Collect basic environment info (anonymous)
         self._environment = {
@@ -100,7 +116,7 @@ class MinimalTelemetry:
         try:
             from .. import __version__
             return __version__
-        except ImportError:
+        except (ImportError, KeyError, AttributeError):
             return "unknown"
 
     def track_agent_execution(self, agent_name: str = None, success: bool = True):
@@ -114,7 +130,8 @@ class MinimalTelemetry:
         if not self.enabled:
             return
 
-        self._metrics["agent_executions"] += 1
+        with self._metrics_lock:
+            self._metrics["agent_executions"] += 1
 
         # Send event to PostHog
         if self._posthog:
@@ -140,7 +157,8 @@ class MinimalTelemetry:
         if not self.enabled:
             return
 
-        self._metrics["task_completions"] += 1
+        with self._metrics_lock:
+            self._metrics["task_completions"] += 1
 
         # Send event to PostHog
         if self._posthog:
@@ -155,33 +173,60 @@ class MinimalTelemetry:
 
         self.logger.debug(f"Task completion tracked: success={success}")
 
-    def track_tool_usage(self, tool_name: str, success: bool = True):
+    def track_tool_usage(self, tool_name: str, success: bool = True, execution_time: float = None):
         """
-        Track tool usage event.
+        Track tool usage event with optional timing.
 
         Args:
             tool_name: Name of the tool being used
             success: Whether the tool call was successful
+            execution_time: Time in seconds the tool took to execute (optional)
         """
         if not self.enabled:
             return
 
-        self._metrics["tool_calls"] += 1
+        with self._metrics_lock:
+            self._metrics["tool_calls"] += 1
+
+            # Add timing metrics if provided (with memory management)
+            if execution_time is not None:
+                if "tool_execution_times" not in self._metrics:
+                    self._metrics["tool_execution_times"] = []
+
+                timing_list = self._metrics["tool_execution_times"]
+                timing_list.append({
+                    "tool_name": tool_name,
+                    "execution_time": execution_time,
+                    "success": success
+                })
+
+                # Prevent memory leaks by limiting stored entries
+                if len(timing_list) > self._max_timing_entries:
+                    timing_list[:] = timing_list[-self._max_timing_entries:]
 
         # Send event to PostHog
         if self._posthog:
+            properties = {
+                'tool_name': tool_name,
+                'success': success,
+                'session_id': self.session_id
+            }
+
+            # Include execution time if available
+            if execution_time is not None:
+                properties['execution_time'] = execution_time
+
             self._posthog.capture(
                 distinct_id=self.session_id,
                 event='tool_usage',
-                properties={
-                    'tool_name': tool_name,
-                    'success': success,
-                    'session_id': self.session_id
-                }
+                properties=properties
             )
 
         # Only track tool name, not arguments or results
-        self.logger.debug(f"Tool usage tracked: {tool_name}, success={success}")
+        debug_msg = f"Tool usage tracked: {tool_name}, success={success}"
+        if execution_time is not None:
+            debug_msg += f", execution_time={execution_time:.3f}s"
+        self.logger.debug(debug_msg)
 
     def track_error(self, error_type: str = None):
         """
@@ -193,7 +238,8 @@ class MinimalTelemetry:
         if not self.enabled:
             return
 
-        self._metrics["errors"] += 1
+        with self._metrics_lock:
+            self._metrics["errors"] += 1
 
         # Send event to PostHog
         if self._posthog:
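
As an aside, here is a self-contained sketch (illustrative names only, nothing here is taken from the package) of the locking pattern the counter hunks above adopt and that get_metrics below relies on: every counter update and every snapshot goes through one threading.Lock, so a reader never observes a half-applied update.

    import threading

    class Counters:
        # Minimal stand-in for the locked metrics dict used above.
        def __init__(self):
            self._metrics = {"tool_calls": 0}
            self._metrics_lock = threading.Lock()

        def bump(self):
            with self._metrics_lock:
                self._metrics["tool_calls"] += 1

        def snapshot(self):
            with self._metrics_lock:
                return self._metrics.copy()

    if __name__ == "__main__":
        counters = Counters()
        workers = [threading.Thread(target=lambda: [counters.bump() for _ in range(10_000)])
                   for _ in range(4)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        print(counters.snapshot())  # {'tool_calls': 40000} -- no lost updates
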
@@ -243,10 +289,13 @@ class MinimalTelemetry:
         if not self.enabled:
             return {"enabled": False}
 
+        with self._metrics_lock:
+            metrics_copy = self._metrics.copy()
+
         return {
             "enabled": True,
             "session_id": self.session_id,
-            "metrics": self._metrics.copy(),
+            "metrics": metrics_copy,
             "environment": self._environment.copy(),
         }
 
@@ -281,28 +330,174 @@ class MinimalTelemetry:
             pass
 
         # Reset counters
-        for key in self._metrics:
-            if isinstance(self._metrics[key], int):
-                self._metrics[key] = 0
+        with self._metrics_lock:
+            for key in self._metrics:
+                if isinstance(self._metrics[key], int):
+                    self._metrics[key] = 0
 
     def shutdown(self):
         """
         Shutdown telemetry and ensure all events are sent.
+        Forces proper cleanup of background threads to prevent hanging.
         """
         if not self.enabled:
             return
+
+        # Use lock to prevent concurrent shutdown calls
+        with self._shutdown_lock:
+            if self._shutdown_complete:
+                return
+            self._shutdown_complete = True
 
         # Final flush
         self.flush()
 
         # Shutdown PostHog if available
-        if hasattr(self, '_posthog') and self._posthog:
+        posthog_client = getattr(self, '_posthog', None)
+        if posthog_client:
             try:
-                # Force a synchronous flush before shutdown
-                self._posthog.flush()
-                self._posthog.shutdown()
-            except:
-                pass
+                # Check if Python interpreter is shutting down
+                if self._is_interpreter_shutting_down():
+                    self.logger.debug("Interpreter shutting down, skipping PostHog operations")
+                    return
+
+                # Use a timeout-based flush to prevent hanging
+                import threading
+                import time
+                import concurrent.futures
+
+                # Use ThreadPoolExecutor for better control
+                with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
+                    flush_future = executor.submit(self._safe_flush_posthog, posthog_client)
+
+                    try:
+                        flush_future.result(timeout=5.0)  # 5 second timeout
+                        self.logger.debug("PostHog flush completed successfully")
+                    except concurrent.futures.TimeoutError:
+                        self.logger.warning("PostHog flush timed out")
+                        flush_future.cancel()
+                    except Exception as e:
+                        self.logger.error(f"PostHog flush failed: {e}")
+
+                # Cleanup PostHog threads safely
+                self._shutdown_posthog_threads(posthog_client)
+
+                # Standard shutdown - with interpreter shutdown check
+                if not self._is_interpreter_shutting_down():
+                    posthog_client.shutdown()
+                else:
+                    self.logger.debug("Skipping PostHog shutdown call due to interpreter shutdown")
+
+            except Exception as e:
+                # Handle specific shutdown-related errors gracefully
+                if self._is_shutdown_related_error(e):
+                    self.logger.debug(f"PostHog shutdown prevented due to interpreter shutdown: {e}")
+                else:
+                    self.logger.error(f"Error during PostHog shutdown: {e}")
+            finally:
+                self._posthog = None
+
+    def _is_shutdown_related_error(self, error: Exception) -> bool:
+        """
+        Check if an error is related to interpreter shutdown.
+
+        Args:
+            error: The exception to check
+
+        Returns:
+            True if the error is shutdown-related, False otherwise
+        """
+        error_msg = str(error).lower()
+        return any(phrase in error_msg for phrase in self._SHUTDOWN_ERROR_PHRASES)
+
+    def _is_interpreter_shutting_down(self) -> bool:
+        """
+        Check if the Python interpreter is shutting down.
+
+        Returns:
+            True if interpreter is shutting down, False otherwise
+        """
+        try:
+            import sys
+
+            # Check if the interpreter is in shutdown mode
+            if hasattr(sys, 'is_finalizing') and sys.is_finalizing():
+                return True
+
+            # Check if we can create new threads (fails during shutdown)
+            try:
+                test_thread = threading.Thread(target=lambda: None)
+                test_thread.daemon = True
+                test_thread.start()
+                test_thread.join(timeout=0.001)
+                return False
+            except (RuntimeError, threading.ThreadError):
+                return True
+
+        except Exception:
+            # If we can't determine state, assume we're shutting down to be safe
+            return True
+
+    def _safe_flush_posthog(self, posthog_client):
+        """Safely flush PostHog data with error handling."""
+        try:
+            # Skip flush if interpreter is shutting down
+            if self._is_interpreter_shutting_down():
+                self.logger.debug("Skipping PostHog flush due to interpreter shutdown")
+                return False
+
+            posthog_client.flush()
+            return True
+        except Exception as e:
+            if self._is_shutdown_related_error(e):
+                self.logger.debug(f"PostHog flush prevented due to interpreter shutdown: {e}")
+            else:
+                self.logger.debug(f"PostHog flush error: {e}")
+            return False
+
+    def _shutdown_posthog_threads(self, posthog_client):
+        """Safely shutdown PostHog background threads."""
+        try:
+            # Skip thread cleanup if interpreter is shutting down
+            if self._is_interpreter_shutting_down():
+                self.logger.debug("Skipping PostHog thread cleanup due to interpreter shutdown")
+                return
+
+            # Access thread pool safely (fix double shutdown issue)
+            thread_pool = getattr(posthog_client, '_thread_pool', None)
+            if thread_pool:
+                try:
+                    # Single shutdown call with timeout
+                    if hasattr(thread_pool, 'shutdown'):
+                        thread_pool.shutdown(wait=False)
+                        # Wait briefly for graceful shutdown
+                        import time
+                        time.sleep(0.5)
+                except Exception as e:
+                    if self._is_shutdown_related_error(e):
+                        self.logger.debug(f"Thread pool shutdown prevented due to interpreter shutdown: {e}")
+                    else:
+                        self.logger.debug(f"Thread pool shutdown error: {e}")
+
+            # Clean up consumer
+            consumer = getattr(posthog_client, '_consumer', None)
+            if consumer:
+                try:
+                    if hasattr(consumer, 'flush'):
+                        consumer.flush()
+                    if hasattr(consumer, 'shutdown'):
+                        consumer.shutdown()
+                except Exception as e:
+                    if self._is_shutdown_related_error(e):
+                        self.logger.debug(f"Consumer shutdown prevented due to interpreter shutdown: {e}")
+                    else:
+                        self.logger.debug(f"Consumer shutdown error: {e}")
+
+        except Exception as e:
+            if self._is_shutdown_related_error(e):
+                self.logger.debug(f"PostHog thread cleanup prevented due to interpreter shutdown: {e}")
+            else:
+                self.logger.debug(f"Error during PostHog thread cleanup: {e}")
 
 
 # Global telemetry instance
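
A brief usage note on the guard introduced above: with _shutdown_complete and _shutdown_lock, a second shutdown() call returns early instead of flushing PostHog twice. A hedged sketch follows; the module path is an assumption, and real callers would normally go through the package's module-level helpers.

    # Assumed import path for the class shown in this diff.
    from praisonaiagents.telemetry.telemetry import MinimalTelemetry

    telemetry = MinimalTelemetry(enabled=True)
    telemetry.track_error("timeout")

    telemetry.shutdown()  # flushes PostHog with a 5-second timeout, then tears down threads
    telemetry.shutdown()  # no-op: _shutdown_complete is already True
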
@@ -331,6 +526,41 @@ def disable_telemetry():
     _telemetry_instance = MinimalTelemetry(enabled=False)
 
 
+def force_shutdown_telemetry():
+    """
+    Force shutdown of telemetry system with comprehensive cleanup.
+    This function ensures proper termination of all background threads.
+    """
+    global _telemetry_instance
+    if _telemetry_instance:
+        _telemetry_instance.shutdown()
+
+    # Additional cleanup - wait for all threads to finish
+    import threading
+    import time
+
+    # Wait up to 3 seconds for any remaining threads to finish
+    max_wait = 3.0
+    start_time = time.time()
+
+    while time.time() - start_time < max_wait:
+        # Check for any analytics/telemetry related threads
+        analytics_threads = [
+            t for t in threading.enumerate()
+            if t != threading.current_thread()
+            and not t.daemon
+            and any(keyword in t.name.lower() for keyword in ['posthog', 'analytics', 'telemetry', 'consumer'])
+        ]
+
+        if not analytics_threads:
+            break
+
+        time.sleep(0.1)
+
+    # Reset the global instance
+    _telemetry_instance = None
+
+
 def enable_telemetry():
     """Programmatically enable telemetry (if not disabled by environment)."""
     global _telemetry_instance
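
One way the new helper could be wired up, purely as an illustration (the import path is an assumption, and registering it with atexit is not something this diff does):

    import atexit

    # Assumed import path for the module-level helper added above.
    from praisonaiagents.telemetry.telemetry import force_shutdown_telemetry

    # Ensure PostHog/analytics threads are cleaned up when the process exits.
    atexit.register(force_shutdown_telemetry)
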
@@ -49,6 +49,20 @@ TOOL_MAPPINGS = {
     'analyze_data': ('.duckdb_tools', None),
     'duckdb_tools': ('.duckdb_tools', None),
 
+    # MongoDB Tools
+    'insert_document': ('.mongodb_tools', None),
+    'insert_documents': ('.mongodb_tools', None),
+    'find_documents': ('.mongodb_tools', None),
+    'update_document': ('.mongodb_tools', None),
+    'delete_document': ('.mongodb_tools', None),
+    'create_vector_index': ('.mongodb_tools', None),
+    'vector_search': ('.mongodb_tools', None),
+    'store_with_embedding': ('.mongodb_tools', None),
+    'text_search': ('.mongodb_tools', None),
+    'get_stats': ('.mongodb_tools', None),
+    'connect_mongodb': ('.mongodb_tools', None),
+    'mongodb_tools': ('.mongodb_tools', None),
+
     # Shell Tools
     'execute_command': ('.shell_tools', None),
     'list_processes': ('.shell_tools', None),
@@ -180,12 +194,14 @@ def __getattr__(name: str) -> Any:
             'get_article', 'get_news_sources', 'get_articles_from_source', 'get_trending_topics',
             'scrape_page', 'extract_links', 'crawl', 'extract_text',
             'query', 'create_table', 'load_data', 'export_data', 'get_table_info', 'analyze_data',
+            'insert_document', 'insert_documents', 'find_documents', 'update_document', 'delete_document',
+            'create_vector_index', 'vector_search', 'store_with_embedding', 'text_search', 'get_stats', 'connect_mongodb',
             'execute_command', 'list_processes', 'kill_process', 'get_system_info',
             'evaluate', 'solve_equation', 'convert_units', 'calculate_statistics', 'calculate_financial'
         ]:
             return getattr(module, name)
         if name in ['file_tools', 'pandas_tools', 'wikipedia_tools',
-                    'newspaper_tools', 'arxiv_tools', 'spider_tools', 'duckdb_tools', 'csv_tools', 'json_tools', 'excel_tools', 'xml_tools', 'yaml_tools', 'calculator_tools', 'python_tools', 'shell_tools', 'cot_tools']:
+                    'newspaper_tools', 'arxiv_tools', 'spider_tools', 'duckdb_tools', 'mongodb_tools', 'csv_tools', 'json_tools', 'excel_tools', 'xml_tools', 'yaml_tools', 'calculator_tools', 'python_tools', 'shell_tools', 'cot_tools']:
             return module  # Returns the callable module
         return getattr(module, name)
     else:
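
For context, the mappings above feed the package's lazy __getattr__ loader, so the new MongoDB helpers resolve on first attribute access. A hedged sketch of what that looks like from user code; the top-level praisonaiagents.tools path is inferred from the wheel name, and no call signatures are shown because the diff does not include them.

    # Each name below is resolved lazily through TOOL_MAPPINGS / __getattr__,
    # importing .mongodb_tools only on first access.
    from praisonaiagents.tools import connect_mongodb, insert_document, vector_search

    # The module object itself is also exposed under its mapped name.
    from praisonaiagents.tools import mongodb_tools
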