praisonaiagents 0.0.145__py3-none-any.whl → 0.0.146__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes exactly as they appear in the public registry.
@@ -0,0 +1,571 @@
+ """
+ Performance Analysis Utilities for PraisonAI
+
+ This module provides advanced analysis tools for function flow visualization,
+ performance bottleneck identification, and comprehensive reporting.
+
+ Features:
+ - Function flow analysis and visualization
+ - Performance bottleneck detection
+ - Execution path mapping
+ - Performance trend analysis
+ - Advanced reporting utilities
+ """
+
+ import json
+ from collections import defaultdict
+ from typing import Dict, Any, List, Optional
+ from datetime import datetime
+ import logging
+ from dataclasses import dataclass
+
+ try:
+     from .performance_monitor import performance_monitor
+     PERFORMANCE_MONITOR_AVAILABLE = True
+ except ImportError:
+     PERFORMANCE_MONITOR_AVAILABLE = False
+
+ logger = logging.getLogger(__name__)
+
+ # Performance analysis thresholds
+ BOTTLENECK_THRESHOLD_AVERAGE = 1.0  # seconds; average duration above which a function is flagged as a bottleneck
+ BOTTLENECK_THRESHOLD_MAX = 5.0  # seconds; any single call longer than this flags the function as a bottleneck
+ HIGH_SEVERITY_THRESHOLD = 2.0  # seconds; average duration above which a bottleneck is rated "high" severity
+
+
+
+
+ class FunctionFlowAnalyzer:
+     """
+     Advanced function flow analysis and visualization.
+
+     Provides tools for analyzing function execution patterns,
+     identifying bottlenecks, and visualizing execution flow.
+     """
+
+     def __init__(self):
+         self.logger = logging.getLogger(__name__)
+
+     def analyze_execution_flow(self, flow_data: Optional[List[Dict]] = None) -> Dict[str, Any]:
+         """
+         Analyze function execution flow to identify patterns and bottlenecks.
+
+         Args:
+             flow_data: Optional flow data, or None to use current monitor data
+
+         Returns:
+             Analysis results with flow patterns, bottlenecks, and statistics
+         """
+         if flow_data is None:
+             if not PERFORMANCE_MONITOR_AVAILABLE:
+                 return {"error": "Performance monitor not available and no flow data provided"}
+             flow_data = performance_monitor.get_function_flow()
+
+         if not flow_data:
+             return {"message": "No flow data available"}
+
+         analysis = {
+             "total_events": len(flow_data),
+             "execution_patterns": self._analyze_patterns(flow_data),
+             "bottlenecks": self._identify_bottlenecks(flow_data),
+             "parallelism": self._analyze_parallelism(flow_data),
+             "call_chains": self._build_call_chains(flow_data),
+             "statistics": self._calculate_flow_statistics(flow_data)
+         }
+
+         return analysis
+
+     def _analyze_patterns(self, flow_data: List[Dict]) -> Dict[str, Any]:
+         """Analyze execution patterns in the flow data."""
+         patterns = {
+             "most_frequent_sequences": [],
+             "recursive_calls": [],
+             "long_running_chains": [],
+             "error_patterns": []
+         }
+
+         # Group events by function (used below for recursion detection)
+         function_sequences = defaultdict(list)
+
+         for event in flow_data:
+             func_name = event.get('function', 'unknown')
+             function_sequences[func_name].append(event)
+
+         # Find most frequent function sequences
+         sequence_counts = defaultdict(int)
+         for i in range(len(flow_data) - 1):
+             current_func = flow_data[i].get('function')
+             next_func = flow_data[i + 1].get('function')
+             if current_func and next_func:
+                 sequence_counts[(current_func, next_func)] += 1
+
+         # Sort by frequency
+         frequent_sequences = sorted(sequence_counts.items(), key=lambda x: x[1], reverse=True)[:5]
+         patterns["most_frequent_sequences"] = [
+             {"sequence": f"{seq[0]} -> {seq[1]}", "count": count}
+             for seq, count in frequent_sequences
+         ]
+
+         # Find recursive calls (nested 'start' events for the same function)
+         for func_name, events in function_sequences.items():
+             nested_level = 0
+             for event in events:
+                 if event.get('event') == 'start':
+                     nested_level += 1
+                     if nested_level > 1:
+                         patterns["recursive_calls"].append({
+                             "function": func_name,
+                             "max_depth": nested_level,
+                             "timestamp": event.get('timestamp')
+                         })
+                 elif event.get('event') == 'end':
+                     nested_level = max(0, nested_level - 1)
+
+         return patterns
+
+     def _identify_bottlenecks(self, flow_data: List[Dict]) -> List[Dict[str, Any]]:
+         """Identify performance bottlenecks in the execution flow."""
+         bottlenecks = []
+
+         # Group start/end events
+         function_durations = defaultdict(list)
+         active_calls = {}
+
+         for event in flow_data:
+             func_name = event.get('function', 'unknown')
+             event_type = event.get('event')
+             timestamp = event.get('timestamp')
+             thread_id = event.get('thread_id', 0)
+
+             if event_type == 'start':
+                 call_key = f"{func_name}_{thread_id}_{timestamp}"
+                 active_calls[call_key] = event
+             elif event_type == 'end':
+                 duration = event.get('duration', 0)
+                 if duration > 0:
+                     function_durations[func_name].append(duration)
+
+         # Find functions with consistently slow performance
+         for func_name, durations in function_durations.items():
+             if durations:
+                 avg_duration = sum(durations) / len(durations)
+                 max_duration = max(durations)
+
+                 # Consider it a bottleneck based on the defined thresholds
+                 if avg_duration > BOTTLENECK_THRESHOLD_AVERAGE or max_duration > BOTTLENECK_THRESHOLD_MAX:
+                     bottlenecks.append({
+                         "function": func_name,
+                         "average_duration": avg_duration,
+                         "max_duration": max_duration,
+                         "call_count": len(durations),
+                         "severity": "high" if avg_duration > HIGH_SEVERITY_THRESHOLD else "medium"
+                     })
+
+         # Sort by average duration, slowest first
+         bottlenecks.sort(key=lambda x: x["average_duration"], reverse=True)
+         return bottlenecks
+
+     def _analyze_parallelism(self, flow_data: List[Dict]) -> Dict[str, Any]:
+         """Analyze parallelism and concurrent execution patterns."""
+         thread_activities = defaultdict(list)
+
+         for event in flow_data:
+             thread_id = event.get('thread_id', 0)
+             thread_activities[thread_id].append(event)
+
+         # Approximate peak concurrency: the most 'start' events sharing a single timestamp
+         timestamp_activities = defaultdict(int)
+         for event in flow_data:
+             if event.get('event') == 'start':
+                 timestamp = event.get('timestamp')
+                 timestamp_activities[timestamp] += 1
+
+         peak_concurrency = max(timestamp_activities.values()) if timestamp_activities else 0
+
+         return {
+             "total_threads": len(thread_activities),
+             "peak_concurrency": peak_concurrency,
+             "thread_utilization": {
+                 str(thread_id): len(events)
+                 for thread_id, events in thread_activities.items()
+             }
+         }
+
+     def _build_call_chains(self, flow_data: List[Dict]) -> List[Dict[str, Any]]:
+         """Build call chains from flow data."""
+         # Track call stacks and completed chains per thread
+         thread_chains = defaultdict(list)
+         call_stacks = defaultdict(list)
+         current_chains = defaultdict(list)
+
+         for event in flow_data:
+             thread_id = event.get('thread_id', 0)
+             func_name = event.get('function', 'unknown')
+             event_type = event.get('event')
+
+             if event_type == 'start':
+                 call_stacks[thread_id].append(func_name)
+                 current_chains[thread_id].append(func_name)
+             elif event_type == 'end' and call_stacks[thread_id] and call_stacks[thread_id][-1] == func_name:
+                 call_stacks[thread_id].pop()
+
+                 # If this completes a top-level call (stack becomes empty), record the chain
+                 if not call_stacks[thread_id] and current_chains[thread_id]:
+                     chain = current_chains[thread_id].copy()
+                     thread_chains[thread_id].append({
+                         "thread_id": thread_id,
+                         "chain_length": len(chain),
+                         "functions": chain
+                     })
+                     current_chains[thread_id].clear()
+
+         # Flatten chains across threads and return the first 10
+         all_chains = []
+         for chains in thread_chains.values():
+             all_chains.extend(chains)
+
+         return all_chains[:10]
+
+     def _calculate_flow_statistics(self, flow_data: List[Dict]) -> Dict[str, Any]:
+         """Calculate comprehensive flow statistics."""
+         total_events = len(flow_data)
+         start_events = [e for e in flow_data if e.get('event') == 'start']
+         end_events = [e for e in flow_data if e.get('event') == 'end']
+         successful_events = [e for e in end_events if e.get('success', True)]
+
+         total_duration = sum(e.get('duration', 0) for e in end_events)
+
+         return {
+             "total_events": total_events,
+             "function_calls": len(start_events),
+             "completed_calls": len(end_events),
+             "successful_calls": len(successful_events),
+             "success_rate": len(successful_events) / len(end_events) if end_events else 0,
+             "total_execution_time": total_duration,
+             "average_execution_time": total_duration / len(end_events) if end_events else 0
+         }
+
+     def visualize_flow(self, flow_data: Optional[List[Dict]] = None,
+                        format: str = "text") -> str:
+         """
+         Create a visual representation of the function execution flow.
+
+         Args:
+             flow_data: Optional flow data, or None to use current monitor data
+             format: Output format ("text", "mermaid", "json")
+
+         Returns:
+             Formatted visualization string
+         """
+         if flow_data is None:
+             if not PERFORMANCE_MONITOR_AVAILABLE:
+                 return "Performance monitor not available and no flow data provided"
+             flow_data = performance_monitor.get_function_flow(50)  # Last 50 events
+
+         if not flow_data:
+             return "No flow data available"
+
+         if format == "text":
+             return self._create_text_visualization(flow_data)
+         elif format == "mermaid":
+             return self._create_mermaid_diagram(flow_data)
+         elif format == "json":
+             return json.dumps(flow_data, indent=2)
+         else:
+             return "Unknown format"
+
+     def _create_text_visualization(self, flow_data: List[Dict]) -> str:
+         """Create a text-based visualization of the execution flow."""
+         lines = []
+         lines.append("📊 FUNCTION EXECUTION FLOW VISUALIZATION")
+         lines.append("=" * 60)
+
+         # Group by thread for better visualization
+         threads = defaultdict(list)
+         for event in flow_data:
+             thread_id = event.get('thread_id', 0)
+             threads[thread_id].append(event)
+
+         for thread_id, events in threads.items():
+             lines.append(f"\n🧵 Thread {thread_id}:")
+             lines.append("-" * 30)
+
+             call_stack = []
+             for event in events:
+                 func_name = event.get('function', 'unknown')
+                 event_type = event.get('event')
+                 duration = event.get('duration', 0)
+                 success = event.get('success', True)
+
+                 if event_type == 'start':
+                     call_stack.append(func_name)
+                     indent = " " * (len(call_stack) - 1)
+                     lines.append(f"{indent}🟢 START {func_name}")
+                 elif event_type == 'end':
+                     if call_stack and call_stack[-1] == func_name:
+                         call_stack.pop()
+                     indent = " " * len(call_stack)
+                     status = "✅" if success else "❌"
+                     lines.append(f"{indent}{status} END {func_name} ({duration:.3f}s)")
+
+         return "\n".join(lines)
+
+     def _create_mermaid_diagram(self, flow_data: List[Dict]) -> str:
+         """Create a Mermaid diagram representation of the flow."""
+         lines = ["graph TD"]
+
+         # Build flow connections
+         node_counter = 0
+         node_map = {}
+
+         for event in flow_data:
+             if event.get('event') == 'start':
+                 func_name = event.get('function', 'unknown')
+                 if func_name not in node_map:
+                     node_map[func_name] = f"n{node_counter}"
+                     node_counter += 1
+
+         # Add nodes
+         for func_name, node_id in node_map.items():
+             lines.append(f" {node_id}[{func_name}]")
+
+         # Add connections based on call sequence
+         prev_func = None
+         for event in flow_data:
+             if event.get('event') == 'start':
+                 curr_func = event.get('function', 'unknown')
+                 if prev_func and curr_func != prev_func:
+                     prev_node = node_map.get(prev_func)
+                     curr_node = node_map.get(curr_func)
+                     if prev_node and curr_node:
+                         lines.append(f" {prev_node} --> {curr_node}")
+                 prev_func = curr_func
+
+         return "\n".join(lines)
+
+
+ class PerformanceAnalyzer:
+     """
+     Comprehensive performance analysis tools.
+
+     Provides advanced analysis capabilities for identifying performance
+     issues, trends, and optimization opportunities.
+     """
+
+     def __init__(self):
+         self.flow_analyzer = FunctionFlowAnalyzer()
+
+     def analyze_performance_trends(self, hours_back: int = 24) -> Dict[str, Any]:
+         """
+         Analyze performance trends over time.
+
+         Args:
+             hours_back: Analysis window in hours (currently informational; trends use the monitor's current data)
+
+         Returns:
+             Trend analysis results
+         """
+         if not PERFORMANCE_MONITOR_AVAILABLE:
+             return {"error": "Performance monitor not available"}
+
+         # Get current performance data
+         func_stats = performance_monitor.get_function_performance()
+         api_stats = performance_monitor.get_api_call_performance()
+
+         trends = {
+             "analysis_period_hours": hours_back,
+             "function_trends": self._analyze_function_trends(func_stats),
+             "api_trends": self._analyze_api_trends(api_stats),
+             "recommendations": self._generate_recommendations(func_stats, api_stats)
+         }
+
+         return trends
+
+     def _analyze_function_trends(self, func_stats: Dict[str, Any]) -> Dict[str, Any]:
+         """Analyze function performance trends."""
+         trends = {
+             "improving": [],
+             "degrading": [],
+             "stable": []
+         }
+
+         for func_name, stats in func_stats.items():
+             recent_times = stats.get('recent_times', [])
+             if len(recent_times) >= 10:  # Need sufficient data
+                 # Compare first half to second half
+                 mid_point = len(recent_times) // 2
+                 first_half_avg = sum(recent_times[:mid_point]) / mid_point
+                 second_half_avg = sum(recent_times[mid_point:]) / (len(recent_times) - mid_point)
+
+                 if first_half_avg != 0:
+                     change_percent = ((second_half_avg - first_half_avg) / first_half_avg) * 100
+                 else:
+                     change_percent = 0.0  # No change if first half average is zero
+
+                 trend_data = {
+                     "function": func_name,
+                     "change_percent": change_percent,
+                     "first_half_avg": first_half_avg,
+                     "second_half_avg": second_half_avg
+                 }
+
+                 if change_percent < -5:  # Improving (getting faster)
+                     trends["improving"].append(trend_data)
+                 elif change_percent > 5:  # Degrading (getting slower)
+                     trends["degrading"].append(trend_data)
+                 else:
+                     trends["stable"].append(trend_data)
+
+         return trends
+
+     def _analyze_api_trends(self, api_stats: Dict[str, Any]) -> Dict[str, Any]:
+         """Analyze API performance trends."""
+         trends = {
+             "fastest_apis": [],
+             "slowest_apis": [],
+             "most_reliable": [],
+             "least_reliable": []
+         }
+
+         api_performance = []
+         for api_name, stats in api_stats.items():
+             if stats.get('call_count', 0) > 0:
+                 avg_time = stats['total_time'] / stats['call_count']
+                 success_rate = stats.get('success_rate', 0)
+
+                 api_performance.append({
+                     "api": api_name,
+                     "average_time": avg_time,
+                     "success_rate": success_rate,
+                     "call_count": stats['call_count']
+                 })
+
+         # Sort by performance metrics
+         fastest = sorted(api_performance, key=lambda x: x['average_time'])[:5]
+         slowest = sorted(api_performance, key=lambda x: x['average_time'], reverse=True)[:5]
+         most_reliable = sorted(api_performance, key=lambda x: x['success_rate'], reverse=True)[:5]
+         least_reliable = sorted(api_performance, key=lambda x: x['success_rate'])[:5]
+
+         trends["fastest_apis"] = fastest
+         trends["slowest_apis"] = slowest
+         trends["most_reliable"] = most_reliable
+         trends["least_reliable"] = least_reliable
+
+         return trends
+
+     def _generate_recommendations(self, func_stats: Dict[str, Any],
+                                   api_stats: Dict[str, Any]) -> List[str]:
+         """Generate performance optimization recommendations."""
+         recommendations = []
+
+         # Function recommendations
+         for func_name, stats in func_stats.items():
+             if stats.get('call_count', 0) > 0:
+                 avg_time = stats['total_time'] / stats['call_count']
+                 error_rate = stats['error_count'] / stats['call_count']
+
+                 if avg_time > 2.0:
+                     recommendations.append(
+                         f"⚠️ Function '{func_name}' has a high average execution time ({avg_time:.2f}s). Consider optimization."
+                     )
+
+                 if error_rate > 0.1:
+                     recommendations.append(
+                         f"🚨 Function '{func_name}' has a high error rate ({error_rate*100:.1f}%). Investigate error handling."
+                     )
+
+         # API recommendations
+         for api_name, stats in api_stats.items():
+             if stats.get('call_count', 0) > 0:
+                 avg_time = stats['total_time'] / stats['call_count']
+                 success_rate = stats.get('success_rate', 1.0)
+
+                 if avg_time > 5.0:
+                     recommendations.append(
+                         f"🐌 API '{api_name}' has a high average response time ({avg_time:.2f}s). Consider caching or optimization."
+                     )
+
+                 if success_rate < 0.9:
+                     recommendations.append(
+                         f"⚠️ API '{api_name}' has a low success rate ({success_rate*100:.1f}%). Check error handling and retry logic."
+                     )
+
+         if not recommendations:
+             recommendations.append("✅ No major performance issues detected. System is performing well!")
+
+         return recommendations
+
+     def generate_comprehensive_report(self) -> str:
+         """Generate a comprehensive performance analysis report."""
+         if not PERFORMANCE_MONITOR_AVAILABLE:
+             return "Performance monitor not available"
+
+         report_lines = []
+         report_lines.append("=" * 80)
+         report_lines.append("COMPREHENSIVE PERFORMANCE ANALYSIS REPORT")
+         report_lines.append("=" * 80)
+         report_lines.append(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+         report_lines.append("")
+
+         # Basic performance report
+         basic_report = performance_monitor.generate_performance_report()
+         report_lines.append(basic_report)
+         report_lines.append("")
+
+         # Flow analysis
+         report_lines.append("🔄 EXECUTION FLOW ANALYSIS")
+         report_lines.append("-" * 40)
+         flow_analysis = self.flow_analyzer.analyze_execution_flow()
+
+         if "bottlenecks" in flow_analysis:
+             bottlenecks = flow_analysis["bottlenecks"]
+             if bottlenecks:
+                 report_lines.append("🚨 IDENTIFIED BOTTLENECKS:")
+                 for bottleneck in bottlenecks[:5]:
+                     report_lines.append(f"• {bottleneck['function']}: {bottleneck['average_duration']:.2f}s avg, {bottleneck['severity']} severity")
+             else:
+                 report_lines.append("✅ No significant bottlenecks identified")
+
+         report_lines.append("")
+
+         # Performance trends
+         report_lines.append("📈 PERFORMANCE TRENDS")
+         report_lines.append("-" * 40)
+         trends = self.analyze_performance_trends()
+
+         if "recommendations" in trends:
+             recommendations = trends["recommendations"]
+             report_lines.append("💡 RECOMMENDATIONS:")
+             for rec in recommendations:
+                 report_lines.append(f" {rec}")
+
+         report_lines.append("")
+         report_lines.append("=" * 80)
+
+         return "\n".join(report_lines)
+
+
+ # Global instances for easy access
+ flow_analyzer = FunctionFlowAnalyzer()
+ performance_analyzer = PerformanceAnalyzer()
+
+
+ # Convenience functions
+ def analyze_function_flow() -> Dict[str, Any]:
+     """Analyze current function execution flow."""
+     return flow_analyzer.analyze_execution_flow()
+
+
+ def visualize_execution_flow(format: str = "text") -> str:
+     """Visualize function execution flow."""
+     return flow_analyzer.visualize_flow(format=format)
+
+
+ def analyze_performance_trends() -> Dict[str, Any]:
+     """Analyze performance trends."""
+     return performance_analyzer.analyze_performance_trends()
+
+
+ def generate_comprehensive_report() -> str:
+     """Generate comprehensive performance analysis report."""
+     return performance_analyzer.generate_comprehensive_report()
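
For orientation, here is a minimal sketch of how the new module's convenience functions might be driven. The import path is an assumption inferred from the module's relative import of performance_monitor (it ships alongside that module); adjust it to wherever the file actually lands in the package.

# Sketch only; the exact import path is assumed, not confirmed by this diff.
from praisonaiagents.telemetry.performance_analysis import (
    analyze_function_flow,
    visualize_execution_flow,
    analyze_performance_trends,
    generate_comprehensive_report,
)

flow = analyze_function_flow()              # patterns, bottlenecks, parallelism, statistics
print(visualize_execution_flow("mermaid"))  # "text", "mermaid", or "json"
trends = analyze_performance_trends()       # function/API trends plus recommendations
print(generate_comprehensive_report())      # full formatted report
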
@@ -119,13 +119,14 @@ class MinimalTelemetry:
          except (ImportError, KeyError, AttributeError):
              return "unknown"

-     def track_agent_execution(self, agent_name: str = None, success: bool = True):
+     def track_agent_execution(self, agent_name: str = None, success: bool = True, async_mode: bool = False):
          """
          Track an agent execution event.

          Args:
              agent_name: Name of the agent (not logged, just for counting)
              success: Whether the execution was successful
+             async_mode: If True, defer PostHog capture to prevent blocking in streaming scenarios
          """
          if not self.enabled:
              return
@@ -133,18 +134,41 @@ class MinimalTelemetry:
          with self._metrics_lock:
              self._metrics["agent_executions"] += 1

+         # Always log immediately for debugging
+         self.logger.debug(f"Agent execution tracked: success={success}")
+
          # Send event to PostHog
          if self._posthog:
-             self._posthog.capture(
-                 distinct_id=self.session_id,
-                 event='agent_execution',
-                 properties={
-                     'success': success,
-                     'session_id': self.session_id
-                 }
-             )
-
-         self.logger.debug(f"Agent execution tracked: success={success}")
+             if async_mode:
+                 # Use a background thread to prevent blocking streaming responses
+                 def _async_capture():
+                     try:
+                         self._posthog.capture(
+                             distinct_id=self.session_id,
+                             event='agent_execution',
+                             properties={
+                                 'success': success,
+                                 'session_id': self.session_id
+                             }
+                         )
+                     except Exception as e:
+                         # Log and swallow telemetry errors so they never disrupt the user experience
+                         self.logger.debug(f"Async PostHog capture error: {e}")
+
+                 # Execute in a background daemon thread for clean shutdown
+                 import threading
+                 thread = threading.Thread(target=_async_capture, daemon=True)
+                 thread.start()
+             else:
+                 # Synchronous capture for backward compatibility
+                 self._posthog.capture(
+                     distinct_id=self.session_id,
+                     event='agent_execution',
+                     properties={
+                         'success': success,
+                         'session_id': self.session_id
+                     }
+                 )

      def track_task_completion(self, task_name: str = None, success: bool = True):
          """
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.145
+ Version: 0.0.146
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -15,7 +15,7 @@ Requires-Dist: fastapi>=0.115.0; extra == "mcp"
  Requires-Dist: uvicorn>=0.34.0; extra == "mcp"
  Provides-Extra: memory
  Requires-Dist: chromadb>=1.0.0; extra == "memory"
- Requires-Dist: litellm>=1.72.0; extra == "memory"
+ Requires-Dist: litellm>=1.72.6; extra == "memory"
  Provides-Extra: knowledge
  Requires-Dist: mem0ai>=0.1.0; extra == "knowledge"
  Requires-Dist: chromadb>=1.0.0; extra == "knowledge"
@@ -25,7 +25,7 @@ Provides-Extra: graph
  Requires-Dist: mem0ai[graph]>=0.1.0; extra == "graph"
  Requires-Dist: chromadb>=1.0.0; extra == "graph"
  Provides-Extra: llm
- Requires-Dist: litellm>=1.72.0; extra == "llm"
+ Requires-Dist: litellm>=1.72.6; extra == "llm"
  Requires-Dist: pydantic>=2.4.2; extra == "llm"
  Provides-Extra: api
  Requires-Dist: fastapi>=0.115.0; extra == "api"
@@ -35,6 +35,11 @@ Requires-Dist: posthog>=3.0.0; extra == "telemetry"
  Provides-Extra: mongodb
  Requires-Dist: pymongo>=4.6.3; extra == "mongodb"
  Requires-Dist: motor>=3.4.0; extra == "mongodb"
+ Provides-Extra: auth
+ Requires-Dist: PyJWT>=2.8.0; extra == "auth"
+ Requires-Dist: passlib[bcrypt]>=1.7.4; extra == "auth"
+ Requires-Dist: python-jose[cryptography]>=3.3.0; extra == "auth"
+ Requires-Dist: python-multipart>=0.0.6; extra == "auth"
  Provides-Extra: all
  Requires-Dist: praisonaiagents[memory]; extra == "all"
  Requires-Dist: praisonaiagents[knowledge]; extra == "all"
@@ -44,3 +49,4 @@ Requires-Dist: praisonaiagents[mcp]; extra == "all"
  Requires-Dist: praisonaiagents[api]; extra == "all"
  Requires-Dist: praisonaiagents[telemetry]; extra == "all"
  Requires-Dist: praisonaiagents[mongodb]; extra == "all"
+ Requires-Dist: praisonaiagents[auth]; extra == "all"
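
In practical terms, the new dependency group should be installable with `pip install "praisonaiagents[auth]"` (or pulled in transitively via the `all` extra), bringing in PyJWT, passlib[bcrypt], python-jose[cryptography], and python-multipart.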