praisonaiagents 0.0.145__py3-none-any.whl → 0.0.146__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/__init__.py +68 -7
- praisonaiagents/agent/agent.py +358 -48
- praisonaiagents/llm/__init__.py +40 -14
- praisonaiagents/llm/llm.py +485 -59
- praisonaiagents/llm/openai_client.py +98 -16
- praisonaiagents/memory/memory.py +45 -0
- praisonaiagents/telemetry/__init__.py +63 -3
- praisonaiagents/telemetry/integration.py +78 -10
- praisonaiagents/telemetry/performance_cli.py +397 -0
- praisonaiagents/telemetry/performance_monitor.py +573 -0
- praisonaiagents/telemetry/performance_utils.py +571 -0
- praisonaiagents/telemetry/telemetry.py +35 -11
- {praisonaiagents-0.0.145.dist-info → praisonaiagents-0.0.146.dist-info}/METADATA +9 -3
- {praisonaiagents-0.0.145.dist-info → praisonaiagents-0.0.146.dist-info}/RECORD +16 -13
- {praisonaiagents-0.0.145.dist-info → praisonaiagents-0.0.146.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.145.dist-info → praisonaiagents-0.0.146.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,397 @@
"""
Performance Monitoring CLI for PraisonAI

Command-line interface for accessing performance monitoring features.
Provides easy access to function performance, API call tracking, and flow analysis.

Usage:
    python -m praisonaiagents.telemetry.performance_cli [command] [options]

Or import and use programmatically:
    from praisonaiagents.telemetry.performance_cli import PerformanceCLI
    cli = PerformanceCLI()
    cli.show_performance_report()
"""

import argparse
import json
import sys
from typing import Optional
import logging

try:
    from .performance_monitor import performance_monitor, get_performance_report
    from .performance_utils import (
        analyze_function_flow, visualize_execution_flow,
        analyze_performance_trends, generate_comprehensive_report
    )
    PERFORMANCE_TOOLS_AVAILABLE = True
except ImportError:
    PERFORMANCE_TOOLS_AVAILABLE = False

logger = logging.getLogger(__name__)


class PerformanceCLI:
    """Command-line interface for performance monitoring."""

    def __init__(self):
        if not PERFORMANCE_TOOLS_AVAILABLE:
            print("❌ Performance monitoring tools not available")
            sys.exit(1)

    def show_performance_report(self, detailed: bool = False) -> None:
        """Show performance monitoring report."""
        print("📊 PraisonAI Performance Monitoring Report")
        print("=" * 60)

        if detailed:
            report = generate_comprehensive_report()
        else:
            report = get_performance_report()

        print(report)

    def show_function_stats(self, function_name: Optional[str] = None) -> None:
        """Show function performance statistics."""
        stats = performance_monitor.get_function_performance(function_name)

        if not stats:
            print("❌ No function performance data available")
            return

        print("🔧 Function Performance Statistics")
        print("=" * 50)

        for func, data in stats.items():
            print(f"\n📝 Function: {func}")
            print(f" Calls: {data['call_count']}")
            print(f" Total Time: {data['total_time']:.3f}s")
            if data['call_count'] > 0:
                print(f" Average Time: {data.get('average_time', 0):.3f}s")
                print(f" Min Time: {data['min_time']:.3f}s")
                print(f" Max Time: {data['max_time']:.3f}s")
                print(f" Success Rate: {data.get('success_rate', 0)*100:.1f}%")
            print(f" Errors: {data['error_count']}")

    def show_api_stats(self, api_name: Optional[str] = None) -> None:
        """Show API call performance statistics."""
        stats = performance_monitor.get_api_call_performance(api_name)

        if not stats:
            print("❌ No API performance data available")
            return

        print("🌐 API Call Performance Statistics")
        print("=" * 50)

        for api, data in stats.items():
            print(f"\n🔗 API: {api}")
            print(f" Calls: {data['call_count']}")
            print(f" Total Time: {data['total_time']:.3f}s")
            if data['call_count'] > 0:
                print(f" Average Time: {data.get('average_time', 0):.3f}s")
                print(f" Min Time: {data['min_time']:.3f}s")
                print(f" Max Time: {data['max_time']:.3f}s")
                print(f" Success Rate: {data.get('success_rate', 0)*100:.1f}%")
            print(f" Successful Calls: {data['success_count']}")
            print(f" Failed Calls: {data['error_count']}")

    def show_slowest_functions(self, limit: int = 10) -> None:
        """Show slowest performing functions."""
        slowest = performance_monitor.get_slowest_functions(limit)

        if not slowest:
            print("❌ No function performance data available")
            return

        print(f"🐌 Top {limit} Slowest Functions")
        print("=" * 50)

        for i, func in enumerate(slowest, 1):
            print(f"{i:2d}. {func['function']}")
            print(f" Average: {func['average_time']:.3f}s")
            print(f" Max: {func['max_time']:.3f}s")
            print(f" Calls: {func['call_count']}")
            print()

    def show_slowest_apis(self, limit: int = 10) -> None:
        """Show slowest performing API calls."""
        slowest = performance_monitor.get_slowest_api_calls(limit)

        if not slowest:
            print("❌ No API performance data available")
            return

        print(f"🐌 Top {limit} Slowest API Calls")
        print("=" * 50)

        for i, api in enumerate(slowest, 1):
            print(f"{i:2d}. {api['api']}")
            print(f" Average: {api['average_time']:.3f}s")
            print(f" Max: {api['max_time']:.3f}s")
            print(f" Success Rate: {api['success_rate']*100:.1f}%")
            print(f" Calls: {api['call_count']}")
            print()

    def show_function_flow(self, format: str = "text", events: int = 50) -> None:
        """Show function execution flow."""
        print(f"🔄 Function Execution Flow (Last {events} events)")
        print("=" * 60)

        flow_data = performance_monitor.get_function_flow(events)
        if not flow_data:
            print("❌ No flow data available")
            return

        if format == "json":
            print(json.dumps(flow_data, indent=2))
        else:
            visualization = visualize_execution_flow(format=format)
            print(visualization)

    def analyze_flow(self) -> None:
        """Analyze function execution flow for bottlenecks and patterns."""
        print("🔍 Function Flow Analysis")
        print("=" * 50)

        analysis = analyze_function_flow()

        if "error" in analysis:
            print(f"❌ {analysis['error']}")
            return

        if "message" in analysis:
            print(f"ℹ️ {analysis['message']}")
            return

        # Show bottlenecks
        bottlenecks = analysis.get("bottlenecks", [])
        if bottlenecks:
            print("🚨 Identified Bottlenecks:")
            for bottleneck in bottlenecks:
                severity_emoji = "🔴" if bottleneck["severity"] == "high" else "🟡"
                print(f" {severity_emoji} {bottleneck['function']}")
                print(f" Average: {bottleneck['average_duration']:.3f}s")
                print(f" Max: {bottleneck['max_duration']:.3f}s")
                print(f" Calls: {bottleneck['call_count']}")
                print()
        else:
            print("✅ No significant bottlenecks identified")

        # Show statistics
        stats = analysis.get("statistics", {})
        if stats:
            print("\n📊 Flow Statistics:")
            print(f" Total Function Calls: {stats.get('function_calls', 0)}")
            print(f" Completed Calls: {stats.get('completed_calls', 0)}")
            print(f" Success Rate: {stats.get('success_rate', 0)*100:.1f}%")
            print(f" Total Execution Time: {stats.get('total_execution_time', 0):.3f}s")
            print(f" Average Execution Time: {stats.get('average_execution_time', 0):.3f}s")

        # Show parallelism info
        parallelism = analysis.get("parallelism", {})
        if parallelism:
            print("\n🧵 Parallelism Analysis:")
            print(f" Total Threads: {parallelism.get('total_threads', 0)}")
            print(f" Peak Concurrency: {parallelism.get('peak_concurrency', 0)}")

    def show_active_calls(self) -> None:
        """Show currently active function calls."""
        active = performance_monitor.get_active_calls()

        print("⚡ Currently Active Function Calls")
        print("=" * 50)

        if not active:
            print("✅ No active function calls")
            return

        for _call_id, info in active.items():
            print(f"🔄 {info['function']}")
            print(f" Duration: {info['duration']:.1f}s")
            print(f" Thread: {info['thread_id']}")
            print(f" Started: {info['started_at']}")
            print()

    def show_trends(self) -> None:
        """Show performance trends analysis."""
        print("📈 Performance Trends Analysis")
        print("=" * 50)

        trends = analyze_performance_trends()

        if "error" in trends:
            print(f"❌ {trends['error']}")
            return

        # Show recommendations
        recommendations = trends.get("recommendations", [])
        if recommendations:
            print("💡 Performance Recommendations:")
            for rec in recommendations:
                print(f" {rec}")
            print()

        # Show function trends
        func_trends = trends.get("function_trends", {})
        improving = func_trends.get("improving", [])
        degrading = func_trends.get("degrading", [])

        if improving:
            print("📈 Improving Functions:")
            for trend in improving:
                print(f" ✅ {trend['function']} ({trend['change_percent']:+.1f}%)")

        if degrading:
            print("\n📉 Degrading Functions:")
            for trend in degrading:
                print(f" ⚠️ {trend['function']} ({trend['change_percent']:+.1f}%)")

        # Show API trends
        api_trends = trends.get("api_trends", {})
        slowest_apis = api_trends.get("slowest_apis", [])
        least_reliable = api_trends.get("least_reliable", [])

        if slowest_apis:
            print("\n🐌 Slowest APIs:")
            for api in slowest_apis[:3]:
                print(f" • {api['api']}: {api['average_time']:.3f}s avg")

        if least_reliable:
            print("\n⚠️ Least Reliable APIs:")
            for api in least_reliable[:3]:
                if api['success_rate'] < 1.0:
                    print(f" • {api['api']}: {api['success_rate']*100:.1f}% success rate")

    def export_data(self, format: str = "json", output_file: Optional[str] = None) -> None:
        """Export performance data to file or stdout."""
        data = performance_monitor.export_data(format)

        if output_file:
            try:
                with open(output_file, 'w') as f:
                    f.write(data)
                print(f"✅ Data exported to {output_file}")
            except (IOError, OSError) as e:
                print(f"❌ Error exporting to file: {e}")
        else:
            print(data)

    def clear_data(self) -> None:
        """Clear all performance monitoring data."""
        performance_monitor.clear_statistics()
        print("✅ All performance monitoring data cleared")


def main():
    """Main CLI entry point."""
    parser = argparse.ArgumentParser(
        description="PraisonAI Performance Monitoring CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python -m praisonaiagents.telemetry.performance_cli report
  python -m praisonaiagents.telemetry.performance_cli functions
  python -m praisonaiagents.telemetry.performance_cli apis
  python -m praisonaiagents.telemetry.performance_cli slowest-functions 5
  python -m praisonaiagents.telemetry.performance_cli flow
  python -m praisonaiagents.telemetry.performance_cli analyze-flow
  python -m praisonaiagents.telemetry.performance_cli trends
        """
    )

    subparsers = parser.add_subparsers(dest='command', help='Available commands')

    # Report command
    report_parser = subparsers.add_parser('report', help='Show performance report')
    report_parser.add_argument('--detailed', action='store_true',
                               help='Show detailed comprehensive report')

    # Functions command
    func_parser = subparsers.add_parser('functions', help='Show function statistics')
    func_parser.add_argument('--name', help='Specific function name')

    # APIs command
    api_parser = subparsers.add_parser('apis', help='Show API call statistics')
    api_parser.add_argument('--name', help='Specific API name')

    # Slowest functions command
    slowest_func_parser = subparsers.add_parser('slowest-functions',
                                                help='Show slowest functions')
    slowest_func_parser.add_argument('limit', type=int, nargs='?', default=10,
                                     help='Number of functions to show')

    # Slowest APIs command
    slowest_api_parser = subparsers.add_parser('slowest-apis',
                                               help='Show slowest API calls')
    slowest_api_parser.add_argument('limit', type=int, nargs='?', default=10,
                                    help='Number of APIs to show')

    # Flow command
    flow_parser = subparsers.add_parser('flow', help='Show function execution flow')
    flow_parser.add_argument('--format', choices=['text', 'json', 'mermaid'],
                             default='text', help='Output format')
    flow_parser.add_argument('--events', type=int, default=50,
                             help='Number of recent events to show')

    # Analyze flow command
    subparsers.add_parser('analyze-flow', help='Analyze function flow for bottlenecks')

    # Active calls command
    subparsers.add_parser('active', help='Show currently active function calls')

    # Trends command
    subparsers.add_parser('trends', help='Show performance trends analysis')

    # Export command
    export_parser = subparsers.add_parser('export', help='Export performance data')
    export_parser.add_argument('--format', choices=['json', 'dict'], default='json',
                               help='Export format')
    export_parser.add_argument('--output', help='Output file (default: stdout)')

    # Clear command
    subparsers.add_parser('clear', help='Clear all performance data')

    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        return

    try:
        cli = PerformanceCLI()

        if args.command == 'report':
            cli.show_performance_report(detailed=args.detailed)
        elif args.command == 'functions':
            cli.show_function_stats(args.name)
        elif args.command == 'apis':
            cli.show_api_stats(args.name)
        elif args.command == 'slowest-functions':
            cli.show_slowest_functions(args.limit)
        elif args.command == 'slowest-apis':
            cli.show_slowest_apis(args.limit)
        elif args.command == 'flow':
            cli.show_function_flow(args.format, args.events)
        elif args.command == 'analyze-flow':
            cli.analyze_flow()
        elif args.command == 'active':
            cli.show_active_calls()
        elif args.command == 'trends':
            cli.show_trends()
        elif args.command == 'export':
            cli.export_data(args.format, args.output)
        elif args.command == 'clear':
            cli.clear_data()

    except KeyboardInterrupt:
        print("\n❌ Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
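For reference, the module docstring above already shows the CLI invocation; a minimal programmatic sketch using the methods added in this file (the output filename below is just an illustrative placeholder, and it assumes performance data has been collected by the monitor) could look like:

# Hypothetical usage sketch of the new PerformanceCLI; method names are taken
# from the diff above, "perf_snapshot.json" is an illustrative placeholder.
from praisonaiagents.telemetry.performance_cli import PerformanceCLI

cli = PerformanceCLI()
cli.show_performance_report(detailed=True)   # comprehensive report
cli.show_slowest_functions(limit=5)          # top 5 slowest functions
cli.export_data(format="json", output_file="perf_snapshot.json")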