jaf-py 2.5.9__py3-none-any.whl → 2.5.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. jaf/__init__.py +154 -57
  2. jaf/a2a/__init__.py +42 -21
  3. jaf/a2a/agent.py +79 -126
  4. jaf/a2a/agent_card.py +87 -78
  5. jaf/a2a/client.py +30 -66
  6. jaf/a2a/examples/client_example.py +12 -12
  7. jaf/a2a/examples/integration_example.py +38 -47
  8. jaf/a2a/examples/server_example.py +56 -53
  9. jaf/a2a/memory/__init__.py +0 -4
  10. jaf/a2a/memory/cleanup.py +28 -21
  11. jaf/a2a/memory/factory.py +155 -133
  12. jaf/a2a/memory/providers/composite.py +21 -26
  13. jaf/a2a/memory/providers/in_memory.py +89 -83
  14. jaf/a2a/memory/providers/postgres.py +117 -115
  15. jaf/a2a/memory/providers/redis.py +128 -121
  16. jaf/a2a/memory/serialization.py +77 -87
  17. jaf/a2a/memory/tests/run_comprehensive_tests.py +112 -83
  18. jaf/a2a/memory/tests/test_cleanup.py +211 -94
  19. jaf/a2a/memory/tests/test_serialization.py +73 -68
  20. jaf/a2a/memory/tests/test_stress_concurrency.py +186 -133
  21. jaf/a2a/memory/tests/test_task_lifecycle.py +138 -120
  22. jaf/a2a/memory/types.py +91 -53
  23. jaf/a2a/protocol.py +95 -125
  24. jaf/a2a/server.py +90 -118
  25. jaf/a2a/standalone_client.py +30 -43
  26. jaf/a2a/tests/__init__.py +16 -33
  27. jaf/a2a/tests/run_tests.py +17 -53
  28. jaf/a2a/tests/test_agent.py +40 -140
  29. jaf/a2a/tests/test_client.py +54 -117
  30. jaf/a2a/tests/test_integration.py +28 -82
  31. jaf/a2a/tests/test_protocol.py +54 -139
  32. jaf/a2a/tests/test_types.py +50 -136
  33. jaf/a2a/types.py +58 -34
  34. jaf/cli.py +21 -41
  35. jaf/core/__init__.py +7 -1
  36. jaf/core/agent_tool.py +93 -72
  37. jaf/core/analytics.py +257 -207
  38. jaf/core/checkpoint.py +223 -0
  39. jaf/core/composition.py +249 -235
  40. jaf/core/engine.py +817 -519
  41. jaf/core/errors.py +55 -42
  42. jaf/core/guardrails.py +276 -202
  43. jaf/core/handoff.py +47 -31
  44. jaf/core/parallel_agents.py +69 -75
  45. jaf/core/performance.py +75 -73
  46. jaf/core/proxy.py +43 -44
  47. jaf/core/proxy_helpers.py +24 -27
  48. jaf/core/regeneration.py +220 -129
  49. jaf/core/state.py +68 -66
  50. jaf/core/streaming.py +115 -108
  51. jaf/core/tool_results.py +111 -101
  52. jaf/core/tools.py +114 -116
  53. jaf/core/tracing.py +269 -210
  54. jaf/core/types.py +371 -151
  55. jaf/core/workflows.py +209 -168
  56. jaf/exceptions.py +46 -38
  57. jaf/memory/__init__.py +1 -6
  58. jaf/memory/approval_storage.py +54 -77
  59. jaf/memory/factory.py +4 -4
  60. jaf/memory/providers/in_memory.py +216 -180
  61. jaf/memory/providers/postgres.py +216 -146
  62. jaf/memory/providers/redis.py +173 -116
  63. jaf/memory/types.py +70 -51
  64. jaf/memory/utils.py +36 -34
  65. jaf/plugins/__init__.py +12 -12
  66. jaf/plugins/base.py +105 -96
  67. jaf/policies/__init__.py +0 -1
  68. jaf/policies/handoff.py +37 -46
  69. jaf/policies/validation.py +76 -52
  70. jaf/providers/__init__.py +6 -3
  71. jaf/providers/mcp.py +97 -51
  72. jaf/providers/model.py +361 -280
  73. jaf/server/__init__.py +1 -1
  74. jaf/server/main.py +7 -11
  75. jaf/server/server.py +514 -359
  76. jaf/server/types.py +208 -52
  77. jaf/utils/__init__.py +17 -18
  78. jaf/utils/attachments.py +111 -116
  79. jaf/utils/document_processor.py +175 -174
  80. jaf/visualization/__init__.py +1 -1
  81. jaf/visualization/example.py +111 -110
  82. jaf/visualization/functional_core.py +46 -71
  83. jaf/visualization/graphviz.py +154 -189
  84. jaf/visualization/imperative_shell.py +7 -16
  85. jaf/visualization/types.py +8 -4
  86. {jaf_py-2.5.9.dist-info → jaf_py-2.5.11.dist-info}/METADATA +2 -2
  87. jaf_py-2.5.11.dist-info/RECORD +97 -0
  88. jaf_py-2.5.9.dist-info/RECORD +0 -96
  89. {jaf_py-2.5.9.dist-info → jaf_py-2.5.11.dist-info}/WHEEL +0 -0
  90. {jaf_py-2.5.9.dist-info → jaf_py-2.5.11.dist-info}/entry_points.txt +0 -0
  91. {jaf_py-2.5.9.dist-info → jaf_py-2.5.11.dist-info}/licenses/LICENSE +0 -0
  92. {jaf_py-2.5.9.dist-info → jaf_py-2.5.11.dist-info}/top_level.txt +0 -0
@@ -7,7 +7,7 @@ detailed validation reports. This is the main entry point for running the
 
 Usage:
     python run_comprehensive_tests.py [options]
-
+
 Options:
     --providers: Comma-separated list of providers to test (memory,redis,postgres)
     --coverage: Generate code coverage report
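The options documented above map to an ordinary command-line run. A minimal sketch of an equivalent programmatic invocation, using only flags that appear in this diff (the working directory and flag values are illustrative):

    # Hypothetical invocation of the runner; flag values are examples only.
    import subprocess
    import sys

    subprocess.run(
        [sys.executable, "run_comprehensive_tests.py", "--providers", "memory,redis", "--coverage"],
        check=True,
    )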
@@ -35,6 +35,7 @@ from jaf.a2a.memory.types import A2AInMemoryTaskConfig
 @dataclass
 class TestResult:
     """Test result information"""
+
     test_name: str
     status: str  # "PASSED", "FAILED", "SKIPPED", "ERROR"
     duration_ms: float
@@ -45,6 +46,7 @@ class TestResult:
 @dataclass
 class TestSuiteResult:
     """Results for an entire test suite"""
+
     suite_name: str
     provider: str
     total_tests: int = 0
@@ -59,6 +61,7 @@ class TestSuiteResult:
 @dataclass
 class ValidationReport:
     """Comprehensive validation report"""
+
     timestamp: str
     total_duration_ms: float
    providers_tested: List[str]
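The blank lines added after these docstrings are formatter output (a blank line between a dataclass docstring and its fields). For orientation, the three dataclasses nest as report → suites → tests; a minimal sketch using only fields visible in this diff, assuming the remaining fields have defaults:

    # Sketch of the report hierarchy; defaults for unshown fields are assumed.
    report = ValidationReport(
        timestamp="2024-01-01 00:00:00 UTC",
        total_duration_ms=0.0,
        providers_tested=["memory"],
    )
    suite = TestSuiteResult(suite_name="Serialization Tests", provider="memory")
    suite.results.append(
        TestResult(test_name="test_roundtrip", status="PASSED", duration_ms=12.5, provider="memory")
    )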
@@ -79,7 +82,7 @@ class ComprehensiveTestRunner:
         self.report = ValidationReport(
             timestamp=time.strftime("%Y-%m-%d %H:%M:%S UTC", time.gmtime()),
             total_duration_ms=0.0,
-            providers_tested=self.providers
+            providers_tested=self.providers,
         )
 
     def _parse_providers(self, providers_str: str) -> List[str]:
@@ -104,7 +107,9 @@ class ComprehensiveTestRunner:
         print("🧪 A2A Memory System Comprehensive Validation")
         print("=" * 60)
         print(f"Testing providers: {', '.join(self.providers)}")
-        print(f"Options: coverage={self.args.coverage}, performance={self.args.performance}, stress={self.args.stress}")
+        print(
+            f"Options: coverage={self.args.coverage}, performance={self.args.performance}, stress={self.args.stress}"
+        )
         print()
 
         start_time = time.perf_counter()
@@ -144,17 +149,14 @@ class ComprehensiveTestRunner:
         print("-" * 50)
 
         for provider in self.providers:
-            suite_result = TestSuiteResult(
-                suite_name="Serialization Tests",
-                provider=provider
-            )
+            suite_result = TestSuiteResult(suite_name="Serialization Tests", provider=provider)
 
             try:
                 # Run serialization tests
                 results = await self._execute_pytest_suite(
                     "test_serialization.py",
                     provider,
-                    timeout=300  # 5 minutes
+                    timeout=300,  # 5 minutes
                 )
 
                 suite_result.results.extend(results)
@@ -173,17 +175,14 @@ class ComprehensiveTestRunner:
         print("-" * 50)
 
         for provider in self.providers:
-            suite_result = TestSuiteResult(
-                suite_name="Lifecycle Tests",
-                provider=provider
-            )
+            suite_result = TestSuiteResult(suite_name="Lifecycle Tests", provider=provider)
 
             try:
                 # Run lifecycle tests
                 results = await self._execute_pytest_suite(
                     "test_task_lifecycle.py",
                     provider,
-                    timeout=600  # 10 minutes
+                    timeout=600,  # 10 minutes
                 )
 
                 suite_result.results.extend(results)
@@ -203,16 +202,13 @@ class ComprehensiveTestRunner:
 
         for provider in self.providers:
             # Stress tests
-            stress_suite = TestSuiteResult(
-                suite_name="Stress Tests",
-                provider=provider
-            )
+            stress_suite = TestSuiteResult(suite_name="Stress Tests", provider=provider)
 
             try:
                 stress_results = await self._execute_pytest_suite(
                     "test_stress_concurrency.py",
                     provider,
-                    timeout=1800  # 30 minutes
+                    timeout=1800,  # 30 minutes
                 )
 
                 stress_suite.results.extend(stress_results)
@@ -226,16 +222,13 @@ class ComprehensiveTestRunner:
             self._print_suite_summary(stress_suite)
 
             # Cleanup tests
-            cleanup_suite = TestSuiteResult(
-                suite_name="Cleanup Tests",
-                provider=provider
-            )
+            cleanup_suite = TestSuiteResult(suite_name="Cleanup Tests", provider=provider)
 
             try:
                 cleanup_results = await self._execute_pytest_suite(
                     "test_cleanup.py",
                     provider,
-                    timeout=600  # 10 minutes
+                    timeout=600,  # 10 minutes
                 )
 
                 cleanup_suite.results.extend(cleanup_results)
@@ -249,22 +242,21 @@ class ComprehensiveTestRunner:
             self._print_suite_summary(cleanup_suite)
 
     async def _execute_pytest_suite(
-        self,
-        test_file: str,
-        provider: str,
-        timeout: int = 300
+        self, test_file: str, provider: str, timeout: int = 300
     ) -> List[TestResult]:
         """Execute a pytest suite and parse results"""
         test_path = Path(__file__).parent / test_file
 
         # Build pytest command
         cmd = [
-            sys.executable, "-m", "pytest",
+            sys.executable,
+            "-m",
+            "pytest",
             str(test_path),
             "-v",
             "--tb=short",
             "--json-report",
-            "--json-report-file=/tmp/pytest_report.json"
+            "--json-report-file=/tmp/pytest_report.json",
         ]
 
         if provider != "memory":
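Like most hunks in this file, the command-list change is formatter-only: splitting the first three items across separate lines and adding a trailing comma builds the identical argv. A quick self-contained check of that equivalence (note the --json-report flags require the pytest-json-report plugin at runtime):

    import sys

    # The 2.5.9 and 2.5.11 spellings produce the same list; only layout changed.
    old_style = [sys.executable, "-m", "pytest"]
    new_style = [
        sys.executable,
        "-m",
        "pytest",
    ]
    assert old_style == new_style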
@@ -279,14 +271,11 @@ class ComprehensiveTestRunner:
             *cmd,
             stdout=asyncio.subprocess.PIPE,
             stderr=asyncio.subprocess.PIPE,
-            cwd=Path(__file__).parent.parent.parent.parent
+            cwd=Path(__file__).parent.parent.parent.parent,
         )
 
         try:
-            stdout, stderr = await asyncio.wait_for(
-                process.communicate(),
-                timeout=timeout
-            )
+            stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout)
 
             # Parse JSON report
             try:
@@ -301,22 +290,26 @@ class ComprehensiveTestRunner:
 
         except asyncio.TimeoutError:
             process.kill()
-            return [TestResult(
-                test_name=f"{test_file}_timeout",
-                status="ERROR",
-                duration_ms=timeout * 1000,
-                error_message=f"Test suite timed out after {timeout}s",
-                provider=provider
-            )]
+            return [
+                TestResult(
+                    test_name=f"{test_file}_timeout",
+                    status="ERROR",
+                    duration_ms=timeout * 1000,
+                    error_message=f"Test suite timed out after {timeout}s",
+                    provider=provider,
+                )
+            ]
 
         except Exception as e:
-            return [TestResult(
-                test_name=f"{test_file}_execution_error",
-                status="ERROR",
-                duration_ms=0.0,
-                error_message=str(e),
-                provider=provider
-            )]
+            return [
+                TestResult(
+                    test_name=f"{test_file}_execution_error",
+                    status="ERROR",
+                    duration_ms=0.0,
+                    error_message=str(e),
+                    provider=provider,
+                )
+            ]
 
     def _parse_pytest_results(self, report_data: Dict, provider: str) -> List[TestResult]:
         """Parse pytest JSON report"""
@@ -327,7 +320,7 @@ class ComprehensiveTestRunner:
                 test_name=test.get("nodeid", "unknown"),
                 status=test.get("outcome", "UNKNOWN").upper(),
                 duration_ms=test.get("duration", 0.0) * 1000,
-                provider=provider
+                provider=provider,
             )
 
             if test.get("call", {}).get("longrepr"):
@@ -343,18 +336,26 @@ class ComprehensiveTestRunner:
         lines = stdout.split("\n")
 
         for line in lines:
-            if "::" in line and any(status in line for status in ["PASSED", "FAILED", "SKIPPED", "ERROR"]):
+            if "::" in line and any(
+                status in line for status in ["PASSED", "FAILED", "SKIPPED", "ERROR"]
+            ):
                 parts = line.split()
                 if len(parts) >= 2:
                     test_name = parts[0]
-                    status = parts[1] if parts[1] in ["PASSED", "FAILED", "SKIPPED", "ERROR"] else "UNKNOWN"
-
-                    results.append(TestResult(
-                        test_name=test_name,
-                        status=status,
-                        duration_ms=0.0,  # Duration not available from stdout
-                        provider=provider
-                    ))
+                    status = (
+                        parts[1]
+                        if parts[1] in ["PASSED", "FAILED", "SKIPPED", "ERROR"]
+                        else "UNKNOWN"
+                    )
+
+                    results.append(
+                        TestResult(
+                            test_name=test_name,
+                            status=status,
+                            duration_ms=0.0,  # Duration not available from stdout
+                            provider=provider,
+                        )
+                    )
 
         return results
 
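The stdout fallback scans verbose pytest output for lines of the form "nodeid STATUS". A sketch of the parsing on a hypothetical sample line:

    # Sample line is hypothetical; the split/validate logic mirrors the code above.
    line = "tests/test_serialization.py::test_roundtrip PASSED [ 50%]"
    parts = line.split()
    test_name = parts[0]
    status = parts[1] if parts[1] in ["PASSED", "FAILED", "SKIPPED", "ERROR"] else "UNKNOWN"
    print(test_name, status)  # tests/test_serialization.py::test_roundtrip PASSED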
@@ -385,7 +386,9 @@ class ComprehensiveTestRunner:
         status_icon = "✅" if failed == 0 and errors == 0 else "❌"
 
         print(f" {status_icon} {suite_result.provider} - {suite_result.suite_name}")
-        print(f" Total: {total}, Passed: {passed}, Failed: {failed}, Skipped: {skipped}, Errors: {errors}")
+        print(
+            f" Total: {total}, Passed: {passed}, Failed: {failed}, Skipped: {skipped}, Errors: {errors}"
+        )
         print(f" Duration: {duration:.2f}s")
 
         if failed > 0 or errors > 0:
@@ -401,18 +404,20 @@ class ComprehensiveTestRunner:
         try:
             # Run pytest with coverage
             cmd = [
-                sys.executable, "-m", "pytest",
+                sys.executable,
+                "-m",
+                "pytest",
                 str(Path(__file__).parent),
                 "--cov=jaf.a2a.memory",
                 "--cov-report=json:/tmp/coverage.json",
-                "--cov-report=term"
+                "--cov-report=term",
             ]
 
             process = await asyncio.create_subprocess_exec(
                 *cmd,
                 stdout=asyncio.subprocess.PIPE,
                 stderr=asyncio.subprocess.PIPE,
-                cwd=Path(__file__).parent.parent.parent.parent
+                cwd=Path(__file__).parent.parent.parent.parent,
             )
 
             stdout, stderr = await process.communicate()
@@ -426,7 +431,7 @@ class ComprehensiveTestRunner:
                 "coverage_percent": coverage_data.get("totals", {}).get("percent_covered", 0),
                "lines_covered": coverage_data.get("totals", {}).get("covered_lines", 0),
                "lines_missing": coverage_data.get("totals", {}).get("missing_lines", 0),
-                "files": list(coverage_data.get("files", {}).keys())
+                "files": list(coverage_data.get("files", {}).keys()),
             }
 
             coverage_percent = self.report.coverage_report["coverage_percent"]
@@ -437,7 +442,9 @@ class ComprehensiveTestRunner:
                 print(f"⚠️ Good coverage: {coverage_percent:.1f}%")
             else:
                 print(f"❌ Low coverage: {coverage_percent:.1f}%")
-                self.report.critical_issues.append(f"Code coverage below 85%: {coverage_percent:.1f}%")
+                self.report.critical_issues.append(
+                    f"Code coverage below 85%: {coverage_percent:.1f}%"
+                )
 
         except FileNotFoundError:
             print("❌ Coverage report not generated")
@@ -462,7 +469,7 @@ class ComprehensiveTestRunner:
                 id="benchmark_task",
                 contextId="benchmark_ctx",
                 kind="task",
-                status=A2ATaskStatus(state=TaskState.SUBMITTED)
+                status=A2ATaskStatus(state=TaskState.SUBMITTED),
             )
 
             # Benchmark store operation
@@ -484,19 +491,27 @@ class ComprehensiveTestRunner:
                 "store_ops_per_sec": 100 / (store_time / 1000),
                 "get_ops_per_sec": 100 / (get_time / 1000),
                 "avg_store_time_ms": store_time / 100,
-                "avg_get_time_ms": get_time / 100
+                "avg_get_time_ms": get_time / 100,
             }
 
             metrics = self.report.performance_metrics
-            print(f"✅ Store: {metrics['store_ops_per_sec']:.2f} ops/sec ({metrics['avg_store_time_ms']:.2f}ms avg)")
-            print(f"✅ Get: {metrics['get_ops_per_sec']:.2f} ops/sec ({metrics['avg_get_time_ms']:.2f}ms avg)")
+            print(
+                f"✅ Store: {metrics['store_ops_per_sec']:.2f} ops/sec ({metrics['avg_store_time_ms']:.2f}ms avg)"
+            )
+            print(
+                f"✅ Get: {metrics['get_ops_per_sec']:.2f} ops/sec ({metrics['avg_get_time_ms']:.2f}ms avg)"
+            )
 
             # Performance thresholds
-            if metrics['store_ops_per_sec'] < 100:
-                self.report.critical_issues.append(f"Store performance too slow: {metrics['store_ops_per_sec']:.2f} ops/sec")
+            if metrics["store_ops_per_sec"] < 100:
+                self.report.critical_issues.append(
+                    f"Store performance too slow: {metrics['store_ops_per_sec']:.2f} ops/sec"
+                )
 
-            if metrics['get_ops_per_sec'] < 200:
-                self.report.critical_issues.append(f"Get performance too slow: {metrics['get_ops_per_sec']:.2f} ops/sec")
+            if metrics["get_ops_per_sec"] < 200:
+                self.report.critical_issues.append(
+                    f"Get performance too slow: {metrics['get_ops_per_sec']:.2f} ops/sec"
+                )
 
         except Exception as e:
             print(f"❌ Performance benchmarks failed: {e}")
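Each benchmark times 100 operations, so throughput is 100 divided by the elapsed seconds, and average latency is the elapsed milliseconds divided by 100. A worked example with assumed timings:

    # Assumed timing: 100 store operations took 250 ms in total.
    store_time = 250.0  # milliseconds
    store_ops_per_sec = 100 / (store_time / 1000)  # 400.0 ops/sec
    avg_store_time_ms = store_time / 100  # 2.5 ms per operation
    assert store_ops_per_sec == 400.0 and avg_store_time_ms == 2.5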
@@ -526,7 +541,9 @@ class ComprehensiveTestRunner:
             provider_suites = [s for s in self.report.suite_results if s.provider == provider]
             provider_passed = sum(s.passed for s in provider_suites)
             provider_total = sum(s.total_tests for s in provider_suites)
-            provider_compliance[provider] = (provider_passed / provider_total * 100) if provider_total > 0 else 0
+            provider_compliance[provider] = (
+                (provider_passed / provider_total * 100) if provider_total > 0 else 0
+            )
 
         # Recommendations
         if self.report.coverage_report and self.report.coverage_report["coverage_percent"] < 90:
@@ -537,14 +554,20 @@ class ComprehensiveTestRunner:
 
         if self.report.performance_metrics:
             metrics = self.report.performance_metrics
-            if metrics.get('store_ops_per_sec', 0) < 500:
-                self.report.recommendations.append("Optimize storage performance for production workloads")
+            if metrics.get("store_ops_per_sec", 0) < 500:
+                self.report.recommendations.append(
+                    "Optimize storage performance for production workloads"
+                )
 
         # Final verdict
         if len(self.report.critical_issues) == 0 and success_rate >= 98:
-            self.report.final_verdict = "✅ PRODUCTION READY - All tests pass with excellent coverage"
+            self.report.final_verdict = (
+                "✅ PRODUCTION READY - All tests pass with excellent coverage"
+            )
         elif len(self.report.critical_issues) <= 2 and success_rate >= 95:
-            self.report.final_verdict = "⚠️ READY WITH MINOR ISSUES - Address recommendations before production"
+            self.report.final_verdict = (
+                "⚠️ READY WITH MINOR ISSUES - Address recommendations before production"
+            )
         else:
             self.report.final_verdict = "❌ NOT READY - Critical issues must be resolved"
 
@@ -576,10 +599,10 @@ class ComprehensiveTestRunner:
                             "status": result.status,
                             "duration_ms": result.duration_ms,
                             "error_message": result.error_message,
-                            "provider": result.provider
+                            "provider": result.provider,
                         }
                         for result in suite.results
-                    ]
+                    ],
                 }
                 for suite in self.report.suite_results
             ],
@@ -587,7 +610,7 @@ class ComprehensiveTestRunner:
             "performance_metrics": self.report.performance_metrics,
             "critical_issues": self.report.critical_issues,
             "recommendations": self.report.recommendations,
-            "final_verdict": self.report.final_verdict
+            "final_verdict": self.report.final_verdict,
         }
 
         with open(report_path, "w") as f:
@@ -619,7 +642,9 @@ class ComprehensiveTestRunner:
             f.write(f"- **Success Rate:** {success_rate:.1f}%\n")
 
             if self.report.coverage_report:
-                f.write(f"- **Code Coverage:** {self.report.coverage_report['coverage_percent']:.1f}%\n")
+                f.write(
+                    f"- **Code Coverage:** {self.report.coverage_report['coverage_percent']:.1f}%\n"
+                )
 
             f.write(f"\n**Final Verdict:** {self.report.final_verdict}\n\n")
 
@@ -675,7 +700,9 @@ class ComprehensiveTestRunner:
 
         if self.report.performance_metrics:
             metrics = self.report.performance_metrics
-            print(f" Performance: Store {metrics['store_ops_per_sec']:.0f} ops/s, Get {metrics['get_ops_per_sec']:.0f} ops/s")
+            print(
+                f" Performance: Store {metrics['store_ops_per_sec']:.0f} ops/s, Get {metrics['get_ops_per_sec']:.0f} ops/s"
+            )
 
         print(f"\n🎯 FINAL VERDICT: {self.report.final_verdict}")
 
@@ -695,7 +722,9 @@ class ComprehensiveTestRunner:
 async def main():
     """Main entry point"""
     parser = argparse.ArgumentParser(description="A2A Memory Comprehensive Test Runner")
-    parser.add_argument("--providers", default="memory", help="Providers to test (memory,redis,postgres)")
+    parser.add_argument(
+        "--providers", default="memory", help="Providers to test (memory,redis,postgres)"
+    )
     parser.add_argument("--coverage", action="store_true", help="Generate coverage report")
     parser.add_argument("--performance", action="store_true", help="Include performance benchmarks")
     parser.add_argument("--stress", action="store_true", help="Include stress tests")
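The reformatted parser accepts the same flags as before; a minimal sketch exercising it against a sample argv:

    import argparse

    # Same flags as main() above; the argv below is an example only.
    parser = argparse.ArgumentParser(description="A2A Memory Comprehensive Test Runner")
    parser.add_argument(
        "--providers", default="memory", help="Providers to test (memory,redis,postgres)"
    )
    parser.add_argument("--coverage", action="store_true", help="Generate coverage report")
    parser.add_argument("--performance", action="store_true", help="Include performance benchmarks")
    parser.add_argument("--stress", action="store_true", help="Include stress tests")

    args = parser.parse_args(["--providers", "memory,redis", "--stress"])
    assert args.providers == "memory,redis" and args.stress and not args.coverage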