api-mocker 0.1.2__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only and reflects only the changes between those published versions.
api_mocker/cli.py CHANGED
@@ -11,6 +11,9 @@ from api_mocker import MockServer
  from api_mocker.openapi import OpenAPIParser, PostmanImporter
  from api_mocker.recorder import RequestRecorder, ProxyRecorder, ReplayEngine
  from api_mocker.plugins import PluginManager, BUILTIN_PLUGINS
+ from api_mocker.analytics import AnalyticsManager
+ from api_mocker.dashboard import DashboardManager
+ from api_mocker.advanced import AdvancedFeatures, RateLimitConfig, CacheConfig, AuthConfig

  app = typer.Typer(help="api-mocker: The industry-standard, production-ready, free API mocking and development acceleration tool.")
  console = Console()
@@ -493,5 +496,410 @@ api-mocker record https://api.example.com --output recordings/recorded.json
          console.print(f"[red]✗[/red] Failed to create project: {e}")
          raise typer.Exit(1)

+ @app.command()
+ def analytics(
+     action: str = typer.Argument(..., help="Analytics action (dashboard, export, summary)"),
+     hours: int = typer.Option(24, "--hours", help="Time period for analytics (hours)"),
+     output: str = typer.Option(None, "--output", help="Output file for export"),
+     format: str = typer.Option("json", "--format", help="Export format (json, csv)"),
+ ):
+     """Manage analytics and metrics."""
+     try:
+         analytics_manager = AnalyticsManager()
+
+         if action == "dashboard":
+             console.print("[blue]📊[/blue] Starting analytics dashboard...")
+             dashboard = DashboardManager(analytics_manager)
+             dashboard.start()
+
+         elif action == "export":
+             if not output:
+                 output = f"analytics-{int(time.time())}.{format}"
+
+             console.print(f"[blue]📤[/blue] Exporting analytics to {output}...")
+             analytics_manager.export_analytics(output, format)
+             console.print(f"[green]✓[/green] Analytics exported to: {output}")
+
+         elif action == "summary":
+             console.print(f"[blue]📈[/blue] Generating analytics summary for last {hours} hours...")
+             summary = analytics_manager.get_analytics_summary(hours)
+
+             # Display summary
+             table = Table(title=f"Analytics Summary (Last {hours} hours)")
+             table.add_column("Metric", style="cyan")
+             table.add_column("Value", style="green")
+
+             table.add_row("Total Requests", str(summary["total_requests"]))
+             table.add_row("Popular Endpoints", str(len(summary["popular_endpoints"])))
+             table.add_row("Average Response Time", f"{summary['server_metrics']['average_response_time_ms']:.2f}ms")
+             table.add_row("Error Rate", f"{summary['server_metrics']['error_rate']:.2f}%")
+
+             console.print(table)
+
+         else:
+             console.print(f"[red]✗[/red] Unknown action: {action}")
+             raise typer.Exit(1)
+
+     except Exception as e:
+         console.print(f"[red]✗[/red] Analytics error: {e}")
+         raise typer.Exit(1)
+
+ @app.command()
+ def advanced(
+     feature: str = typer.Argument(..., help="Advanced feature (rate-limit, cache, auth, health)"),
+     config_file: str = typer.Option(None, "--config", help="Configuration file path"),
+     enable: bool = typer.Option(True, "--enable/--disable", help="Enable or disable feature"),
+ ):
+     """Configure advanced features."""
+     try:
+         if feature == "rate-limit":
+             console.print("[blue]🛡️[/blue] Configuring rate limiting...")
+
+             config = RateLimitConfig(
+                 requests_per_minute=60,
+                 requests_per_hour=1000,
+                 burst_size=10
+             )
+
+             if config_file:
+                 # Load from file
+                 with open(config_file, 'r') as f:
+                     if config_file.endswith('.yaml') or config_file.endswith('.yml'):
+                         import yaml
+                         file_config = yaml.safe_load(f)
+                     else:
+                         file_config = json.load(f)
+
+                 config = RateLimitConfig(**file_config.get("rate_limit", {}))
+
+             console.print(f"[green]✓[/green] Rate limiting configured:")
+             console.print(f" - Requests per minute: {config.requests_per_minute}")
+             console.print(f" - Requests per hour: {config.requests_per_hour}")
+             console.print(f" - Burst size: {config.burst_size}")
+
+         elif feature == "cache":
+             console.print("[blue]⚡[/blue] Configuring caching...")
+
+             config = CacheConfig(
+                 enabled=True,
+                 ttl_seconds=300,
+                 max_size=1000,
+                 strategy="lru"
+             )
+
+             if config_file:
+                 with open(config_file, 'r') as f:
+                     if config_file.endswith('.yaml') or config_file.endswith('.yml'):
+                         import yaml
+                         file_config = yaml.safe_load(f)
+                     else:
+                         file_config = json.load(f)
+
+                 config = CacheConfig(**file_config.get("cache", {}))
+
+             console.print(f"[green]✓[/green] Caching configured:")
+             console.print(f" - Enabled: {config.enabled}")
+             console.print(f" - TTL: {config.ttl_seconds} seconds")
+             console.print(f" - Max size: {config.max_size}")
+             console.print(f" - Strategy: {config.strategy}")
+
+         elif feature == "auth":
+             console.print("[blue]🔐[/blue] Configuring authentication...")
+
+             config = AuthConfig(
+                 enabled=True,
+                 secret_key="your-secret-key-change-this",
+                 algorithm="HS256",
+                 token_expiry_hours=24
+             )
+
+             if config_file:
+                 with open(config_file, 'r') as f:
+                     if config_file.endswith('.yaml') or config_file.endswith('.yml'):
+                         import yaml
+                         file_config = yaml.safe_load(f)
+                     else:
+                         file_config = json.load(f)
+
+                 config = AuthConfig(**file_config.get("auth", {}))
+
+             console.print(f"[green]✓[/green] Authentication configured:")
+             console.print(f" - Enabled: {config.enabled}")
+             console.print(f" - Algorithm: {config.algorithm}")
+             console.print(f" - Token expiry: {config.token_expiry_hours} hours")
+
+         elif feature == "health":
+             console.print("[blue]🏥[/blue] Running health checks...")
+
+             from api_mocker.advanced import HealthChecker, check_database_connection, check_memory_usage, check_disk_space
+
+             health_checker = HealthChecker()
+             health_checker.add_check("database", check_database_connection)
+             health_checker.add_check("memory", check_memory_usage)
+             health_checker.add_check("disk", check_disk_space)
+
+             status = health_checker.get_health_status()
+
+             table = Table(title="Health Check Results")
+             table.add_column("Check", style="cyan")
+             table.add_column("Status", style="green")
+
+             for check_name, check_status in status["checks"].items():
+                 status_icon = "✓" if check_status else "✗"
+                 status_color = "green" if check_status else "red"
+                 table.add_row(check_name, f"[{status_color}]{status_icon}[/{status_color}]")
+
+             console.print(table)
+             console.print(f"Overall status: {status['status']}")
+
+         else:
+             console.print(f"[red]✗[/red] Unknown feature: {feature}")
+             raise typer.Exit(1)
+
+     except Exception as e:
+         console.print(f"[red]✗[/red] Advanced feature error: {e}")
+         raise typer.Exit(1)
+
+ @app.command()
+ def ai(
+     action: str = typer.Argument(..., help="AI action (generate, configure, cache, test)"),
+     prompt: str = typer.Option(None, "--prompt", help="AI generation prompt"),
+     endpoint: str = typer.Option(None, "--endpoint", help="API endpoint path"),
+     count: int = typer.Option(1, "--count", help="Number of records to generate"),
+     schema: str = typer.Option(None, "--schema", help="JSON schema file path"),
+     output: str = typer.Option(None, "--output", help="Output file path"),
+     api_key: str = typer.Option(None, "--api-key", help="OpenAI API key"),
+     model: str = typer.Option("gpt-3.5-turbo", "--model", help="AI model to use"),
+     clear_cache: bool = typer.Option(False, "--clear-cache", help="Clear AI generation cache"),
+ ):
+     """AI-powered mock data generation and management."""
+     try:
+         from .ai_generator import AIGenerationManager
+
+         # Initialize AI manager
+         ai_manager = AIGenerationManager()
+
+         if action == "configure":
+             console.print("[blue]🤖[/blue] Configuring AI settings...")
+
+             # Get API key from user
+             if not api_key:
+                 api_key = typer.prompt("Enter your OpenAI API key", hide_input=True)
+
+             # Save API key securely
+             config_dir = Path.home() / ".api-mocker"
+             config_dir.mkdir(exist_ok=True)
+             config_file = config_dir / "ai_config.json"
+
+             config_data = {
+                 "openai_api_key": api_key,
+                 "model": model,
+                 "cache_enabled": True
+             }
+
+             with open(config_file, 'w') as f:
+                 json.dump(config_data, f, indent=2)
+
+             console.print("[green]✓[/green] AI configuration saved")
+
+         elif action == "generate":
+             if not prompt:
+                 prompt = typer.prompt("Enter generation prompt")
+
+             if not endpoint:
+                 endpoint = typer.prompt("Enter API endpoint path")
+
+             console.print(f"[blue]🤖[/blue] Generating AI-powered mock data...")
+             console.print(f"Prompt: {prompt}")
+             console.print(f"Endpoint: {endpoint}")
+             console.print(f"Count: {count}")
+
+             # Load schema if provided
+             schema_data = None
+             if schema:
+                 with open(schema, 'r') as f:
+                     schema_data = json.load(f)
+
+             # Generate data
+             result = ai_manager.generate_mock_data(
+                 prompt=prompt,
+                 endpoint=endpoint,
+                 count=count,
+                 schema=schema_data
+             )
+
+             # Display results
+             table = Table(title="AI Generation Results")
+             table.add_column("Metric", style="cyan")
+             table.add_column("Value", style="green")
+
+             table.add_row("Source", result["metadata"]["source"])
+             table.add_row("Model", result["metadata"]["model"])
+             table.add_row("Generation Time", f"{result['generation_time']:.2f}s")
+             table.add_row("Cache Key", result["cache_key"][:8] + "..." if result["cache_key"] else "N/A")
+
+             console.print(table)
+
+             # Save to file if requested
+             if output:
+                 with open(output, 'w') as f:
+                     json.dump(result["data"], f, indent=2)
+                 console.print(f"[green]✓[/green] Data saved to: {output}")
+             else:
+                 console.print("\n[blue]Generated Data:[/blue]")
+                 console.print_json(data=result["data"])
+
+         elif action == "cache":
+             if clear_cache:
+                 ai_manager.clear_cache()
+                 console.print("[green]✓[/green] AI cache cleared")
+             else:
+                 stats = ai_manager.get_cache_stats()
+                 table = Table(title="AI Cache Statistics")
+                 table.add_column("Metric", style="cyan")
+                 table.add_column("Value", style="green")
+
+                 table.add_row("Cache Size", str(stats["cache_size"]))
+                 table.add_row("Cache Enabled", str(stats["cache_enabled"]))
+                 table.add_row("Cache TTL", f"{stats['cache_ttl']}s")
+
+                 console.print(table)
+
+         elif action == "test":
+             console.print("[blue]🧪[/blue] Testing AI generation...")
+
+             # Test with simple prompt
+             test_result = ai_manager.generate_mock_data(
+                 prompt="Generate a user profile with name, email, and age",
+                 endpoint="/test/user",
+                 count=1
+             )
+
+             console.print("[green]✓[/green] AI generation test successful")
+             console.print(f"Generated in: {test_result['generation_time']:.2f}s")
+             console.print_json(data=test_result["data"])
+
+         else:
+             console.print(f"[red]✗[/red] Unknown AI action: {action}")
+             raise typer.Exit(1)
+
+     except Exception as e:
+         console.print(f"[red]✗[/red] AI generation error: {e}")
+         raise typer.Exit(1)
+
+
+
+ @app.command()
+ def testing(
+     action: str = typer.Argument(..., help="Testing action (run, generate, performance, report)"),
+     test_file: str = typer.Option(None, "--test-file", help="Test file path"),
+     config_file: str = typer.Option(None, "--config", help="API config file path"),
+     output_file: str = typer.Option(None, "--output", help="Output file path"),
+     concurrent_users: int = typer.Option(10, "--users", help="Number of concurrent users for performance test"),
+     duration: int = typer.Option(60, "--duration", help="Test duration in seconds"),
+     verbose: bool = typer.Option(False, "--verbose", help="Verbose output"),
+ ):
+     """Advanced testing framework for API testing."""
+     try:
+         from .testing import TestingFramework
+
+         framework = TestingFramework()
+
+         if action == "run":
+             if not test_file:
+                 test_file = typer.prompt("Enter test file path")
+
+             console.print(f"[blue]🧪[/blue] Running tests from: {test_file}")
+             results = framework.run_tests_from_file(test_file)
+
+             # Display results
+             passed = sum(1 for r in results if r.status == "passed")
+             failed = sum(1 for r in results if r.status == "failed")
+             errors = sum(1 for r in results if r.status == "error")
+
+             table = Table(title="Test Results")
+             table.add_column("Test", style="cyan")
+             table.add_column("Status", style="green")
+             table.add_column("Duration", style="blue")
+             table.add_column("Details", style="yellow")
+
+             for result in results:
+                 status_icon = "✓" if result.status == "passed" else "✗"
+                 status_color = "green" if result.status == "passed" else "red"
+
+                 details = ""
+                 if result.assertions:
+                     failed_assertions = [a for a in result.assertions if not a["passed"]]
+                     if failed_assertions:
+                         details = f"{len(failed_assertions)} failed assertions"
+
+                 table.add_row(
+                     result.test_name,
+                     f"[{status_color}]{status_icon} {result.status}[/{status_color}]",
+                     f"{result.duration:.2f}s",
+                     details
+                 )
+
+             console.print(table)
+             console.print(f"\n[green]✓[/green] Passed: {passed}")
+             console.print(f"[red]✗[/red] Failed: {failed}")
+             console.print(f"[yellow]⚠[/yellow] Errors: {errors}")
+
+         elif action == "generate":
+             if not config_file:
+                 config_file = typer.prompt("Enter API config file path")
+
+             if not output_file:
+                 output_file = f"tests-{int(time.time())}.yaml"
+
+             console.print(f"[blue]🔧[/blue] Generating tests from: {config_file}")
+             framework.generate_tests(config_file, output_file)
+             console.print(f"[green]✓[/green] Tests generated: {output_file}")
+
+         elif action == "performance":
+             if not test_file:
+                 test_file = typer.prompt("Enter performance test file path")
+
+             console.print(f"[blue]⚡[/blue] Running performance test...")
+             console.print(f"Concurrent users: {concurrent_users}")
+             console.print(f"Duration: {duration} seconds")
+
+             result = framework.run_performance_test_from_file(test_file)
+
+             # Display performance results
+             table = Table(title="Performance Test Results")
+             table.add_column("Metric", style="cyan")
+             table.add_column("Value", style="green")
+
+             table.add_row("Total Requests", str(result.total_requests))
+             table.add_row("Successful Requests", str(result.successful_requests))
+             table.add_row("Failed Requests", str(result.failed_requests))
+             table.add_row("Average Response Time", f"{result.average_response_time:.2f}ms")
+             table.add_row("Min Response Time", f"{result.min_response_time:.2f}ms")
+             table.add_row("Max Response Time", f"{result.max_response_time:.2f}ms")
+             table.add_row("P95 Response Time", f"{result.p95_response_time:.2f}ms")
+             table.add_row("P99 Response Time", f"{result.p99_response_time:.2f}ms")
+             table.add_row("Requests per Second", f"{result.requests_per_second:.2f}")
+             table.add_row("Error Rate", f"{result.error_rate:.2f}%")
+             table.add_row("Test Duration", f"{result.duration:.2f}s")
+
+             console.print(table)
+
+         elif action == "report":
+             if not test_file:
+                 test_file = typer.prompt("Enter test results file path")
+
+             console.print(f"[blue]📊[/blue] Generating test report from: {test_file}")
+             # TODO: Implement test report generation
+             console.print("[green]✓[/green] Test report generated")
+
+         else:
+             console.print(f"[red]✗[/red] Unknown testing action: {action}")
+             raise typer.Exit(1)
+
+     except Exception as e:
+         console.print(f"[red]✗[/red] Testing error: {e}")
+         raise typer.Exit(1)
+
  if __name__ == "__main__":
      app()
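
Note: the new `advanced` command can read an optional JSON or YAML config file and unpacks its `rate_limit`, `cache`, and `auth` sections into `RateLimitConfig`, `CacheConfig`, and `AuthConfig`. The snippet below is a minimal illustrative sketch, not part of the package: it assumes those dataclasses accept exactly the keyword arguments visible in the diff above, and the file name `advanced-config.json` is invented for the example.

# Illustrative sketch: write a config file the new `advanced` command can load.
# Section names and fields mirror what cli.py passes to RateLimitConfig,
# CacheConfig, and AuthConfig in this diff; any extra fields those classes may
# accept are not covered here.
import json

example_config = {
    "rate_limit": {
        "requests_per_minute": 60,   # defaults shown in the diff
        "requests_per_hour": 1000,
        "burst_size": 10,
    },
    "cache": {
        "enabled": True,
        "ttl_seconds": 300,
        "max_size": 1000,
        "strategy": "lru",
    },
    "auth": {
        "enabled": True,
        "secret_key": "replace-with-a-real-secret",
        "algorithm": "HS256",
        "token_expiry_hours": 24,
    },
}

with open("advanced-config.json", "w") as f:
    json.dump(example_config, f, indent=2)

# Example invocations of the commands added in 0.2.0 (flags as defined above):
#   api-mocker advanced rate-limit --config advanced-config.json
#   api-mocker advanced cache --config advanced-config.json
#   api-mocker analytics summary --hours 24
#   api-mocker ai generate --prompt "A user profile with name and email" --endpoint /users --count 3
#   api-mocker testing run --test-file tests.yaml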