api-mocker 0.1.3__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- api_mocker/ai_generator.py +482 -0
- api_mocker/cli.py +646 -0
- api_mocker/enhanced_analytics.py +542 -0
- api_mocker/scenarios.py +338 -0
- api_mocker/smart_matching.py +415 -0
- api_mocker/testing.py +699 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.3.0.dist-info}/METADATA +30 -7
- {api_mocker-0.1.3.dist-info → api_mocker-0.3.0.dist-info}/RECORD +12 -7
- {api_mocker-0.1.3.dist-info → api_mocker-0.3.0.dist-info}/WHEEL +0 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.3.0.dist-info}/entry_points.txt +0 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.3.0.dist-info}/top_level.txt +0 -0
api_mocker/cli.py
CHANGED
@@ -14,6 +14,9 @@ from api_mocker.plugins import PluginManager, BUILTIN_PLUGINS
 from api_mocker.analytics import AnalyticsManager
 from api_mocker.dashboard import DashboardManager
 from api_mocker.advanced import AdvancedFeatures, RateLimitConfig, CacheConfig, AuthConfig
+from api_mocker.scenarios import scenario_manager, Scenario, ScenarioCondition, ScenarioResponse, ScenarioType
+from api_mocker.smart_matching import smart_matcher, ResponseRule, MatchCondition, MatchType
+from api_mocker.enhanced_analytics import EnhancedAnalytics, PerformanceMetrics, UsagePattern, APIDependency, CostOptimizationInsight
 
 app = typer.Typer(help="api-mocker: The industry-standard, production-ready, free API mocking and development acceleration tool.")
 console = Console()
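
The three added imports carry the surface the new commands below are built on: scenario_manager and smart_matcher arrive as module-level singletons, while EnhancedAnalytics is instantiated per invocation. A minimal sketch of that programmatic surface, using only calls that appear later in this diff (argument values and the request-dict keys are illustrative assumptions):

    from api_mocker.scenarios import scenario_manager
    from api_mocker.smart_matching import smart_matcher
    from api_mocker.enhanced_analytics import EnhancedAnalytics

    # Scenario singleton: build a canned error scenario, register it, switch to it.
    scenario = scenario_manager.create_error_scenario("server_error")
    scenario.name = "checkout-500s"
    scenario_manager.add_scenario(scenario)
    scenario_manager.activate_scenario("checkout-500s")

    # Smart-matcher singleton: resolve a (response, rule) pair for a request dict.
    response, rule = smart_matcher.find_matching_response({"path": "/users", "method": "GET"})

    # Enhanced analytics: summarize the last 24 hours of recorded traffic.
    summary = EnhancedAnalytics().get_analytics_summary(24)
    print(summary["total_requests"], summary["error_rate"])
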
@@ -660,5 +663,648 @@ def advanced(
         console.print(f"[red]✗[/red] Advanced feature error: {e}")
         raise typer.Exit(1)
 
+@app.command()
+def ai(
+    action: str = typer.Argument(..., help="AI action (generate, configure, cache, test)"),
+    prompt: str = typer.Option(None, "--prompt", help="AI generation prompt"),
+    endpoint: str = typer.Option(None, "--endpoint", help="API endpoint path"),
+    count: int = typer.Option(1, "--count", help="Number of records to generate"),
+    schema: str = typer.Option(None, "--schema", help="JSON schema file path"),
+    output: str = typer.Option(None, "--output", help="Output file path"),
+    api_key: str = typer.Option(None, "--api-key", help="OpenAI API key"),
+    model: str = typer.Option("gpt-3.5-turbo", "--model", help="AI model to use"),
+    clear_cache: bool = typer.Option(False, "--clear-cache", help="Clear AI generation cache"),
+):
+    """AI-powered mock data generation and management."""
+    try:
+        from .ai_generator import AIGenerationManager
+
+        # Initialize AI manager
+        ai_manager = AIGenerationManager()
+
+        if action == "configure":
+            console.print("[blue]🤖[/blue] Configuring AI settings...")
+
+            # Get API key from user
+            if not api_key:
+                api_key = typer.prompt("Enter your OpenAI API key", hide_input=True)
+
+            # Save API key securely
+            config_dir = Path.home() / ".api-mocker"
+            config_dir.mkdir(exist_ok=True)
+            config_file = config_dir / "ai_config.json"
+
+            config_data = {
+                "openai_api_key": api_key,
+                "model": model,
+                "cache_enabled": True
+            }
+
+            with open(config_file, 'w') as f:
+                json.dump(config_data, f, indent=2)
+
+            console.print("[green]✓[/green] AI configuration saved")
+
+        elif action == "generate":
+            if not prompt:
+                prompt = typer.prompt("Enter generation prompt")
+
+            if not endpoint:
+                endpoint = typer.prompt("Enter API endpoint path")
+
+            console.print(f"[blue]🤖[/blue] Generating AI-powered mock data...")
+            console.print(f"Prompt: {prompt}")
+            console.print(f"Endpoint: {endpoint}")
+            console.print(f"Count: {count}")
+
+            # Load schema if provided
+            schema_data = None
+            if schema:
+                with open(schema, 'r') as f:
+                    schema_data = json.load(f)
+
+            # Generate data
+            result = ai_manager.generate_mock_data(
+                prompt=prompt,
+                endpoint=endpoint,
+                count=count,
+                schema=schema_data
+            )
+
+            # Display results
+            table = Table(title="AI Generation Results")
+            table.add_column("Metric", style="cyan")
+            table.add_column("Value", style="green")
+
+            table.add_row("Source", result["metadata"]["source"])
+            table.add_row("Model", result["metadata"]["model"])
+            table.add_row("Generation Time", f"{result['generation_time']:.2f}s")
+            table.add_row("Cache Key", result["cache_key"][:8] + "..." if result["cache_key"] else "N/A")
+
+            console.print(table)
+
+            # Save to file if requested
+            if output:
+                with open(output, 'w') as f:
+                    json.dump(result["data"], f, indent=2)
+                console.print(f"[green]✓[/green] Data saved to: {output}")
+            else:
+                console.print("\n[blue]Generated Data:[/blue]")
+                console.print_json(data=result["data"])
+
+        elif action == "cache":
+            if clear_cache:
+                ai_manager.clear_cache()
+                console.print("[green]✓[/green] AI cache cleared")
+            else:
+                stats = ai_manager.get_cache_stats()
+                table = Table(title="AI Cache Statistics")
+                table.add_column("Metric", style="cyan")
+                table.add_column("Value", style="green")
+
+                table.add_row("Cache Size", str(stats["cache_size"]))
+                table.add_row("Cache Enabled", str(stats["cache_enabled"]))
+                table.add_row("Cache TTL", f"{stats['cache_ttl']}s")
+
+                console.print(table)
+
+        elif action == "test":
+            console.print("[blue]🧪[/blue] Testing AI generation...")
+
+            # Test with simple prompt
+            test_result = ai_manager.generate_mock_data(
+                prompt="Generate a user profile with name, email, and age",
+                endpoint="/test/user",
+                count=1
+            )
+
+            console.print("[green]✓[/green] AI generation test successful")
+            console.print(f"Generated in: {test_result['generation_time']:.2f}s")
+            console.print_json(data=test_result["data"])
+
+        else:
+            console.print(f"[red]✗[/red] Unknown AI action: {action}")
+            raise typer.Exit(1)
+
+    except Exception as e:
+        console.print(f"[red]✗[/red] AI generation error: {e}")
+        raise typer.Exit(1)
+
+
+
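
Because ai is registered on the same Typer app, it can be exercised in-process with Typer's CliRunner test helper. A minimal sketch, assuming api-mocker 0.3.0 is installed and a key has been stored via `ai configure` (prompt text and file names are illustrative):

    from typer.testing import CliRunner
    from api_mocker.cli import app

    runner = CliRunner()
    result = runner.invoke(app, [
        "ai", "generate",
        "--prompt", "Generate a user profile with name, email, and age",
        "--endpoint", "/users",
        "--count", "3",
        "--output", "users.json",  # without --output the data is printed instead
    ])
    print(result.output)

Worth noting: despite the "Save API key securely" comment, the configure branch writes the key as plain JSON to ~/.api-mocker/ai_config.json.
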
+@app.command()
+def testing(
+    action: str = typer.Argument(..., help="Testing action (run, generate, performance, report)"),
+    test_file: str = typer.Option(None, "--test-file", help="Test file path"),
+    config_file: str = typer.Option(None, "--config", help="API config file path"),
+    output_file: str = typer.Option(None, "--output", help="Output file path"),
+    concurrent_users: int = typer.Option(10, "--users", help="Number of concurrent users for performance test"),
+    duration: int = typer.Option(60, "--duration", help="Test duration in seconds"),
+    verbose: bool = typer.Option(False, "--verbose", help="Verbose output"),
+):
+    """Advanced testing framework for API testing."""
+    try:
+        from .testing import TestingFramework
+
+        framework = TestingFramework()
+
+        if action == "run":
+            if not test_file:
+                test_file = typer.prompt("Enter test file path")
+
+            console.print(f"[blue]🧪[/blue] Running tests from: {test_file}")
+            results = framework.run_tests_from_file(test_file)
+
+            # Display results
+            passed = sum(1 for r in results if r.status == "passed")
+            failed = sum(1 for r in results if r.status == "failed")
+            errors = sum(1 for r in results if r.status == "error")
+
+            table = Table(title="Test Results")
+            table.add_column("Test", style="cyan")
+            table.add_column("Status", style="green")
+            table.add_column("Duration", style="blue")
+            table.add_column("Details", style="yellow")
+
+            for result in results:
+                status_icon = "✓" if result.status == "passed" else "✗"
+                status_color = "green" if result.status == "passed" else "red"
+
+                details = ""
+                if result.assertions:
+                    failed_assertions = [a for a in result.assertions if not a["passed"]]
+                    if failed_assertions:
+                        details = f"{len(failed_assertions)} failed assertions"
+
+                table.add_row(
+                    result.test_name,
+                    f"[{status_color}]{status_icon} {result.status}[/{status_color}]",
+                    f"{result.duration:.2f}s",
+                    details
+                )
+
+            console.print(table)
+            console.print(f"\n[green]✓[/green] Passed: {passed}")
+            console.print(f"[red]✗[/red] Failed: {failed}")
+            console.print(f"[yellow]⚠[/yellow] Errors: {errors}")
+
+        elif action == "generate":
+            if not config_file:
+                config_file = typer.prompt("Enter API config file path")
+
+            if not output_file:
+                output_file = f"tests-{int(time.time())}.yaml"
+
+            console.print(f"[blue]🔧[/blue] Generating tests from: {config_file}")
+            framework.generate_tests(config_file, output_file)
+            console.print(f"[green]✓[/green] Tests generated: {output_file}")
+
+        elif action == "performance":
+            if not test_file:
+                test_file = typer.prompt("Enter performance test file path")
+
+            console.print(f"[blue]⚡[/blue] Running performance test...")
+            console.print(f"Concurrent users: {concurrent_users}")
+            console.print(f"Duration: {duration} seconds")
+
+            result = framework.run_performance_test_from_file(test_file)
+
+            # Display performance results
+            table = Table(title="Performance Test Results")
+            table.add_column("Metric", style="cyan")
+            table.add_column("Value", style="green")
+
+            table.add_row("Total Requests", str(result.total_requests))
+            table.add_row("Successful Requests", str(result.successful_requests))
+            table.add_row("Failed Requests", str(result.failed_requests))
+            table.add_row("Average Response Time", f"{result.average_response_time:.2f}ms")
+            table.add_row("Min Response Time", f"{result.min_response_time:.2f}ms")
+            table.add_row("Max Response Time", f"{result.max_response_time:.2f}ms")
+            table.add_row("P95 Response Time", f"{result.p95_response_time:.2f}ms")
+            table.add_row("P99 Response Time", f"{result.p99_response_time:.2f}ms")
+            table.add_row("Requests per Second", f"{result.requests_per_second:.2f}")
+            table.add_row("Error Rate", f"{result.error_rate:.2f}%")
+            table.add_row("Test Duration", f"{result.duration:.2f}s")
+
+            console.print(table)
+
+        elif action == "report":
+            if not test_file:
+                test_file = typer.prompt("Enter test results file path")
+
+            console.print(f"[blue]📊[/blue] Generating test report from: {test_file}")
+            # TODO: Implement test report generation
+            console.print("[green]✓[/green] Test report generated")
+
+        else:
+            console.print(f"[red]✗[/red] Unknown testing action: {action}")
+            raise typer.Exit(1)
+
+    except Exception as e:
+        console.print(f"[red]✗[/red] Testing error: {e}")
+        raise typer.Exit(1)
+
+
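
The generate/run pair above composes naturally: derive a suite from an API config, then execute it. A hedged sketch (file names illustrative; the YAML layout consumed by run_tests_from_file is defined in the new api_mocker/testing.py, outside this hunk). Note also that --users and --duration are only echoed by the performance branch; the actual load profile must come from the test file, since run_performance_test_from_file receives nothing else:

    from typer.testing import CliRunner
    from api_mocker.cli import app

    runner = CliRunner()

    # Derive a test suite from an existing API config...
    runner.invoke(app, ["testing", "generate",
                        "--config", "api-config.yaml", "--output", "tests.yaml"])

    # ...then execute it and render the pass/fail table.
    result = runner.invoke(app, ["testing", "run", "--test-file", "tests.yaml"])
    print(result.output)
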
+@app.command()
+def scenarios(
+    action: str = typer.Argument(..., help="Scenario action (list, create, activate, export, import, stats)"),
+    scenario_name: str = typer.Option(None, "--name", help="Scenario name"),
+    scenario_type: str = typer.Option("happy_path", "--type", help="Scenario type (happy_path, error_scenario, edge_case, performance_test, a_b_test)"),
+    config_file: str = typer.Option(None, "--config", help="Scenario configuration file"),
+    output_file: str = typer.Option(None, "--output", help="Output file for export"),
+):
+    """Manage scenario-based mocking."""
+    try:
+        if action == "list":
+            scenarios = scenario_manager.list_scenarios()
+            if not scenarios:
+                console.print("[yellow]No scenarios found. Create one with 'scenarios create'[/yellow]")
+                return
+
+            table = Table(title="Available Scenarios")
+            table.add_column("Name", style="cyan")
+            table.add_column("Type", style="green")
+            table.add_column("Active", style="yellow")
+            table.add_column("Description", style="white")
+
+            for name in scenarios:
+                scenario = scenario_manager.get_scenario(name)
+                if scenario:
+                    table.add_row(
+                        name,
+                        scenario.scenario_type.value,
+                        "✓" if scenario.active else "✗",
+                        scenario.description
+                    )
+
+            console.print(table)
+
+        elif action == "create":
+            if not scenario_name:
+                console.print("[red]✗[/red] Scenario name is required")
+                raise typer.Exit(1)
+
+            if scenario_type == "happy_path":
+                scenario = scenario_manager.create_happy_path_scenario()
+            elif scenario_type == "error_scenario":
+                scenario = scenario_manager.create_error_scenario("server_error")
+            elif scenario_type == "performance_test":
+                scenario = scenario_manager.create_performance_test_scenario()
+            elif scenario_type == "a_b_test":
+                scenario = scenario_manager.create_a_b_test_scenario()
+            else:
+                console.print(f"[red]✗[/red] Unknown scenario type: {scenario_type}")
+                raise typer.Exit(1)
+
+            scenario.name = scenario_name
+            scenario_manager.add_scenario(scenario)
+            console.print(f"[green]✓[/green] Created scenario: {scenario_name}")
+
+        elif action == "activate":
+            if not scenario_name:
+                console.print("[red]✗[/red] Scenario name is required")
+                raise typer.Exit(1)
+
+            if scenario_manager.activate_scenario(scenario_name):
+                console.print(f"[green]✓[/green] Activated scenario: {scenario_name}")
+            else:
+                console.print(f"[red]✗[/red] Scenario not found: {scenario_name}")
+                raise typer.Exit(1)
+
+        elif action == "export":
+            if not output_file:
+                output_file = "scenarios.json"
+
+            data = scenario_manager.export_scenarios()
+            with open(output_file, 'w') as f:
+                f.write(data)
+            console.print(f"[green]✓[/green] Exported scenarios to: {output_file}")
+
+        elif action == "import":
+            if not config_file:
+                console.print("[red]✗[/red] Config file is required")
+                raise typer.Exit(1)
+
+            with open(config_file, 'r') as f:
+                data = f.read()
+
+            scenario_manager.import_scenarios(data)
+            console.print(f"[green]✓[/green] Imported scenarios from: {config_file}")
+
+        elif action == "stats":
+            stats = scenario_manager.get_scenario_statistics()
+
+            table = Table(title="Scenario Statistics")
+            table.add_column("Metric", style="cyan")
+            table.add_column("Value", style="green")
+
+            table.add_row("Total Scenarios", str(stats["total_scenarios"]))
+            table.add_row("Active Scenarios", str(stats["active_scenarios"]))
+            table.add_row("Current Active", stats["current_active"] or "None")
+
+            for scenario_type, count in stats["scenario_types"].items():
+                table.add_row(f"Type: {scenario_type}", str(count))
+
+            console.print(table)
+
+        else:
+            console.print(f"[red]✗[/red] Unknown action: {action}")
+            raise typer.Exit(1)
+
+    except Exception as e:
+        console.print(f"[red]✗[/red] Scenario error: {e}")
+        raise typer.Exit(1)
+
+
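
A typical workflow for the command above: create a canned scenario under a custom name, activate it, and round-trip the set through JSON. (One wrinkle: the argument help advertises an edge_case type, but the create branch has no handler for it, so it falls through to "Unknown scenario type".) A minimal sketch with illustrative names:

    from typer.testing import CliRunner
    from api_mocker.cli import app

    runner = CliRunner()

    # Register a canned error scenario under a custom name, then make it active.
    runner.invoke(app, ["scenarios", "create",
                        "--name", "checkout-errors", "--type", "error_scenario"])
    runner.invoke(app, ["scenarios", "activate", "--name", "checkout-errors"])

    # Round-trip the scenario set through JSON for sharing across machines.
    runner.invoke(app, ["scenarios", "export", "--output", "scenarios.json"])
    runner.invoke(app, ["scenarios", "import", "--config", "scenarios.json"])
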
+@app.command()
+def smart_matching(
+    action: str = typer.Argument(..., help="Smart matching action (list, create, test, export, import, stats)"),
+    rule_name: str = typer.Option(None, "--name", help="Rule name"),
+    rule_type: str = typer.Option(None, "--type", help="Rule type (user_type, api_version, premium_user, rate_limit, error, performance)"),
+    config_file: str = typer.Option(None, "--config", help="Rule configuration file"),
+    output_file: str = typer.Option(None, "--output", help="Output file for export"),
+    test_request: str = typer.Option(None, "--test-request", help="Test request JSON"),
+):
+    """Manage smart response matching rules."""
+    try:
+        if action == "list":
+            rules = smart_matcher.rules
+            if not rules:
+                console.print("[yellow]No rules found. Create one with 'smart-matching create'[/yellow]")
+                return
+
+            table = Table(title="Smart Matching Rules")
+            table.add_column("Name", style="cyan")
+            table.add_column("Priority", style="green")
+            table.add_column("Weight", style="yellow")
+            table.add_column("Conditions", style="white")
+
+            for rule in rules:
+                conditions = ", ".join([f"{c.field}={c.value}" for c in rule.conditions[:2]])
+                if len(rule.conditions) > 2:
+                    conditions += "..."
+
+                table.add_row(
+                    rule.name,
+                    str(rule.priority),
+                    str(rule.weight),
+                    conditions
+                )
+
+            console.print(table)
+
+        elif action == "create":
+            if not rule_name or not rule_type:
+                console.print("[red]✗[/red] Rule name and type are required")
+                raise typer.Exit(1)
+
+            # Create sample response based on rule type
+            sample_response = {
+                "status_code": 200,
+                "body": {"message": f"Response for {rule_type} rule"},
+                "headers": {"Content-Type": "application/json"}
+            }
+
+            if rule_type == "user_type":
+                rule = smart_matcher.create_user_type_rule("premium", sample_response)
+            elif rule_type == "api_version":
+                rule = smart_matcher.create_api_version_rule("v2", sample_response)
+            elif rule_type == "premium_user":
+                rule = smart_matcher.create_premium_user_rule(sample_response)
+            elif rule_type == "rate_limit":
+                rule = smart_matcher.create_rate_limit_rule(100, sample_response)
+            elif rule_type == "error":
+                rule = smart_matcher.create_error_rule("invalid_token", sample_response)
+            elif rule_type == "performance":
+                rule = smart_matcher.create_performance_rule((1, 3), sample_response)
+            else:
+                console.print(f"[red]✗[/red] Unknown rule type: {rule_type}")
+                raise typer.Exit(1)
+
+            rule.name = rule_name
+            smart_matcher.add_rule(rule)
+            console.print(f"[green]✓[/green] Created rule: {rule_name}")
+
+        elif action == "test":
+            if not test_request:
+                console.print("[red]✗[/red] Test request is required")
+                raise typer.Exit(1)
+
+            try:
+                request_data = json.loads(test_request)
+            except json.JSONDecodeError:
+                console.print("[red]✗[/red] Invalid JSON in test request")
+                raise typer.Exit(1)
+
+            response, rule = smart_matcher.find_matching_response(request_data)
+
+            if response:
+                console.print(f"[green]✓[/green] Matched rule: {rule.name if rule else 'Default'}")
+                console.print(f"Response: {json.dumps(response, indent=2)}")
+            else:
+                console.print("[yellow]No matching rule found[/yellow]")
+
+        elif action == "export":
+            if not output_file:
+                output_file = "smart_rules.json"
+
+            data = smart_matcher.export_rules()
+            with open(output_file, 'w') as f:
+                f.write(data)
+            console.print(f"[green]✓[/green] Exported rules to: {output_file}")
+
+        elif action == "import":
+            if not config_file:
+                console.print("[red]✗[/red] Config file is required")
+                raise typer.Exit(1)
+
+            with open(config_file, 'r') as f:
+                data = f.read()
+
+            smart_matcher.import_rules(data)
+            console.print(f"[green]✓[/green] Imported rules from: {config_file}")
+
+        elif action == "stats":
+            stats = smart_matcher.get_matching_statistics()
+
+            table = Table(title="Smart Matching Statistics")
+            table.add_column("Metric", style="cyan")
+            table.add_column("Value", style="green")
+
+            table.add_row("Total Rules", str(stats["total_rules"]))
+            table.add_row("No Match Count", str(stats["no_match_count"]))
+
+            for rule_name, count in stats["rule_usage"].items():
+                table.add_row(f"Rule: {rule_name}", str(count))
+
+            console.print(table)
+
+        else:
+            console.print(f"[red]✗[/red] Unknown action: {action}")
+            raise typer.Exit(1)
+
+    except Exception as e:
+        console.print(f"[red]✗[/red] Smart matching error: {e}")
+        raise typer.Exit(1)
+
+
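
Typer exposes the function above as smart-matching (underscores become hyphens, as the command's own hint text confirms). A sketch of creating a rule and dry-running the matcher; the shape of the request dict the conditions inspect is an assumption here:

    import json
    from typer.testing import CliRunner
    from api_mocker.cli import app

    runner = CliRunner()

    # Create a premium-user rule; the canned sample response is built by the CLI.
    runner.invoke(app, ["smart-matching", "create",
                        "--name", "premium-v1", "--type", "premium_user"])

    # Dry-run the matcher against a hand-built request (keys are assumed).
    request = {"path": "/profile", "headers": {"X-User-Type": "premium"}}
    result = runner.invoke(app, ["smart-matching", "test",
                                 "--test-request", json.dumps(request)])
    print(result.output)
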
+@app.command()
+def enhanced_analytics(
+    action: str = typer.Argument(..., help="Enhanced analytics action (performance, patterns, dependencies, insights, summary, export)"),
+    endpoint: str = typer.Option(None, "--endpoint", help="Specific endpoint to analyze"),
+    hours: int = typer.Option(24, "--hours", help="Time period for analysis (hours)"),
+    output_file: str = typer.Option(None, "--output", help="Output file for export"),
+    format: str = typer.Option("json", "--format", help="Export format (json, csv)"),
+):
+    """Enhanced analytics with performance benchmarking and insights."""
+    try:
+        # Create enhanced analytics instance
+        analytics = EnhancedAnalytics()
+        if action == "performance":
+            metrics = analytics.calculate_performance_metrics(endpoint, hours)
+
+            if not metrics:
+                console.print("[yellow]No performance data found[/yellow]")
+                return
+
+            table = Table(title=f"Performance Metrics (Last {hours} hours)")
+            table.add_column("Endpoint", style="cyan")
+            table.add_column("Method", style="green")
+            table.add_column("P50 (ms)", style="yellow")
+            table.add_column("P95 (ms)", style="yellow")
+            table.add_column("P99 (ms)", style="yellow")
+            table.add_column("Throughput", style="blue")
+            table.add_column("Error Rate", style="red")
+
+            for metric in metrics:
+                table.add_row(
+                    metric.endpoint,
+                    metric.method,
+                    f"{metric.response_time_p50:.2f}",
+                    f"{metric.response_time_p95:.2f}",
+                    f"{metric.response_time_p99:.2f}",
+                    f"{metric.throughput:.2f}",
+                    f"{metric.error_rate:.2%}"
+                )
+
+            console.print(table)
+
+        elif action == "patterns":
+            patterns = analytics.analyze_usage_patterns(endpoint, hours//24)
+
+            if not patterns:
+                console.print("[yellow]No usage pattern data found[/yellow]")
+                return
+
+            table = Table(title=f"Usage Patterns (Last {hours//24} days)")
+            table.add_column("Endpoint", style="cyan")
+            table.add_column("Method", style="green")
+            table.add_column("Peak Hours", style="yellow")
+            table.add_column("Peak Days", style="blue")
+            table.add_column("Top User Agent", style="white")
+
+            for pattern in patterns:
+                peak_hours = ", ".join(map(str, pattern.peak_hours[:3]))
+                peak_days = ", ".join(pattern.peak_days[:3])
+                top_ua = list(pattern.user_agents.keys())[0] if pattern.user_agents else "N/A"
+
+                table.add_row(
+                    pattern.endpoint,
+                    pattern.method,
+                    peak_hours,
+                    peak_days,
+                    top_ua[:30] + "..." if len(top_ua) > 30 else top_ua
+                )
+
+            console.print(table)
+
+        elif action == "dependencies":
+            dependencies = analytics.detect_api_dependencies(hours)
+
+            if not dependencies:
+                console.print("[yellow]No API dependencies found[/yellow]")
+                return
+
+            table = Table(title=f"API Dependencies (Last {hours} hours)")
+            table.add_column("Source", style="cyan")
+            table.add_column("Target", style="green")
+            table.add_column("Type", style="yellow")
+            table.add_column("Confidence", style="blue")
+            table.add_column("Frequency", style="white")
+            table.add_column("Avg Latency (ms)", style="red")
+
+            for dep in dependencies:
+                table.add_row(
+                    dep.source_endpoint,
+                    dep.target_endpoint,
+                    dep.dependency_type,
+                    f"{dep.confidence:.2%}",
+                    str(dep.frequency),
+                    f"{dep.avg_latency:.2f}"
+                )
+
+            console.print(table)
+
+        elif action == "insights":
+            insights = analytics.generate_cost_optimization_insights()
+
+            if not insights:
+                console.print("[yellow]No cost optimization insights found[/yellow]")
+                return
+
+            table = Table(title="Cost Optimization Insights")
+            table.add_column("Type", style="cyan")
+            table.add_column("Description", style="white")
+            table.add_column("Potential Savings", style="green")
+            table.add_column("Priority", style="yellow")
+            table.add_column("Recommendation", style="blue")
+
+            for insight in insights:
+                table.add_row(
+                    insight.insight_type,
+                    insight.description[:50] + "..." if len(insight.description) > 50 else insight.description,
+                    f"${insight.potential_savings:.2f}",
+                    insight.priority,
+                    insight.recommendation[:50] + "..." if len(insight.recommendation) > 50 else insight.recommendation
+                )
+
+            console.print(table)
+
+        elif action == "summary":
+            summary = analytics.get_analytics_summary(hours)
+
+            table = Table(title=f"Enhanced Analytics Summary ({summary['time_period']})")
+            table.add_column("Metric", style="cyan")
+            table.add_column("Value", style="green")
+
+            table.add_row("Total Requests", str(summary["total_requests"]))
+            table.add_row("Total Errors", str(summary["total_errors"]))
+            table.add_row("Error Rate", f"{summary['error_rate']:.2%}")
+            table.add_row("Avg Response Time", f"{summary['avg_response_time']:.2f}ms")
+            table.add_row("Endpoints Analyzed", str(summary["endpoints_analyzed"]))
+            table.add_row("Usage Patterns", str(summary["usage_patterns"]))
+            table.add_row("Dependencies Found", str(summary["dependencies_found"]))
+            table.add_row("Cost Insights", str(summary["cost_insights"]))
+
+            console.print(table)
+
+        elif action == "export":
+            if not output_file:
+                output_file = f"enhanced_analytics_{action}_{hours}h.{format}"
+
+            data = analytics.export_analytics(format, hours)
+            with open(output_file, 'w') as f:
+                f.write(data)
+            console.print(f"[green]✓[/green] Exported analytics to: {output_file}")
+
+        else:
+            console.print(f"[red]✗[/red] Unknown action: {action}")
+            raise typer.Exit(1)
+
+    except Exception as e:
+        console.print(f"[red]✗[/red] Enhanced analytics error: {e}")
+        raise typer.Exit(1)
+
+
 if __name__ == "__main__":
     app()
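
To close the loop on the new enhanced_analytics command (exposed as enhanced-analytics under Typer's default naming, an assumption here), a hedged sketch against previously recorded traffic; note that the patterns branch divides --hours by 24, so values below 24 analyze a zero-day window:

    from typer.testing import CliRunner
    from api_mocker.cli import app

    runner = CliRunner()

    # High-level health check over the last 48 hours of recorded traffic.
    runner.invoke(app, ["enhanced-analytics", "summary", "--hours", "48"])

    # Per-endpoint latency percentiles for the same window...
    runner.invoke(app, ["enhanced-analytics", "performance",
                        "--endpoint", "/users", "--hours", "48"])

    # ...and a CSV dump of the underlying analytics.
    runner.invoke(app, ["enhanced-analytics", "export",
                        "--hours", "48", "--format", "csv"])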