api-mocker 0.1.3__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- api_mocker/ai_generator.py +482 -0
- api_mocker/cli.py +241 -0
- api_mocker/testing.py +699 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.2.0.dist-info}/METADATA +30 -7
- {api_mocker-0.1.3.dist-info → api_mocker-0.2.0.dist-info}/RECORD +9 -7
- {api_mocker-0.1.3.dist-info → api_mocker-0.2.0.dist-info}/WHEEL +0 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.2.0.dist-info}/entry_points.txt +0 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {api_mocker-0.1.3.dist-info → api_mocker-0.2.0.dist-info}/top_level.txt +0 -0
api_mocker/cli.py
CHANGED
|
@@ -660,5 +660,246 @@ def advanced(
|
|
|
660
660
|
console.print(f"[red]✗[/red] Advanced feature error: {e}")
|
|
661
661
|
raise typer.Exit(1)
|
|
662
662
|
|
|
663
|
+
@app.command()
def ai(
    action: str = typer.Argument(..., help="AI action (generate, configure, cache, test)"),
    prompt: str = typer.Option(None, "--prompt", help="AI generation prompt"),
    endpoint: str = typer.Option(None, "--endpoint", help="API endpoint path"),
    count: int = typer.Option(1, "--count", help="Number of records to generate"),
    schema: str = typer.Option(None, "--schema", help="JSON schema file path"),
    output: str = typer.Option(None, "--output", help="Output file path"),
    api_key: str = typer.Option(None, "--api-key", help="OpenAI API key"),
    model: str = typer.Option("gpt-3.5-turbo", "--model", help="AI model to use"),
    clear_cache: bool = typer.Option(False, "--clear-cache", help="Clear AI generation cache"),
):
    """AI-powered mock data generation and management.

    Actions:
        configure -- prompt for an OpenAI API key (unless --api-key is given)
                     and persist key/model settings to ~/.api-mocker/ai_config.json.
        generate  -- produce mock data for --endpoint from a natural-language
                     --prompt (optionally constrained by a --schema JSON file),
                     printing a summary table and either saving to --output or
                     pretty-printing the data.
        cache     -- show cache statistics, or clear the cache with --clear-cache.
        test      -- run a canned generation to verify the AI setup works.

    Exits with code 1 on an unknown action or any generation error.
    """
    try:
        from .ai_generator import AIGenerationManager

        # Initialize AI manager
        ai_manager = AIGenerationManager()

        if action == "configure":
            console.print("[blue]🤖[/blue] Configuring AI settings...")

            # Get API key from user (hidden input) when not supplied as a flag.
            if not api_key:
                api_key = typer.prompt("Enter your OpenAI API key", hide_input=True)

            # Persist configuration under the user's home directory.
            # NOTE(review): the key is written as plain-text JSON; consider
            # restricting file permissions (e.g. 0o600) -- TODO confirm.
            config_dir = Path.home() / ".api-mocker"
            config_dir.mkdir(exist_ok=True)
            config_file = config_dir / "ai_config.json"

            config_data = {
                "openai_api_key": api_key,
                "model": model,
                "cache_enabled": True
            }

            with open(config_file, 'w') as f:
                json.dump(config_data, f, indent=2)

            console.print("[green]✓[/green] AI configuration saved")

        elif action == "generate":
            # Interactively fill in anything not given on the command line.
            if not prompt:
                prompt = typer.prompt("Enter generation prompt")

            if not endpoint:
                endpoint = typer.prompt("Enter API endpoint path")

            console.print("[blue]🤖[/blue] Generating AI-powered mock data...")
            console.print(f"Prompt: {prompt}")
            console.print(f"Endpoint: {endpoint}")
            console.print(f"Count: {count}")

            # Load schema if provided
            schema_data = None
            if schema:
                with open(schema, 'r') as f:
                    schema_data = json.load(f)

            # Generate data
            result = ai_manager.generate_mock_data(
                prompt=prompt,
                endpoint=endpoint,
                count=count,
                schema=schema_data
            )

            # Display generation metadata in a summary table.
            table = Table(title="AI Generation Results")
            table.add_column("Metric", style="cyan")
            table.add_column("Value", style="green")

            table.add_row("Source", result["metadata"]["source"])
            table.add_row("Model", result["metadata"]["model"])
            table.add_row("Generation Time", f"{result['generation_time']:.2f}s")
            table.add_row("Cache Key", result["cache_key"][:8] + "..." if result["cache_key"] else "N/A")

            console.print(table)

            # Save to file if requested, otherwise pretty-print to the console.
            if output:
                with open(output, 'w') as f:
                    json.dump(result["data"], f, indent=2)
                console.print(f"[green]✓[/green] Data saved to: {output}")
            else:
                console.print("\n[blue]Generated Data:[/blue]")
                console.print_json(data=result["data"])

        elif action == "cache":
            if clear_cache:
                ai_manager.clear_cache()
                console.print("[green]✓[/green] AI cache cleared")
            else:
                stats = ai_manager.get_cache_stats()
                table = Table(title="AI Cache Statistics")
                table.add_column("Metric", style="cyan")
                table.add_column("Value", style="green")

                table.add_row("Cache Size", str(stats["cache_size"]))
                table.add_row("Cache Enabled", str(stats["cache_enabled"]))
                table.add_row("Cache TTL", f"{stats['cache_ttl']}s")

                console.print(table)

        elif action == "test":
            console.print("[blue]🧪[/blue] Testing AI generation...")

            # Test with simple prompt
            test_result = ai_manager.generate_mock_data(
                prompt="Generate a user profile with name, email, and age",
                endpoint="/test/user",
                count=1
            )

            console.print("[green]✓[/green] AI generation test successful")
            console.print(f"Generated in: {test_result['generation_time']:.2f}s")
            console.print_json(data=test_result["data"])

        else:
            console.print(f"[red]✗[/red] Unknown AI action: {action}")
            raise typer.Exit(1)

    except typer.Exit:
        # BUGFIX: typer.Exit subclasses Exception (via click's Exit, a
        # RuntimeError), so the broad handler below would otherwise swallow
        # the deliberate unknown-action exit and report it as a generation
        # error. Re-raise it so the exit code propagates intact.
        raise
    except Exception as e:
        console.print(f"[red]✗[/red] AI generation error: {e}")
        raise typer.Exit(1)
|
|
789
|
+
|
|
790
|
+
|
|
791
|
+
|
|
792
|
+
@app.command()
def testing(
    action: str = typer.Argument(..., help="Testing action (run, generate, performance, report)"),
    test_file: str = typer.Option(None, "--test-file", help="Test file path"),
    config_file: str = typer.Option(None, "--config", help="API config file path"),
    output_file: str = typer.Option(None, "--output", help="Output file path"),
    concurrent_users: int = typer.Option(10, "--users", help="Number of concurrent users for performance test"),
    duration: int = typer.Option(60, "--duration", help="Test duration in seconds"),
    verbose: bool = typer.Option(False, "--verbose", help="Verbose output"),
):
    """Advanced testing framework for API testing.

    Actions:
        run         -- execute tests from --test-file and print a results table.
        generate    -- derive a test suite from an API --config, written to
                       --output (defaults to a timestamped tests-<epoch>.yaml).
        performance -- run a load test from --test-file and print latency,
                       throughput and error-rate metrics.
        report      -- generate a report from a results file (not yet implemented).

    NOTE(review): --users and --duration are echoed but not passed to the
    framework, and --verbose is currently unused -- TODO confirm intent.

    Exits with code 1 on an unknown action or any framework error.
    """
    try:
        from .testing import TestingFramework

        framework = TestingFramework()

        if action == "run":
            if not test_file:
                test_file = typer.prompt("Enter test file path")

            console.print(f"[blue]🧪[/blue] Running tests from: {test_file}")
            results = framework.run_tests_from_file(test_file)

            # Tally outcomes before rendering the per-test table.
            passed = sum(1 for r in results if r.status == "passed")
            failed = sum(1 for r in results if r.status == "failed")
            errors = sum(1 for r in results if r.status == "error")

            table = Table(title="Test Results")
            table.add_column("Test", style="cyan")
            table.add_column("Status", style="green")
            table.add_column("Duration", style="blue")
            table.add_column("Details", style="yellow")

            for result in results:
                # Non-passed statuses ("failed"/"error") both render as a red ✗.
                status_icon = "✓" if result.status == "passed" else "✗"
                status_color = "green" if result.status == "passed" else "red"

                details = ""
                if result.assertions:
                    failed_assertions = [a for a in result.assertions if not a["passed"]]
                    if failed_assertions:
                        details = f"{len(failed_assertions)} failed assertions"

                table.add_row(
                    result.test_name,
                    f"[{status_color}]{status_icon} {result.status}[/{status_color}]",
                    f"{result.duration:.2f}s",
                    details
                )

            console.print(table)
            console.print(f"\n[green]✓[/green] Passed: {passed}")
            console.print(f"[red]✗[/red] Failed: {failed}")
            console.print(f"[yellow]⚠[/yellow] Errors: {errors}")

        elif action == "generate":
            if not config_file:
                config_file = typer.prompt("Enter API config file path")

            # Default to a unique, timestamped output name.
            if not output_file:
                output_file = f"tests-{int(time.time())}.yaml"

            console.print(f"[blue]🔧[/blue] Generating tests from: {config_file}")
            framework.generate_tests(config_file, output_file)
            console.print(f"[green]✓[/green] Tests generated: {output_file}")

        elif action == "performance":
            if not test_file:
                test_file = typer.prompt("Enter performance test file path")

            console.print("[blue]⚡[/blue] Running performance test...")
            console.print(f"Concurrent users: {concurrent_users}")
            console.print(f"Duration: {duration} seconds")

            result = framework.run_performance_test_from_file(test_file)

            # Display performance results
            table = Table(title="Performance Test Results")
            table.add_column("Metric", style="cyan")
            table.add_column("Value", style="green")

            table.add_row("Total Requests", str(result.total_requests))
            table.add_row("Successful Requests", str(result.successful_requests))
            table.add_row("Failed Requests", str(result.failed_requests))
            table.add_row("Average Response Time", f"{result.average_response_time:.2f}ms")
            table.add_row("Min Response Time", f"{result.min_response_time:.2f}ms")
            table.add_row("Max Response Time", f"{result.max_response_time:.2f}ms")
            table.add_row("P95 Response Time", f"{result.p95_response_time:.2f}ms")
            table.add_row("P99 Response Time", f"{result.p99_response_time:.2f}ms")
            table.add_row("Requests per Second", f"{result.requests_per_second:.2f}")
            table.add_row("Error Rate", f"{result.error_rate:.2f}%")
            table.add_row("Test Duration", f"{result.duration:.2f}s")

            console.print(table)

        elif action == "report":
            if not test_file:
                test_file = typer.prompt("Enter test results file path")

            console.print(f"[blue]📊[/blue] Generating test report from: {test_file}")
            # TODO: Implement test report generation
            console.print("[green]✓[/green] Test report generated")

        else:
            console.print(f"[red]✗[/red] Unknown testing action: {action}")
            raise typer.Exit(1)

    except typer.Exit:
        # BUGFIX: typer.Exit subclasses Exception (via click's Exit, a
        # RuntimeError), so the broad handler below would otherwise catch
        # the deliberate unknown-action exit and re-report it as a testing
        # error. Re-raise it so the exit code propagates intact.
        raise
    except Exception as e:
        console.print(f"[red]✗[/red] Testing error: {e}")
        raise typer.Exit(1)
|
|
903
|
+
|
|
663
904
|
# Entry point when the CLI module is executed directly (rather than via the
# installed console script); dispatches to the Typer application.
if __name__ == "__main__":
    app()
|