airbyte-internal-ops 0.1.11__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {airbyte_internal_ops-0.1.11.dist-info → airbyte_internal_ops-0.2.1.dist-info}/METADATA +2 -2
  2. {airbyte_internal_ops-0.1.11.dist-info → airbyte_internal_ops-0.2.1.dist-info}/RECORD +41 -40
  3. {airbyte_internal_ops-0.1.11.dist-info → airbyte_internal_ops-0.2.1.dist-info}/entry_points.txt +1 -0
  4. airbyte_ops_mcp/__init__.py +2 -2
  5. airbyte_ops_mcp/cli/cloud.py +264 -301
  6. airbyte_ops_mcp/cloud_admin/api_client.py +51 -26
  7. airbyte_ops_mcp/cloud_admin/auth.py +32 -0
  8. airbyte_ops_mcp/cloud_admin/connection_config.py +2 -2
  9. airbyte_ops_mcp/constants.py +18 -0
  10. airbyte_ops_mcp/github_actions.py +94 -5
  11. airbyte_ops_mcp/mcp/_http_headers.py +254 -0
  12. airbyte_ops_mcp/mcp/_mcp_utils.py +2 -2
  13. airbyte_ops_mcp/mcp/cloud_connector_versions.py +162 -52
  14. airbyte_ops_mcp/mcp/github.py +34 -1
  15. airbyte_ops_mcp/mcp/prod_db_queries.py +67 -24
  16. airbyte_ops_mcp/mcp/{live_tests.py → regression_tests.py} +165 -152
  17. airbyte_ops_mcp/mcp/server.py +84 -11
  18. airbyte_ops_mcp/prod_db_access/db_engine.py +15 -11
  19. airbyte_ops_mcp/prod_db_access/queries.py +27 -15
  20. airbyte_ops_mcp/prod_db_access/sql.py +17 -16
  21. airbyte_ops_mcp/{live_tests → regression_tests}/__init__.py +3 -3
  22. airbyte_ops_mcp/{live_tests → regression_tests}/cdk_secrets.py +1 -1
  23. airbyte_ops_mcp/{live_tests → regression_tests}/connection_secret_retriever.py +3 -3
  24. airbyte_ops_mcp/{live_tests → regression_tests}/connector_runner.py +1 -1
  25. airbyte_ops_mcp/{live_tests → regression_tests}/message_cache/__init__.py +3 -1
  26. airbyte_ops_mcp/{live_tests → regression_tests}/regression/__init__.py +1 -1
  27. airbyte_ops_mcp/{live_tests → regression_tests}/schema_generation.py +3 -1
  28. airbyte_ops_mcp/{live_tests → regression_tests}/validation/__init__.py +2 -2
  29. airbyte_ops_mcp/{live_tests → regression_tests}/validation/record_validators.py +4 -2
  30. {airbyte_internal_ops-0.1.11.dist-info → airbyte_internal_ops-0.2.1.dist-info}/WHEEL +0 -0
  31. /airbyte_ops_mcp/{live_tests → regression_tests}/ci_output.py +0 -0
  32. /airbyte_ops_mcp/{live_tests → regression_tests}/commons/__init__.py +0 -0
  33. /airbyte_ops_mcp/{live_tests → regression_tests}/config.py +0 -0
  34. /airbyte_ops_mcp/{live_tests → regression_tests}/connection_fetcher.py +0 -0
  35. /airbyte_ops_mcp/{live_tests → regression_tests}/evaluation_modes.py +0 -0
  36. /airbyte_ops_mcp/{live_tests → regression_tests}/http_metrics.py +0 -0
  37. /airbyte_ops_mcp/{live_tests → regression_tests}/message_cache/duckdb_cache.py +0 -0
  38. /airbyte_ops_mcp/{live_tests → regression_tests}/models.py +0 -0
  39. /airbyte_ops_mcp/{live_tests → regression_tests}/obfuscation.py +0 -0
  40. /airbyte_ops_mcp/{live_tests → regression_tests}/regression/comparators.py +0 -0
  41. /airbyte_ops_mcp/{live_tests → regression_tests}/validation/catalog_validators.py +0 -0
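Most of the renamed files above come from moving the airbyte_ops_mcp.live_tests package to airbyte_ops_mcp.regression_tests; module layouts and symbols are largely unchanged. A minimal sketch of what the rename means for a downstream import (the symbol shown is one that appears in this diff):

    # Before (0.1.11)
    from airbyte_ops_mcp.live_tests.cdk_secrets import get_first_config_from_secrets

    # After (0.2.1): same module and symbol, new package name
    from airbyte_ops_mcp.regression_tests.cdk_secrets import get_first_config_from_secrets

The diff hunks that follow correspond to airbyte_ops_mcp/cli/cloud.py (+264/-301), the largest single change in this release.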
@@ -5,8 +5,7 @@ Commands:
  airbyte-ops cloud connector get-version-info - Get connector version info
  airbyte-ops cloud connector set-version-override - Set connector version override
  airbyte-ops cloud connector clear-version-override - Clear connector version override
- airbyte-ops cloud connector live-test - Run live validation tests on a connector
- airbyte-ops cloud connector regression-test - Run regression tests comparing connector versions
+ airbyte-ops cloud connector regression-test - Run regression tests (single-version or comparison)
  airbyte-ops cloud connector fetch-connection-config - Fetch connection config to local file
  """
@@ -45,8 +44,12 @@ from airbyte_ops_mcp.constants import (
  DEFAULT_CLOUD_SQL_PROXY_PORT,
  ENV_GCP_PROD_DB_ACCESS_CREDENTIALS,
  )
- from airbyte_ops_mcp.live_tests.cdk_secrets import get_first_config_from_secrets
- from airbyte_ops_mcp.live_tests.ci_output import (
+ from airbyte_ops_mcp.mcp.cloud_connector_versions import (
+ get_cloud_connector_version,
+ set_cloud_connector_version_override,
+ )
+ from airbyte_ops_mcp.regression_tests.cdk_secrets import get_first_config_from_secrets
+ from airbyte_ops_mcp.regression_tests.ci_output import (
  generate_regression_report,
  get_report_summary,
  write_github_output,
@@ -55,28 +58,28 @@ from airbyte_ops_mcp.live_tests.ci_output import (
  write_json_output,
  write_test_summary,
  )
- from airbyte_ops_mcp.live_tests.connection_fetcher import (
+ from airbyte_ops_mcp.regression_tests.connection_fetcher import (
  fetch_connection_data,
  save_connection_data_to_files,
  )
- from airbyte_ops_mcp.live_tests.connector_runner import (
+ from airbyte_ops_mcp.regression_tests.connection_secret_retriever import (
+ enrich_config_with_secrets,
+ should_use_secret_retriever,
+ )
+ from airbyte_ops_mcp.regression_tests.connector_runner import (
  ConnectorRunner,
  ensure_image_available,
  )
- from airbyte_ops_mcp.live_tests.http_metrics import (
+ from airbyte_ops_mcp.regression_tests.http_metrics import (
  MitmproxyManager,
  parse_http_dump,
  )
- from airbyte_ops_mcp.live_tests.models import (
+ from airbyte_ops_mcp.regression_tests.models import (
  Command,
  ConnectorUnderTest,
  ExecutionInputs,
  TargetOrControl,
  )
- from airbyte_ops_mcp.mcp.cloud_connector_versions import (
- get_cloud_connector_version,
- set_cloud_connector_version_override,
- )

  # Path to connectors directory within the airbyte repo
  CONNECTORS_SUBDIR = Path("airbyte-integrations") / "connectors"
@@ -318,6 +321,20 @@ def set_version_override(
  str,
  Parameter(help="Explanation for the override (min 10 characters)."),
  ],
+ issue_url: Annotated[
+ str,
+ Parameter(help="GitHub issue URL providing context for this operation."),
+ ],
+ approval_comment_url: Annotated[
+ str,
+ Parameter(help="GitHub comment URL where admin authorized this deployment."),
+ ],
+ ai_agent_session_url: Annotated[
+ str | None,
+ Parameter(
+ help="URL to AI agent session driving this operation (for auditability)."
+ ),
+ ] = None,
  reason_url: Annotated[
  str | None,
  Parameter(help="Optional URL with more context (e.g., issue link)."),
@@ -328,6 +345,7 @@ def set_version_override(
  Requires admin authentication via AIRBYTE_INTERNAL_ADMIN_FLAG and
  AIRBYTE_INTERNAL_ADMIN_USER environment variables.
  """
+ admin_user_email = os.environ.get("AIRBYTE_INTERNAL_ADMIN_USER")
  result = set_cloud_connector_version_override(
  workspace_id=workspace_id,
  actor_id=connector_id,
@@ -336,6 +354,10 @@
  unset=False,
  override_reason=reason,
  override_reason_reference_url=reason_url,
+ admin_user_email=admin_user_email,
+ issue_url=issue_url,
+ approval_comment_url=approval_comment_url,
+ ai_agent_session_url=ai_agent_session_url,
  )
  if result.success:
  print_success(result.message)
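set-version-override (and clear-version-override below) now carries an explicit audit trail into set_cloud_connector_version_override: the admin email read from AIRBYTE_INTERNAL_ADMIN_USER, the required issue_url and approval_comment_url arguments, and an optional ai_agent_session_url. A minimal sketch of an invocation supplying that context; flag spellings follow the parameter names, the environment values and URLs are placeholders, and the command's other required options (not shown in this hunk) are omitted:

    import os
    import subprocess

    env = {
        **os.environ,
        # Admin authentication per the command docstring; values are placeholders.
        "AIRBYTE_INTERNAL_ADMIN_FLAG": "1",
        "AIRBYTE_INTERNAL_ADMIN_USER": "admin@example.com",
    }

    subprocess.run(
        [
            "airbyte-ops", "cloud", "connector", "set-version-override",
            "--reason", "Pin patched build while the upstream fix is released",
            "--issue-url", "https://github.com/<org>/<repo>/issues/<n>",
            "--approval-comment-url", "https://github.com/<org>/<repo>/issues/<n>#issuecomment-<m>",
            # ...plus workspace/connector identifiers and any other required options.
        ],
        env=env,
        check=True,
    )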
@@ -358,12 +380,27 @@ def clear_version_override(
  Literal["source", "destination"],
  Parameter(help="The type of connector."),
  ],
+ issue_url: Annotated[
+ str,
+ Parameter(help="GitHub issue URL providing context for this operation."),
+ ],
+ approval_comment_url: Annotated[
+ str,
+ Parameter(help="GitHub comment URL where admin authorized this deployment."),
+ ],
+ ai_agent_session_url: Annotated[
+ str | None,
+ Parameter(
+ help="URL to AI agent session driving this operation (for auditability)."
+ ),
+ ] = None,
  ) -> None:
  """Clear a version override from a deployed connector.

  Requires admin authentication via AIRBYTE_INTERNAL_ADMIN_FLAG and
  AIRBYTE_INTERNAL_ADMIN_USER environment variables.
  """
+ admin_user_email = os.environ.get("AIRBYTE_INTERNAL_ADMIN_USER")
  result = set_cloud_connector_version_override(
  workspace_id=workspace_id,
  actor_id=connector_id,
@@ -372,6 +409,10 @@
  unset=True,
  override_reason=None,
  override_reason_reference_url=None,
+ admin_user_email=admin_user_email,
+ issue_url=issue_url,
+ approval_comment_url=approval_comment_url,
+ ai_agent_session_url=ai_agent_session_url,
  )
  if result.success:
  print_success(result.message)
@@ -554,187 +595,6 @@ def _fetch_control_image_from_metadata(connector_name: str) -> str | None:
  return f"{docker_repository}:{docker_image_tag}"


- @connector_app.command(name="live-test")
- def live_test(
- connector_image: Annotated[
- str | None,
- Parameter(
- help="Full connector image name with tag (e.g., airbyte/source-github:1.0.0). "
- "Optional if connector_name or connection_id is provided."
- ),
- ] = None,
- connector_name: Annotated[
- str | None,
- Parameter(
- help="Connector name to build from source (e.g., 'source-pokeapi'). "
- "If provided, builds the image locally with tag 'dev'."
- ),
- ] = None,
- repo_root: Annotated[
- str | None,
- Parameter(
- help="Path to the airbyte repo root. Required if connector_name is provided "
- "and the repo cannot be auto-detected."
- ),
- ] = None,
- command: Annotated[
- Literal["spec", "check", "discover", "read"],
- Parameter(help="The Airbyte command to run."),
- ] = "check",
- connection_id: Annotated[
- str | None,
- Parameter(
- help="Airbyte Cloud connection ID to fetch config/catalog from. "
- "Mutually exclusive with config-path/catalog-path. "
- "If provided, connector_image can be auto-detected."
- ),
- ] = None,
- config_path: Annotated[
- str | None,
- Parameter(help="Path to the connector config JSON file."),
- ] = None,
- catalog_path: Annotated[
- str | None,
- Parameter(help="Path to the configured catalog JSON file (required for read)."),
- ] = None,
- state_path: Annotated[
- str | None,
- Parameter(help="Path to the state JSON file (optional for read)."),
- ] = None,
- output_dir: Annotated[
- str,
- Parameter(help="Directory to store test artifacts."),
- ] = "/tmp/live_test_artifacts",
- ) -> None:
- """Run live validation tests on a connector.
-
- This command runs the specified Airbyte protocol command against a connector
- and validates the output. Results are written to the output directory and
- to GitHub Actions outputs if running in CI.
-
- You can provide the connector image in three ways:
- 1. --connector-image: Use a pre-built image from Docker registry
- 2. --connector-name: Build the image locally from source code
- 3. --connection-id: Auto-detect from an Airbyte Cloud connection
-
- You can provide config/catalog either via file paths OR via a connection_id
- that fetches them from Airbyte Cloud.
- """
- output_path = Path(output_dir)
- output_path.mkdir(parents=True, exist_ok=True)
-
- cmd = Command(command)
-
- config_file: Path | None = None
- catalog_file: Path | None = None
- state_file = Path(state_path) if state_path else None
- resolved_connector_image: str | None = connector_image
-
- # If connector_name is provided, build the image from source
- if connector_name:
- if connector_image:
- write_github_output("success", False)
- write_github_output(
- "error", "Cannot specify both connector_image and connector_name"
- )
- exit_with_error("Cannot specify both connector_image and connector_name")
-
- repo_root_path = Path(repo_root) if repo_root else None
- built_image = _build_connector_image_from_source(
- connector_name=connector_name,
- repo_root=repo_root_path,
- tag="dev",
- )
- if not built_image:
- write_github_output("success", False)
- write_github_output("error", f"Failed to build image for {connector_name}")
- exit_with_error(f"Failed to build image for {connector_name}")
- resolved_connector_image = built_image
-
- if connection_id:
- if config_path or catalog_path:
- write_github_output("success", False)
- write_github_output(
- "error", "Cannot specify both connection_id and file paths"
- )
- exit_with_error(
- "Cannot specify both connection_id and config_path/catalog_path"
- )
-
- print_success(f"Fetching config/catalog from connection: {connection_id}")
- connection_data = fetch_connection_data(connection_id)
- config_file, catalog_file = save_connection_data_to_files(
- connection_data, output_path / "connection_data"
- )
- print_success(
- f"Fetched config for source: {connection_data.source_name} "
- f"with {len(connection_data.stream_names)} streams"
- )
-
- if not resolved_connector_image and connection_data.connector_image:
- resolved_connector_image = connection_data.connector_image
- print_success(f"Auto-detected connector image: {resolved_connector_image}")
- else:
- config_file = Path(config_path) if config_path else None
- catalog_file = Path(catalog_path) if catalog_path else None
-
- if not resolved_connector_image:
- write_github_output("success", False)
- write_github_output("error", "Missing connector image")
- exit_with_error(
- "You must provide one of the following: a connector_image, a connector_name, "
- "or a connection_id for a connection that has an associated connector image. "
- "If using connection_id, ensure the connection has a connector image configured."
- )
-
- # If connector_name was provided, we just built the image locally and it is already
- # available in Docker, so we skip the image availability check/pull. Only try to pull
- # if we didn't just build it (i.e., using a pre-built image from registry).
- if not connector_name and not ensure_image_available(resolved_connector_image):
- write_github_output("success", False)
- write_github_output(
- "error", f"Failed to pull image: {resolved_connector_image}"
- )
- exit_with_error(f"Failed to pull connector image: {resolved_connector_image}")
-
- result = _run_connector_command(
- connector_image=resolved_connector_image,
- command=cmd,
- output_dir=output_path,
- target_or_control=TargetOrControl.TARGET,
- config_path=config_file,
- catalog_path=catalog_file,
- state_path=state_file,
- )
-
- print_json(result)
-
- write_github_outputs(
- {
- "success": result["success"],
- "connector": resolved_connector_image,
- "command": command,
- "exit_code": result["exit_code"],
- }
- )
-
- write_test_summary(
- connector_image=resolved_connector_image,
- test_type="live-test",
- success=result["success"],
- results={
- "command": command,
- "exit_code": result["exit_code"],
- "output_dir": output_dir,
- },
- )
-
- if result["success"]:
- print_success(f"Live test passed for {resolved_connector_image}")
- else:
- exit_with_error(f"Live test failed for {resolved_connector_image}")
-
-
  def _run_with_optional_http_metrics(
  connector_image: str,
  command: Command,
@@ -813,25 +673,34 @@ def _run_with_optional_http_metrics(

  @connector_app.command(name="regression-test")
  def regression_test(
- target_image: Annotated[
+ skip_compare: Annotated[
+ bool,
+ Parameter(
+ help="If True, skip comparison and run single-version tests only. "
+ "If False (default), run comparison tests (target vs control)."
+ ),
+ ] = False,
+ test_image: Annotated[
  str | None,
  Parameter(
- help="Target connector image (new version) with tag (e.g., airbyte/source-github:2.0.0). "
- "Optional if connector_name is provided."
+ help="Test connector image with tag (e.g., airbyte/source-github:1.0.0). "
+ "This is the image under test - in comparison mode, it's compared against control_image."
  ),
  ] = None,
  control_image: Annotated[
  str | None,
  Parameter(
  help="Control connector image (baseline version) with tag (e.g., airbyte/source-github:1.0.0). "
- "Optional if connection_id is provided (auto-detected from connection)."
+ "Ignored if `skip_compare=True`."
  ),
  ] = None,
  connector_name: Annotated[
  str | None,
  Parameter(
- help="Connector name to build target image from source (e.g., 'source-pokeapi'). "
- "If provided, builds the target image locally with tag 'dev'."
+ help="Connector name to build image from source (e.g., 'source-pokeapi'). "
+ "If provided, builds the image locally with tag 'dev'. "
+ "For comparison tests (default), this builds the target image. "
+ "For single-version tests (skip_compare=True), this builds the test image."
  ),
  ] = None,
  repo_root: Annotated[
@@ -850,7 +719,7 @@ def regression_test(
  Parameter(
  help="Airbyte Cloud connection ID to fetch config/catalog from. "
  "Mutually exclusive with config-path/catalog-path. "
- "If provided, control_image can be auto-detected."
+ "If provided, test_image/control_image can be auto-detected."
  ),
  ] = None,
  config_path: Annotated[
@@ -873,26 +742,30 @@ def regression_test(
  bool,
  Parameter(
  help="Capture HTTP traffic metrics via mitmproxy (experimental). "
- "Requires mitmdump to be installed."
+ "Requires mitmdump to be installed. Only used in comparison mode."
  ),
  ] = False,
  ) -> None:
- """Run regression tests comparing two connector versions.
+ """Run regression tests on connectors.
+
+ This command supports two modes:
+
+ Comparison mode (skip_compare=False, default):
+ Runs the specified Airbyte protocol command against both the target (new)
+ and control (baseline) connector versions, then compares the results.
+ This helps identify regressions between versions.

- This command runs the specified Airbyte protocol command against both the
- target (new) and control (baseline) connector versions, then compares the
- results. This helps identify regressions between versions.
+ Single-version mode (skip_compare=True):
+ Runs the specified Airbyte protocol command against a single connector
+ and validates the output. No comparison is performed.

  Results are written to the output directory and to GitHub Actions outputs
  if running in CI.

- You can provide the target image in two ways:
- 1. --target-image: Use a pre-built image from Docker registry
- 2. --connector-name: Build the target image locally from source code
-
- You can provide the control image in two ways:
- 1. --control-image: Use a pre-built image from Docker registry
- 2. --connection-id: Auto-detect from an Airbyte Cloud connection
+ You can provide the test image in three ways:
+ 1. --test-image: Use a pre-built image from Docker registry
+ 2. --connector-name: Build the image locally from source code
+ 3. --connection-id: Auto-detect from an Airbyte Cloud connection

  You can provide config/catalog either via file paths OR via a connection_id
  that fetches them from Airbyte Cloud.
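A sketch of comparison mode (the default) following the docstring above; the image tags and connection ID are placeholders, and --control-image could be omitted to let it be auto-detected from the connection, or from metadata.yaml when --connector-name is used:

    import subprocess

    # Comparison mode: run the same command against target and control and compare outcomes.
    subprocess.run(
        [
            "airbyte-ops", "cloud", "connector", "regression-test",
            "--test-image", "airbyte/source-github:2.0.0",
            "--control-image", "airbyte/source-github:1.0.0",
            "--connection-id", "<connection-uuid>",
        ],
        check=True,
    )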
@@ -905,17 +778,31 @@ def regression_test(
  config_file: Path | None = None
  catalog_file: Path | None = None
  state_file = Path(state_path) if state_path else None
- resolved_target_image: str | None = target_image
+
+ # Resolve the test image (used in both single-version and comparison modes)
+ resolved_test_image: str | None = test_image
  resolved_control_image: str | None = control_image

- # If connector_name is provided, build the target image from source
+ # Validate conflicting parameters
+ # Single-version mode: reject comparison-specific parameters
+ if skip_compare and control_image:
+ write_github_output("success", False)
+ write_github_output(
+ "error", "Cannot specify control_image with skip_compare=True"
+ )
+ exit_with_error(
+ "Cannot specify --control-image with --skip-compare. "
+ "Control image is only used in comparison mode."
+ )
+
+ # If connector_name is provided, build the image from source
  if connector_name:
- if target_image:
+ if resolved_test_image:
  write_github_output("success", False)
  write_github_output(
- "error", "Cannot specify both target_image and connector_name"
+ "error", "Cannot specify both test_image and connector_name"
  )
- exit_with_error("Cannot specify both target_image and connector_name")
+ exit_with_error("Cannot specify both --test-image and --connector-name")

  repo_root_path = Path(repo_root) if repo_root else None
  built_image = _build_connector_image_from_source(
@@ -927,7 +814,7 @@
  write_github_output("success", False)
  write_github_output("error", f"Failed to build image for {connector_name}")
  exit_with_error(f"Failed to build image for {connector_name}")
- resolved_target_image = built_image
+ resolved_test_image = built_image

  if connection_id:
  if config_path or catalog_path:
@@ -941,6 +828,27 @@ def regression_test(

  print_success(f"Fetching config/catalog from connection: {connection_id}")
  connection_data = fetch_connection_data(connection_id)
+
+ # Check if we should retrieve unmasked secrets
+ if should_use_secret_retriever():
+ print_success(
+ "USE_CONNECTION_SECRET_RETRIEVER enabled - enriching config with unmasked secrets..."
+ )
+ try:
+ connection_data = enrich_config_with_secrets(
+ connection_data,
+ retrieval_reason="Regression test with USE_CONNECTION_SECRET_RETRIEVER=true",
+ )
+ print_success("Successfully retrieved unmasked secrets from database")
+ except Exception as e:
+ print_error(f"Failed to retrieve unmasked secrets: {e}")
+ exit_with_error(
+ f"Failed to retrieve unmasked secrets: {e}\n"
+ f"Unset USE_CONNECTION_SECRET_RETRIEVER or verify that the "
+ f"{ENV_GCP_PROD_DB_ACCESS_CREDENTIALS} environment variable is set "
+ f"with valid database credentials and that the Cloud SQL Proxy is running."
+ )
+
  config_file, catalog_file = save_connection_data_to_files(
  connection_data, output_path / "connection_data"
  )
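The secret-enrichment step is opt-in via the USE_CONNECTION_SECRET_RETRIEVER environment variable and reuses the helpers imported earlier in this file. A minimal sketch of the same flow outside the CLI, assuming prod-DB credentials are available (per the error message, the variable named by ENV_GCP_PROD_DB_ACCESS_CREDENTIALS must be set and the Cloud SQL Proxy must be running); the connection ID and retrieval reason are placeholders:

    from airbyte_ops_mcp.regression_tests.connection_fetcher import fetch_connection_data
    from airbyte_ops_mcp.regression_tests.connection_secret_retriever import (
        enrich_config_with_secrets,
        should_use_secret_retriever,
    )

    connection_data = fetch_connection_data("<connection-uuid>")

    # Mirror the CLI: only enrich when USE_CONNECTION_SECRET_RETRIEVER is enabled;
    # otherwise the config is used exactly as fetched (secrets stay masked).
    if should_use_secret_retriever():
        connection_data = enrich_config_with_secrets(
            connection_data,
            retrieval_reason="Ad-hoc regression test run",
        )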
@@ -949,8 +857,16 @@ def regression_test(
  f"with {len(connection_data.stream_names)} streams"
  )

- # Auto-detect control_image from connection if not provided
- if not resolved_control_image and connection_data.connector_image:
+ # Auto-detect test/control image from connection if not provided
+ if not resolved_test_image and connection_data.connector_image:
+ resolved_test_image = connection_data.connector_image
+ print_success(f"Auto-detected test image: {resolved_test_image}")
+
+ if (
+ not skip_compare
+ and not resolved_control_image
+ and connection_data.connector_image
+ ):
  resolved_control_image = connection_data.connector_image
  print_success(f"Auto-detected control image: {resolved_control_image}")
  elif config_path:
@@ -987,24 +903,24 @@ def regression_test(
  config_file = None
  catalog_file = Path(catalog_path) if catalog_path else None

- # Auto-detect control_image from metadata.yaml if connector_name is provided
- if not resolved_control_image and connector_name:
+ # Auto-detect control_image from metadata.yaml if connector_name is provided (comparison mode only)
+ if not skip_compare and not resolved_control_image and connector_name:
  resolved_control_image = _fetch_control_image_from_metadata(connector_name)
  if resolved_control_image:
  print_success(
  f"Auto-detected control image from metadata.yaml: {resolved_control_image}"
  )

- # Validate that we have both images
- if not resolved_target_image:
+ # Validate that we have the required images
+ if not resolved_test_image:
  write_github_output("success", False)
- write_github_output("error", "No target image specified")
+ write_github_output("error", "No test image specified")
  exit_with_error(
- "You must provide one of the following: a target_image or a connector_name "
- "to build the target image from source."
+ "You must provide one of the following: a test_image, a connector_name "
+ "to build the image from source, or a connection_id to auto-detect the image."
  )

- if not resolved_control_image:
+ if not skip_compare and not resolved_control_image:
  write_github_output("success", False)
  write_github_output("error", "No control image specified")
  exit_with_error(
@@ -1014,97 +930,144 @@ def regression_test(
  )

  # Pull images if they weren't just built locally
- # If connector_name was provided, we just built the target image locally
- if not connector_name and not ensure_image_available(resolved_target_image):
+ # If connector_name was provided, we just built the test image locally
+ if not connector_name and not ensure_image_available(resolved_test_image):
  write_github_output("success", False)
- write_github_output("error", f"Failed to pull image: {resolved_target_image}")
- exit_with_error(
- f"Failed to pull target connector image: {resolved_target_image}"
- )
-
- if not ensure_image_available(resolved_control_image):
+ write_github_output("error", f"Failed to pull image: {resolved_test_image}")
+ exit_with_error(f"Failed to pull test image: {resolved_test_image}")
+
+ if (
+ not skip_compare
+ and resolved_control_image
+ and not ensure_image_available(resolved_control_image)
+ ):
  write_github_output("success", False)
  write_github_output("error", f"Failed to pull image: {resolved_control_image}")
  exit_with_error(
  f"Failed to pull control connector image: {resolved_control_image}"
  )

- target_output = output_path / "target"
- control_output = output_path / "control"
-
- target_result = _run_with_optional_http_metrics(
- connector_image=resolved_target_image,
- command=cmd,
- output_dir=target_output,
- target_or_control=TargetOrControl.TARGET,
- enable_http_metrics=enable_http_metrics,
- config_path=config_file,
- catalog_path=catalog_file,
- state_path=state_file,
- )
+ # Execute the appropriate mode
+ if skip_compare:
+ # Single-version mode: run only the connector image
+ result = _run_connector_command(
+ connector_image=resolved_test_image,
+ command=cmd,
+ output_dir=output_path,
+ target_or_control=TargetOrControl.TARGET,
+ config_path=config_file,
+ catalog_path=catalog_file,
+ state_path=state_file,
+ )

- control_result = _run_with_optional_http_metrics(
- connector_image=resolved_control_image,
- command=cmd,
- output_dir=control_output,
- target_or_control=TargetOrControl.CONTROL,
- enable_http_metrics=enable_http_metrics,
- config_path=config_file,
- catalog_path=catalog_file,
- state_path=state_file,
- )
+ print_json(result)

- both_succeeded = target_result["success"] and control_result["success"]
- regression_detected = target_result["success"] != control_result["success"]
+ write_github_outputs(
+ {
+ "success": result["success"],
+ "connector": resolved_test_image,
+ "command": command,
+ "exit_code": result["exit_code"],
+ }
+ )

- combined_result = {
- "target": target_result,
- "control": control_result,
- "both_succeeded": both_succeeded,
- "regression_detected": regression_detected,
- }
+ write_test_summary(
+ connector_image=resolved_test_image,
+ test_type="regression-test",
+ success=result["success"],
+ results={
+ "command": command,
+ "exit_code": result["exit_code"],
+ "output_dir": output_dir,
+ },
+ )

- print_json(combined_result)
+ if result["success"]:
+ print_success(
+ f"Single-version regression test passed for {resolved_test_image}"
+ )
+ else:
+ exit_with_error(
+ f"Single-version regression test failed for {resolved_test_image}"
+ )
+ else:
+ # Comparison mode: run both target and control images
+ target_output = output_path / "target"
+ control_output = output_path / "control"
+
+ target_result = _run_with_optional_http_metrics(
+ connector_image=resolved_test_image,
+ command=cmd,
+ output_dir=target_output,
+ target_or_control=TargetOrControl.TARGET,
+ enable_http_metrics=enable_http_metrics,
+ config_path=config_file,
+ catalog_path=catalog_file,
+ state_path=state_file,
+ )
+
+ control_result = _run_with_optional_http_metrics(
+ connector_image=resolved_control_image,  # type: ignore[arg-type]
+ command=cmd,
+ output_dir=control_output,
+ target_or_control=TargetOrControl.CONTROL,
+ enable_http_metrics=enable_http_metrics,
+ config_path=config_file,
+ catalog_path=catalog_file,
+ state_path=state_file,
+ )
+
+ both_succeeded = target_result["success"] and control_result["success"]
+ regression_detected = target_result["success"] != control_result["success"]

- write_github_outputs(
- {
- "success": both_succeeded and not regression_detected,
- "target_image": resolved_target_image,
- "control_image": resolved_control_image,
- "command": command,
- "target_exit_code": target_result["exit_code"],
- "control_exit_code": control_result["exit_code"],
+ combined_result = {
+ "target": target_result,
+ "control": control_result,
+ "both_succeeded": both_succeeded,
  "regression_detected": regression_detected,
  }
- )
-
- write_json_output("regression_report", combined_result)

- report_path = generate_regression_report(
- target_image=resolved_target_image,
- control_image=resolved_control_image,
- command=command,
- target_result=target_result,
- control_result=control_result,
- output_dir=output_path,
- )
- print_success(f"Generated regression report: {report_path}")
+ print_json(combined_result)
+
+ write_github_outputs(
+ {
+ "success": both_succeeded and not regression_detected,
+ "target_image": resolved_test_image,
+ "control_image": resolved_control_image,
+ "command": command,
+ "target_exit_code": target_result["exit_code"],
+ "control_exit_code": control_result["exit_code"],
+ "regression_detected": regression_detected,
+ }
+ )

- summary = get_report_summary(report_path)
- write_github_summary(summary)
+ write_json_output("regression_report", combined_result)

- if regression_detected:
- exit_with_error(
- f"Regression detected between {resolved_target_image} and {resolved_control_image}"
- )
- elif both_succeeded:
- print_success(
- f"Regression test passed for {resolved_target_image} vs {resolved_control_image}"
- )
- else:
- exit_with_error(
- f"Both versions failed for {resolved_target_image} vs {resolved_control_image}"
+ report_path = generate_regression_report(
+ target_image=resolved_test_image,
+ control_image=resolved_control_image,  # type: ignore[arg-type]
+ command=command,
+ target_result=target_result,
+ control_result=control_result,
+ output_dir=output_path,
  )
+ print_success(f"Generated regression report: {report_path}")
+
+ summary = get_report_summary(report_path)
+ write_github_summary(summary)
+
+ if regression_detected:
+ exit_with_error(
+ f"Regression detected between {resolved_test_image} and {resolved_control_image}"
+ )
+ elif both_succeeded:
+ print_success(
+ f"Regression test passed for {resolved_test_image} vs {resolved_control_image}"
+ )
+ else:
+ exit_with_error(
+ f"Both versions failed for {resolved_test_image} vs {resolved_control_image}"
+ )


  @connector_app.command(name="fetch-connection-config")