qontract-reconcile 0.10.0__py3-none-any.whl → 0.10.1.dev1203__py3-none-any.whl
This diff shows the changes between two publicly available package versions released to a supported registry. The information is provided for informational purposes only and reflects the package contents as published in the public registry.
- qontract_reconcile-0.10.1.dev1203.dist-info/METADATA +500 -0
- qontract_reconcile-0.10.1.dev1203.dist-info/RECORD +771 -0
- {qontract_reconcile-0.10.0.dist-info → qontract_reconcile-0.10.1.dev1203.dist-info}/WHEEL +1 -2
- {qontract_reconcile-0.10.0.dist-info → qontract_reconcile-0.10.1.dev1203.dist-info}/entry_points.txt +4 -2
- reconcile/acs_notifiers.py +126 -0
- reconcile/acs_policies.py +243 -0
- reconcile/acs_rbac.py +596 -0
- reconcile/aus/advanced_upgrade_service.py +621 -8
- reconcile/aus/aus_label_source.py +115 -0
- reconcile/aus/base.py +1053 -353
- reconcile/{utils → aus}/cluster_version_data.py +27 -12
- reconcile/aus/healthchecks.py +77 -0
- reconcile/aus/metrics.py +158 -0
- reconcile/aus/models.py +245 -5
- reconcile/aus/node_pool_spec.py +35 -0
- reconcile/aus/ocm_addons_upgrade_scheduler_org.py +225 -110
- reconcile/aus/ocm_upgrade_scheduler.py +76 -71
- reconcile/aus/ocm_upgrade_scheduler_org.py +81 -23
- reconcile/aus/version_gate_approver.py +204 -0
- reconcile/aus/version_gates/__init__.py +12 -0
- reconcile/aus/version_gates/handler.py +33 -0
- reconcile/aus/version_gates/ingress_gate_handler.py +32 -0
- reconcile/aus/version_gates/ocp_gate_handler.py +26 -0
- reconcile/aus/version_gates/sts_version_gate_handler.py +100 -0
- reconcile/aws_account_manager/README.md +5 -0
- reconcile/aws_account_manager/integration.py +373 -0
- reconcile/aws_account_manager/merge_request_manager.py +114 -0
- reconcile/aws_account_manager/metrics.py +39 -0
- reconcile/aws_account_manager/reconciler.py +403 -0
- reconcile/aws_account_manager/utils.py +41 -0
- reconcile/aws_ami_cleanup/integration.py +273 -0
- reconcile/aws_ami_share.py +18 -14
- reconcile/aws_cloudwatch_log_retention/integration.py +253 -0
- reconcile/aws_iam_keys.py +1 -1
- reconcile/aws_iam_password_reset.py +56 -20
- reconcile/aws_saml_idp/integration.py +204 -0
- reconcile/aws_saml_roles/integration.py +322 -0
- reconcile/aws_support_cases_sos.py +2 -2
- reconcile/aws_version_sync/integration.py +430 -0
- reconcile/aws_version_sync/merge_request_manager/merge_request.py +156 -0
- reconcile/aws_version_sync/merge_request_manager/merge_request_manager.py +160 -0
- reconcile/aws_version_sync/utils.py +64 -0
- reconcile/blackbox_exporter_endpoint_monitoring.py +2 -5
- reconcile/change_owners/README.md +34 -0
- reconcile/change_owners/approver.py +7 -9
- reconcile/change_owners/bundle.py +134 -9
- reconcile/change_owners/change_log_tracking.py +236 -0
- reconcile/change_owners/change_owners.py +204 -194
- reconcile/change_owners/change_types.py +183 -265
- reconcile/change_owners/changes.py +488 -0
- reconcile/change_owners/decision.py +120 -41
- reconcile/change_owners/diff.py +63 -92
- reconcile/change_owners/implicit_ownership.py +19 -16
- reconcile/change_owners/self_service_roles.py +158 -35
- reconcile/change_owners/tester.py +20 -18
- reconcile/checkpoint.py +4 -6
- reconcile/cli.py +1523 -242
- reconcile/closedbox_endpoint_monitoring_base.py +10 -17
- reconcile/cluster_auth_rhidp/integration.py +257 -0
- reconcile/cluster_deployment_mapper.py +2 -5
- reconcile/cna/assets/asset.py +4 -7
- reconcile/cna/assets/null.py +2 -5
- reconcile/cna/integration.py +2 -3
- reconcile/cna/state.py +6 -9
- reconcile/dashdotdb_base.py +31 -10
- reconcile/dashdotdb_cso.py +3 -6
- reconcile/dashdotdb_dora.py +530 -0
- reconcile/dashdotdb_dvo.py +10 -13
- reconcile/dashdotdb_slo.py +75 -19
- reconcile/database_access_manager.py +753 -0
- reconcile/deadmanssnitch.py +207 -0
- reconcile/dynatrace_token_provider/dependencies.py +69 -0
- reconcile/dynatrace_token_provider/integration.py +656 -0
- reconcile/dynatrace_token_provider/metrics.py +62 -0
- reconcile/dynatrace_token_provider/model.py +14 -0
- reconcile/dynatrace_token_provider/ocm.py +140 -0
- reconcile/dynatrace_token_provider/validate.py +48 -0
- reconcile/endpoints_discovery/integration.py +348 -0
- reconcile/endpoints_discovery/merge_request.py +96 -0
- reconcile/endpoints_discovery/merge_request_manager.py +178 -0
- reconcile/external_resources/aws.py +204 -0
- reconcile/external_resources/factories.py +163 -0
- reconcile/external_resources/integration.py +194 -0
- reconcile/external_resources/integration_secrets_sync.py +47 -0
- reconcile/external_resources/manager.py +405 -0
- reconcile/external_resources/meta.py +17 -0
- reconcile/external_resources/metrics.py +95 -0
- reconcile/external_resources/model.py +350 -0
- reconcile/external_resources/reconciler.py +265 -0
- reconcile/external_resources/secrets_sync.py +465 -0
- reconcile/external_resources/state.py +258 -0
- reconcile/gabi_authorized_users.py +19 -11
- reconcile/gcr_mirror.py +43 -34
- reconcile/github_org.py +4 -6
- reconcile/github_owners.py +1 -1
- reconcile/github_repo_invites.py +2 -5
- reconcile/gitlab_fork_compliance.py +14 -13
- reconcile/gitlab_housekeeping.py +185 -91
- reconcile/gitlab_labeler.py +15 -14
- reconcile/gitlab_members.py +126 -120
- reconcile/gitlab_owners.py +53 -66
- reconcile/gitlab_permissions.py +167 -6
- reconcile/glitchtip/README.md +150 -0
- reconcile/glitchtip/integration.py +99 -51
- reconcile/glitchtip/reconciler.py +99 -70
- reconcile/glitchtip_project_alerts/__init__.py +0 -0
- reconcile/glitchtip_project_alerts/integration.py +333 -0
- reconcile/glitchtip_project_dsn/integration.py +43 -43
- reconcile/gql_definitions/acs/__init__.py +0 -0
- reconcile/gql_definitions/acs/acs_instances.py +83 -0
- reconcile/gql_definitions/acs/acs_policies.py +239 -0
- reconcile/gql_definitions/acs/acs_rbac.py +111 -0
- reconcile/gql_definitions/advanced_upgrade_service/aus_clusters.py +46 -8
- reconcile/gql_definitions/advanced_upgrade_service/aus_organization.py +38 -8
- reconcile/gql_definitions/app_interface_metrics_exporter/__init__.py +0 -0
- reconcile/gql_definitions/app_interface_metrics_exporter/onboarding_status.py +61 -0
- reconcile/gql_definitions/aws_account_manager/__init__.py +0 -0
- reconcile/gql_definitions/aws_account_manager/aws_accounts.py +177 -0
- reconcile/gql_definitions/aws_ami_cleanup/__init__.py +0 -0
- reconcile/gql_definitions/aws_ami_cleanup/aws_accounts.py +161 -0
- reconcile/gql_definitions/aws_saml_idp/__init__.py +0 -0
- reconcile/gql_definitions/aws_saml_idp/aws_accounts.py +117 -0
- reconcile/gql_definitions/aws_saml_roles/__init__.py +0 -0
- reconcile/gql_definitions/aws_saml_roles/aws_accounts.py +117 -0
- reconcile/gql_definitions/aws_saml_roles/roles.py +97 -0
- reconcile/gql_definitions/aws_version_sync/__init__.py +0 -0
- reconcile/gql_definitions/aws_version_sync/clusters.py +83 -0
- reconcile/gql_definitions/aws_version_sync/namespaces.py +143 -0
- reconcile/gql_definitions/change_owners/queries/change_types.py +16 -29
- reconcile/gql_definitions/change_owners/queries/self_service_roles.py +45 -11
- reconcile/gql_definitions/cluster_auth_rhidp/__init__.py +0 -0
- reconcile/gql_definitions/cluster_auth_rhidp/clusters.py +128 -0
- reconcile/gql_definitions/cna/queries/cna_provisioners.py +6 -8
- reconcile/gql_definitions/cna/queries/cna_resources.py +3 -5
- reconcile/gql_definitions/common/alerting_services_settings.py +2 -2
- reconcile/gql_definitions/common/app_code_component_repos.py +9 -5
- reconcile/gql_definitions/{glitchtip/glitchtip_settings.py → common/app_interface_custom_messages.py} +14 -16
- reconcile/gql_definitions/common/app_interface_dms_settings.py +86 -0
- reconcile/gql_definitions/common/app_interface_repo_settings.py +2 -2
- reconcile/gql_definitions/common/app_interface_state_settings.py +3 -5
- reconcile/gql_definitions/common/app_interface_vault_settings.py +3 -5
- reconcile/gql_definitions/common/app_quay_repos_escalation_policies.py +120 -0
- reconcile/gql_definitions/common/apps.py +72 -0
- reconcile/gql_definitions/common/aws_vpc_requests.py +109 -0
- reconcile/gql_definitions/common/aws_vpcs.py +84 -0
- reconcile/gql_definitions/common/clusters.py +120 -254
- reconcile/gql_definitions/common/clusters_minimal.py +11 -35
- reconcile/gql_definitions/common/clusters_with_dms.py +72 -0
- reconcile/gql_definitions/common/clusters_with_peering.py +70 -98
- reconcile/gql_definitions/common/github_orgs.py +2 -2
- reconcile/gql_definitions/common/jira_settings.py +68 -0
- reconcile/gql_definitions/common/jiralert_settings.py +68 -0
- reconcile/gql_definitions/common/namespaces.py +74 -32
- reconcile/gql_definitions/common/namespaces_minimal.py +4 -10
- reconcile/gql_definitions/common/ocm_env_telemeter.py +95 -0
- reconcile/gql_definitions/common/ocm_environments.py +4 -2
- reconcile/gql_definitions/common/pagerduty_instances.py +5 -5
- reconcile/gql_definitions/common/pgp_reencryption_settings.py +5 -11
- reconcile/gql_definitions/common/pipeline_providers.py +45 -90
- reconcile/gql_definitions/common/quay_instances.py +64 -0
- reconcile/gql_definitions/common/quay_orgs.py +68 -0
- reconcile/gql_definitions/common/reserved_networks.py +94 -0
- reconcile/gql_definitions/common/saas_files.py +133 -95
- reconcile/gql_definitions/common/saas_target_namespaces.py +41 -26
- reconcile/gql_definitions/common/saasherder_settings.py +2 -2
- reconcile/gql_definitions/common/slack_workspaces.py +62 -0
- reconcile/gql_definitions/common/smtp_client_settings.py +2 -2
- reconcile/gql_definitions/common/state_aws_account.py +77 -0
- reconcile/gql_definitions/common/users.py +3 -2
- reconcile/gql_definitions/cost_report/__init__.py +0 -0
- reconcile/gql_definitions/cost_report/app_names.py +68 -0
- reconcile/gql_definitions/cost_report/cost_namespaces.py +86 -0
- reconcile/gql_definitions/cost_report/settings.py +77 -0
- reconcile/gql_definitions/dashdotdb_slo/slo_documents_query.py +42 -12
- reconcile/gql_definitions/dynatrace_token_provider/__init__.py +0 -0
- reconcile/gql_definitions/dynatrace_token_provider/dynatrace_bootstrap_tokens.py +79 -0
- reconcile/gql_definitions/dynatrace_token_provider/token_specs.py +84 -0
- reconcile/gql_definitions/endpoints_discovery/__init__.py +0 -0
- reconcile/gql_definitions/endpoints_discovery/namespaces.py +127 -0
- reconcile/gql_definitions/external_resources/__init__.py +0 -0
- reconcile/gql_definitions/external_resources/aws_accounts.py +73 -0
- reconcile/gql_definitions/external_resources/external_resources_modules.py +78 -0
- reconcile/gql_definitions/external_resources/external_resources_namespaces.py +1111 -0
- reconcile/gql_definitions/external_resources/external_resources_settings.py +98 -0
- reconcile/gql_definitions/fragments/aus_organization.py +34 -39
- reconcile/gql_definitions/fragments/aws_account_common.py +62 -0
- reconcile/gql_definitions/fragments/aws_account_managed.py +57 -0
- reconcile/gql_definitions/fragments/aws_account_sso.py +35 -0
- reconcile/gql_definitions/fragments/aws_infra_management_account.py +2 -2
- reconcile/gql_definitions/fragments/aws_vpc.py +47 -0
- reconcile/gql_definitions/fragments/aws_vpc_request.py +65 -0
- reconcile/gql_definitions/fragments/aws_vpc_request_subnet.py +29 -0
- reconcile/gql_definitions/fragments/deplopy_resources.py +7 -7
- reconcile/gql_definitions/fragments/disable.py +28 -0
- reconcile/gql_definitions/fragments/jumphost_common_fields.py +2 -2
- reconcile/gql_definitions/fragments/membership_source.py +47 -0
- reconcile/gql_definitions/fragments/minimal_ocm_organization.py +29 -0
- reconcile/gql_definitions/fragments/oc_connection_cluster.py +4 -9
- reconcile/gql_definitions/fragments/ocm_environment.py +5 -5
- reconcile/gql_definitions/fragments/pipeline_provider_retention.py +30 -0
- reconcile/gql_definitions/fragments/prometheus_instance.py +48 -0
- reconcile/gql_definitions/fragments/resource_limits_requirements.py +29 -0
- reconcile/gql_definitions/fragments/{resource_requirements.py → resource_requests_requirements.py} +3 -3
- reconcile/gql_definitions/fragments/resource_values.py +2 -2
- reconcile/gql_definitions/fragments/saas_target_namespace.py +55 -12
- reconcile/gql_definitions/fragments/serviceaccount_token.py +38 -0
- reconcile/gql_definitions/fragments/terraform_state.py +36 -0
- reconcile/gql_definitions/fragments/upgrade_policy.py +5 -3
- reconcile/gql_definitions/fragments/user.py +3 -2
- reconcile/gql_definitions/fragments/vault_secret.py +2 -2
- reconcile/gql_definitions/gitlab_members/gitlab_instances.py +6 -2
- reconcile/gql_definitions/gitlab_members/permissions.py +3 -5
- reconcile/gql_definitions/glitchtip/glitchtip_instance.py +16 -2
- reconcile/gql_definitions/glitchtip/glitchtip_project.py +22 -23
- reconcile/gql_definitions/glitchtip_project_alerts/__init__.py +0 -0
- reconcile/gql_definitions/glitchtip_project_alerts/glitchtip_project.py +173 -0
- reconcile/gql_definitions/integrations/integrations.py +62 -45
- reconcile/gql_definitions/introspection.json +51176 -0
- reconcile/gql_definitions/jenkins_configs/jenkins_configs.py +13 -5
- reconcile/gql_definitions/jenkins_configs/jenkins_instances.py +79 -0
- reconcile/gql_definitions/jira/__init__.py +0 -0
- reconcile/gql_definitions/jira/jira_servers.py +80 -0
- reconcile/gql_definitions/jira_permissions_validator/__init__.py +0 -0
- reconcile/gql_definitions/jira_permissions_validator/jira_boards_for_permissions_validator.py +131 -0
- reconcile/gql_definitions/jumphosts/jumphosts.py +3 -5
- reconcile/gql_definitions/ldap_groups/__init__.py +0 -0
- reconcile/gql_definitions/ldap_groups/roles.py +111 -0
- reconcile/gql_definitions/ldap_groups/settings.py +79 -0
- reconcile/gql_definitions/maintenance/__init__.py +0 -0
- reconcile/gql_definitions/maintenance/maintenances.py +101 -0
- reconcile/gql_definitions/membershipsources/__init__.py +0 -0
- reconcile/gql_definitions/membershipsources/roles.py +112 -0
- reconcile/gql_definitions/ocm_labels/__init__.py +0 -0
- reconcile/gql_definitions/ocm_labels/clusters.py +112 -0
- reconcile/gql_definitions/ocm_labels/organizations.py +78 -0
- reconcile/gql_definitions/ocm_subscription_labels/__init__.py +0 -0
- reconcile/gql_definitions/openshift_cluster_bots/__init__.py +0 -0
- reconcile/gql_definitions/openshift_cluster_bots/clusters.py +126 -0
- reconcile/gql_definitions/openshift_groups/managed_groups.py +2 -2
- reconcile/gql_definitions/openshift_groups/managed_roles.py +3 -2
- reconcile/gql_definitions/openshift_serviceaccount_tokens/__init__.py +0 -0
- reconcile/gql_definitions/openshift_serviceaccount_tokens/tokens.py +132 -0
- reconcile/gql_definitions/quay_membership/quay_membership.py +3 -5
- reconcile/gql_definitions/rhidp/__init__.py +0 -0
- reconcile/gql_definitions/rhidp/organizations.py +96 -0
- reconcile/gql_definitions/service_dependencies/jenkins_instance_fragment.py +2 -2
- reconcile/gql_definitions/service_dependencies/service_dependencies.py +9 -31
- reconcile/gql_definitions/sharding/aws_accounts.py +2 -2
- reconcile/gql_definitions/sharding/ocm_organization.py +63 -0
- reconcile/gql_definitions/skupper_network/site_controller_template.py +2 -2
- reconcile/gql_definitions/skupper_network/skupper_networks.py +12 -38
- reconcile/gql_definitions/slack_usergroups/clusters.py +2 -2
- reconcile/gql_definitions/slack_usergroups/permissions.py +8 -15
- reconcile/gql_definitions/slack_usergroups/users.py +3 -2
- reconcile/gql_definitions/slo_documents/__init__.py +0 -0
- reconcile/gql_definitions/slo_documents/slo_documents.py +142 -0
- reconcile/gql_definitions/status_board/__init__.py +0 -0
- reconcile/gql_definitions/status_board/status_board.py +163 -0
- reconcile/gql_definitions/statuspage/statuspages.py +56 -7
- reconcile/gql_definitions/templating/__init__.py +0 -0
- reconcile/gql_definitions/templating/template_collection.py +130 -0
- reconcile/gql_definitions/templating/templates.py +108 -0
- reconcile/gql_definitions/terraform_cloudflare_dns/app_interface_cloudflare_dns_settings.py +4 -8
- reconcile/gql_definitions/terraform_cloudflare_dns/terraform_cloudflare_zones.py +8 -8
- reconcile/gql_definitions/terraform_cloudflare_resources/terraform_cloudflare_accounts.py +6 -8
- reconcile/gql_definitions/terraform_cloudflare_resources/terraform_cloudflare_resources.py +45 -56
- reconcile/gql_definitions/terraform_cloudflare_users/app_interface_setting_cloudflare_and_vault.py +4 -8
- reconcile/gql_definitions/terraform_cloudflare_users/terraform_cloudflare_roles.py +4 -8
- reconcile/gql_definitions/terraform_init/__init__.py +0 -0
- reconcile/gql_definitions/terraform_init/aws_accounts.py +93 -0
- reconcile/gql_definitions/terraform_repo/__init__.py +0 -0
- reconcile/gql_definitions/terraform_repo/terraform_repo.py +141 -0
- reconcile/gql_definitions/terraform_resources/database_access_manager.py +158 -0
- reconcile/gql_definitions/terraform_resources/terraform_resources_namespaces.py +153 -162
- reconcile/gql_definitions/terraform_tgw_attachments/__init__.py +0 -0
- reconcile/gql_definitions/terraform_tgw_attachments/aws_accounts.py +119 -0
- reconcile/gql_definitions/unleash_feature_toggles/__init__.py +0 -0
- reconcile/gql_definitions/unleash_feature_toggles/feature_toggles.py +113 -0
- reconcile/gql_definitions/vault_instances/vault_instances.py +17 -50
- reconcile/gql_definitions/vault_policies/vault_policies.py +2 -2
- reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator.py +49 -12
- reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator_peered_cluster_fragment.py +7 -2
- reconcile/integrations_manager.py +25 -13
- reconcile/jenkins/types.py +5 -1
- reconcile/jenkins_base.py +36 -0
- reconcile/jenkins_job_builder.py +10 -48
- reconcile/jenkins_job_builds_cleaner.py +40 -25
- reconcile/jenkins_job_cleaner.py +1 -3
- reconcile/jenkins_roles.py +22 -26
- reconcile/jenkins_webhooks.py +9 -6
- reconcile/jenkins_worker_fleets.py +11 -6
- reconcile/jira_permissions_validator.py +340 -0
- reconcile/jira_watcher.py +3 -5
- reconcile/ldap_groups/__init__.py +0 -0
- reconcile/ldap_groups/integration.py +279 -0
- reconcile/ldap_users.py +3 -0
- reconcile/ocm/types.py +39 -59
- reconcile/ocm_additional_routers.py +0 -1
- reconcile/ocm_addons_upgrade_tests_trigger.py +10 -15
- reconcile/ocm_aws_infrastructure_access.py +30 -32
- reconcile/ocm_clusters.py +217 -130
- reconcile/ocm_external_configuration_labels.py +15 -0
- reconcile/ocm_github_idp.py +1 -1
- reconcile/ocm_groups.py +25 -5
- reconcile/ocm_internal_notifications/__init__.py +0 -0
- reconcile/ocm_internal_notifications/integration.py +119 -0
- reconcile/ocm_labels/__init__.py +0 -0
- reconcile/ocm_labels/integration.py +409 -0
- reconcile/ocm_machine_pools.py +517 -108
- reconcile/ocm_upgrade_scheduler_org_updater.py +15 -11
- reconcile/openshift_base.py +609 -207
- reconcile/openshift_cluster_bots.py +344 -0
- reconcile/openshift_clusterrolebindings.py +15 -15
- reconcile/openshift_groups.py +42 -45
- reconcile/openshift_limitranges.py +1 -0
- reconcile/openshift_namespace_labels.py +22 -28
- reconcile/openshift_namespaces.py +22 -22
- reconcile/openshift_network_policies.py +4 -8
- reconcile/openshift_prometheus_rules.py +43 -0
- reconcile/openshift_resourcequotas.py +2 -16
- reconcile/openshift_resources.py +12 -10
- reconcile/openshift_resources_base.py +304 -328
- reconcile/openshift_rolebindings.py +18 -20
- reconcile/openshift_saas_deploy.py +105 -21
- reconcile/openshift_saas_deploy_change_tester.py +30 -35
- reconcile/openshift_saas_deploy_trigger_base.py +39 -36
- reconcile/openshift_saas_deploy_trigger_cleaner.py +41 -27
- reconcile/openshift_saas_deploy_trigger_configs.py +1 -2
- reconcile/openshift_saas_deploy_trigger_images.py +1 -2
- reconcile/openshift_saas_deploy_trigger_moving_commits.py +1 -2
- reconcile/openshift_saas_deploy_trigger_upstream_jobs.py +1 -2
- reconcile/openshift_serviceaccount_tokens.py +138 -74
- reconcile/openshift_tekton_resources.py +89 -24
- reconcile/openshift_upgrade_watcher.py +110 -62
- reconcile/openshift_users.py +16 -15
- reconcile/openshift_vault_secrets.py +11 -6
- reconcile/oum/__init__.py +0 -0
- reconcile/oum/base.py +387 -0
- reconcile/oum/labelset.py +55 -0
- reconcile/oum/metrics.py +71 -0
- reconcile/oum/models.py +69 -0
- reconcile/oum/providers.py +59 -0
- reconcile/oum/standalone.py +196 -0
- reconcile/prometheus_rules_tester/integration.py +31 -23
- reconcile/quay_base.py +4 -1
- reconcile/quay_membership.py +1 -2
- reconcile/quay_mirror.py +111 -61
- reconcile/quay_mirror_org.py +34 -21
- reconcile/quay_permissions.py +7 -3
- reconcile/quay_repos.py +24 -32
- reconcile/queries.py +263 -198
- reconcile/query_validator.py +3 -5
- reconcile/resource_scraper.py +3 -4
- reconcile/{template_tester.py → resource_template_tester.py} +3 -3
- reconcile/rhidp/__init__.py +0 -0
- reconcile/rhidp/common.py +214 -0
- reconcile/rhidp/metrics.py +20 -0
- reconcile/rhidp/ocm_oidc_idp/__init__.py +0 -0
- reconcile/rhidp/ocm_oidc_idp/base.py +221 -0
- reconcile/rhidp/ocm_oidc_idp/integration.py +56 -0
- reconcile/rhidp/ocm_oidc_idp/metrics.py +22 -0
- reconcile/rhidp/sso_client/__init__.py +0 -0
- reconcile/rhidp/sso_client/base.py +266 -0
- reconcile/rhidp/sso_client/integration.py +60 -0
- reconcile/rhidp/sso_client/metrics.py +39 -0
- reconcile/run_integration.py +293 -0
- reconcile/saas_auto_promotions_manager/integration.py +69 -24
- reconcile/saas_auto_promotions_manager/merge_request_manager/batcher.py +208 -0
- reconcile/saas_auto_promotions_manager/merge_request_manager/desired_state.py +28 -0
- reconcile/saas_auto_promotions_manager/merge_request_manager/merge_request.py +3 -4
- reconcile/saas_auto_promotions_manager/merge_request_manager/merge_request_manager_v2.py +172 -0
- reconcile/saas_auto_promotions_manager/merge_request_manager/metrics.py +42 -0
- reconcile/saas_auto_promotions_manager/merge_request_manager/mr_parser.py +226 -0
- reconcile/saas_auto_promotions_manager/merge_request_manager/open_merge_requests.py +23 -0
- reconcile/saas_auto_promotions_manager/merge_request_manager/renderer.py +108 -32
- reconcile/saas_auto_promotions_manager/meta.py +4 -0
- reconcile/saas_auto_promotions_manager/publisher.py +32 -4
- reconcile/saas_auto_promotions_manager/s3_exporter.py +77 -0
- reconcile/saas_auto_promotions_manager/subscriber.py +110 -23
- reconcile/saas_auto_promotions_manager/utils/saas_files_inventory.py +48 -41
- reconcile/saas_file_validator.py +16 -6
- reconcile/sendgrid_teammates.py +27 -12
- reconcile/service_dependencies.py +0 -3
- reconcile/signalfx_endpoint_monitoring.py +2 -5
- reconcile/skupper_network/integration.py +10 -11
- reconcile/skupper_network/models.py +3 -5
- reconcile/skupper_network/reconciler.py +28 -35
- reconcile/skupper_network/site_controller.py +8 -8
- reconcile/slack_base.py +4 -7
- reconcile/slack_usergroups.py +249 -171
- reconcile/sql_query.py +324 -171
- reconcile/status.py +0 -1
- reconcile/status_board.py +275 -0
- reconcile/statuspage/__init__.py +0 -5
- reconcile/statuspage/atlassian.py +219 -80
- reconcile/statuspage/integration.py +9 -97
- reconcile/statuspage/integrations/__init__.py +0 -0
- reconcile/statuspage/integrations/components.py +77 -0
- reconcile/statuspage/integrations/maintenances.py +111 -0
- reconcile/statuspage/page.py +107 -72
- reconcile/statuspage/state.py +6 -11
- reconcile/statuspage/status.py +8 -12
- reconcile/templates/rosa-classic-cluster-creation.sh.j2 +60 -0
- reconcile/templates/rosa-hcp-cluster-creation.sh.j2 +61 -0
- reconcile/templating/__init__.py +0 -0
- reconcile/templating/lib/__init__.py +0 -0
- reconcile/templating/lib/merge_request_manager.py +180 -0
- reconcile/templating/lib/model.py +20 -0
- reconcile/templating/lib/rendering.py +191 -0
- reconcile/templating/renderer.py +410 -0
- reconcile/templating/validator.py +153 -0
- reconcile/terraform_aws_route53.py +13 -10
- reconcile/terraform_cloudflare_dns.py +92 -122
- reconcile/terraform_cloudflare_resources.py +15 -13
- reconcile/terraform_cloudflare_users.py +27 -27
- reconcile/terraform_init/__init__.py +0 -0
- reconcile/terraform_init/integration.py +165 -0
- reconcile/terraform_init/merge_request.py +57 -0
- reconcile/terraform_init/merge_request_manager.py +102 -0
- reconcile/terraform_repo.py +403 -0
- reconcile/terraform_resources.py +266 -168
- reconcile/terraform_tgw_attachments.py +417 -167
- reconcile/terraform_users.py +40 -17
- reconcile/terraform_vpc_peerings.py +310 -142
- reconcile/terraform_vpc_resources/__init__.py +0 -0
- reconcile/terraform_vpc_resources/integration.py +220 -0
- reconcile/terraform_vpc_resources/merge_request.py +57 -0
- reconcile/terraform_vpc_resources/merge_request_manager.py +107 -0
- reconcile/typed_queries/alerting_services_settings.py +1 -2
- reconcile/typed_queries/app_interface_custom_messages.py +24 -0
- reconcile/typed_queries/app_interface_deadmanssnitch_settings.py +17 -0
- reconcile/typed_queries/app_interface_metrics_exporter/__init__.py +0 -0
- reconcile/typed_queries/app_interface_metrics_exporter/onboarding_status.py +13 -0
- reconcile/typed_queries/app_interface_repo_url.py +1 -2
- reconcile/typed_queries/app_interface_state_settings.py +1 -3
- reconcile/typed_queries/app_interface_vault_settings.py +1 -2
- reconcile/typed_queries/app_quay_repos_escalation_policies.py +14 -0
- reconcile/typed_queries/apps.py +11 -0
- reconcile/typed_queries/aws_vpc_requests.py +9 -0
- reconcile/typed_queries/aws_vpcs.py +12 -0
- reconcile/typed_queries/cloudflare.py +10 -0
- reconcile/typed_queries/clusters.py +7 -5
- reconcile/typed_queries/clusters_minimal.py +6 -5
- reconcile/typed_queries/clusters_with_dms.py +16 -0
- reconcile/typed_queries/cost_report/__init__.py +0 -0
- reconcile/typed_queries/cost_report/app_names.py +22 -0
- reconcile/typed_queries/cost_report/cost_namespaces.py +43 -0
- reconcile/typed_queries/cost_report/settings.py +15 -0
- reconcile/typed_queries/dynatrace.py +10 -0
- reconcile/typed_queries/dynatrace_environments.py +14 -0
- reconcile/typed_queries/dynatrace_token_provider_token_specs.py +14 -0
- reconcile/typed_queries/external_resources.py +46 -0
- reconcile/typed_queries/get_state_aws_account.py +20 -0
- reconcile/typed_queries/glitchtip.py +10 -0
- reconcile/typed_queries/jenkins.py +25 -0
- reconcile/typed_queries/jira.py +7 -0
- reconcile/typed_queries/jira_settings.py +16 -0
- reconcile/typed_queries/jiralert_settings.py +22 -0
- reconcile/typed_queries/ocm.py +8 -0
- reconcile/typed_queries/pagerduty_instances.py +2 -7
- reconcile/typed_queries/quay.py +23 -0
- reconcile/typed_queries/repos.py +20 -8
- reconcile/typed_queries/reserved_networks.py +12 -0
- reconcile/typed_queries/saas_files.py +221 -167
- reconcile/typed_queries/slack.py +7 -0
- reconcile/typed_queries/slo_documents.py +12 -0
- reconcile/typed_queries/status_board.py +58 -0
- reconcile/typed_queries/tekton_pipeline_providers.py +1 -2
- reconcile/typed_queries/terraform_namespaces.py +1 -2
- reconcile/typed_queries/terraform_tgw_attachments/__init__.py +0 -0
- reconcile/typed_queries/terraform_tgw_attachments/aws_accounts.py +16 -0
- reconcile/typed_queries/unleash.py +10 -0
- reconcile/typed_queries/users.py +11 -0
- reconcile/typed_queries/vault.py +10 -0
- reconcile/unleash_feature_toggles/__init__.py +0 -0
- reconcile/unleash_feature_toggles/integration.py +287 -0
- reconcile/utils/acs/__init__.py +0 -0
- reconcile/utils/acs/base.py +81 -0
- reconcile/utils/acs/notifiers.py +143 -0
- reconcile/utils/acs/policies.py +163 -0
- reconcile/utils/acs/rbac.py +277 -0
- reconcile/utils/aggregated_list.py +11 -9
- reconcile/utils/amtool.py +6 -4
- reconcile/utils/aws_api.py +279 -66
- reconcile/utils/aws_api_typed/__init__.py +0 -0
- reconcile/utils/aws_api_typed/account.py +23 -0
- reconcile/utils/aws_api_typed/api.py +273 -0
- reconcile/utils/aws_api_typed/dynamodb.py +16 -0
- reconcile/utils/aws_api_typed/iam.py +67 -0
- reconcile/utils/aws_api_typed/organization.py +152 -0
- reconcile/utils/aws_api_typed/s3.py +26 -0
- reconcile/utils/aws_api_typed/service_quotas.py +79 -0
- reconcile/utils/aws_api_typed/sts.py +36 -0
- reconcile/utils/aws_api_typed/support.py +79 -0
- reconcile/utils/aws_helper.py +42 -3
- reconcile/utils/batches.py +11 -0
- reconcile/utils/binary.py +7 -9
- reconcile/utils/cloud_resource_best_practice/__init__.py +0 -0
- reconcile/utils/cloud_resource_best_practice/aws_rds.py +66 -0
- reconcile/utils/clusterhealth/__init__.py +0 -0
- reconcile/utils/clusterhealth/providerbase.py +39 -0
- reconcile/utils/clusterhealth/telemeter.py +39 -0
- reconcile/utils/config.py +3 -4
- reconcile/utils/deadmanssnitch_api.py +86 -0
- reconcile/utils/differ.py +205 -0
- reconcile/utils/disabled_integrations.py +4 -6
- reconcile/utils/dynatrace/__init__.py +0 -0
- reconcile/utils/dynatrace/client.py +93 -0
- reconcile/utils/early_exit_cache.py +289 -0
- reconcile/utils/elasticsearch_exceptions.py +5 -0
- reconcile/utils/environ.py +2 -2
- reconcile/utils/exceptions.py +4 -0
- reconcile/utils/expiration.py +4 -8
- reconcile/utils/extended_early_exit.py +210 -0
- reconcile/utils/external_resource_spec.py +34 -12
- reconcile/utils/external_resources.py +48 -20
- reconcile/utils/filtering.py +16 -0
- reconcile/utils/git.py +49 -16
- reconcile/utils/github_api.py +10 -9
- reconcile/utils/gitlab_api.py +333 -190
- reconcile/utils/glitchtip/client.py +97 -100
- reconcile/utils/glitchtip/models.py +89 -11
- reconcile/utils/gql.py +157 -58
- reconcile/utils/grouping.py +17 -0
- reconcile/utils/helm.py +89 -18
- reconcile/utils/helpers.py +51 -0
- reconcile/utils/imap_client.py +5 -6
- reconcile/utils/internal_groups/__init__.py +0 -0
- reconcile/utils/internal_groups/client.py +160 -0
- reconcile/utils/internal_groups/models.py +71 -0
- reconcile/utils/jenkins_api.py +10 -34
- reconcile/utils/jinja2/__init__.py +0 -0
- reconcile/utils/{jinja2_ext.py → jinja2/extensions.py} +6 -4
- reconcile/utils/jinja2/filters.py +142 -0
- reconcile/utils/jinja2/utils.py +278 -0
- reconcile/utils/jira_client.py +165 -8
- reconcile/utils/jjb_client.py +47 -35
- reconcile/utils/jobcontroller/__init__.py +0 -0
- reconcile/utils/jobcontroller/controller.py +413 -0
- reconcile/utils/jobcontroller/models.py +195 -0
- reconcile/utils/jsonpath.py +4 -5
- reconcile/utils/jump_host.py +13 -12
- reconcile/utils/keycloak.py +106 -0
- reconcile/utils/ldap_client.py +35 -6
- reconcile/utils/lean_terraform_client.py +115 -6
- reconcile/utils/membershipsources/__init__.py +0 -0
- reconcile/utils/membershipsources/app_interface_resolver.py +60 -0
- reconcile/utils/membershipsources/models.py +91 -0
- reconcile/utils/membershipsources/resolver.py +110 -0
- reconcile/utils/merge_request_manager/__init__.py +0 -0
- reconcile/utils/merge_request_manager/merge_request_manager.py +99 -0
- reconcile/utils/merge_request_manager/parser.py +67 -0
- reconcile/utils/metrics.py +511 -1
- reconcile/utils/models.py +123 -0
- reconcile/utils/mr/README.md +198 -0
- reconcile/utils/mr/__init__.py +14 -10
- reconcile/utils/mr/app_interface_reporter.py +2 -2
- reconcile/utils/mr/aws_access.py +4 -4
- reconcile/utils/mr/base.py +51 -31
- reconcile/utils/mr/clusters_updates.py +10 -7
- reconcile/utils/mr/glitchtip_access_reporter.py +2 -4
- reconcile/utils/mr/labels.py +14 -1
- reconcile/utils/mr/notificator.py +1 -3
- reconcile/utils/mr/ocm_update_recommended_version.py +1 -2
- reconcile/utils/mr/ocm_upgrade_scheduler_org_updates.py +7 -3
- reconcile/utils/mr/promote_qontract.py +203 -0
- reconcile/utils/mr/user_maintenance.py +24 -4
- reconcile/utils/oauth2_backend_application_session.py +132 -0
- reconcile/utils/oc.py +194 -170
- reconcile/utils/oc_connection_parameters.py +40 -51
- reconcile/utils/oc_filters.py +11 -13
- reconcile/utils/oc_map.py +14 -35
- reconcile/utils/ocm/__init__.py +30 -1
- reconcile/utils/ocm/addons.py +228 -0
- reconcile/utils/ocm/base.py +618 -5
- reconcile/utils/ocm/cluster_groups.py +5 -56
- reconcile/utils/ocm/clusters.py +111 -99
- reconcile/utils/ocm/identity_providers.py +66 -0
- reconcile/utils/ocm/label_sources.py +75 -0
- reconcile/utils/ocm/labels.py +139 -54
- reconcile/utils/ocm/manifests.py +39 -0
- reconcile/utils/ocm/ocm.py +182 -928
- reconcile/utils/ocm/products.py +758 -0
- reconcile/utils/ocm/search_filters.py +20 -28
- reconcile/utils/ocm/service_log.py +32 -79
- reconcile/utils/ocm/sre_capability_labels.py +51 -0
- reconcile/utils/ocm/status_board.py +66 -0
- reconcile/utils/ocm/subscriptions.py +49 -59
- reconcile/utils/ocm/syncsets.py +39 -0
- reconcile/utils/ocm/upgrades.py +181 -0
- reconcile/utils/ocm_base_client.py +71 -36
- reconcile/utils/openshift_resource.py +113 -67
- reconcile/utils/output.py +18 -11
- reconcile/utils/pagerduty_api.py +16 -10
- reconcile/utils/parse_dhms_duration.py +13 -1
- reconcile/utils/prometheus.py +123 -0
- reconcile/utils/promotion_state.py +56 -19
- reconcile/utils/promtool.py +5 -8
- reconcile/utils/quay_api.py +13 -25
- reconcile/utils/raw_github_api.py +3 -5
- reconcile/utils/repo_owners.py +2 -8
- reconcile/utils/rest_api_base.py +126 -0
- reconcile/utils/rosa/__init__.py +0 -0
- reconcile/utils/rosa/rosa_cli.py +310 -0
- reconcile/utils/rosa/session.py +201 -0
- reconcile/utils/ruamel.py +16 -0
- reconcile/utils/runtime/__init__.py +0 -1
- reconcile/utils/runtime/desired_state_diff.py +9 -20
- reconcile/utils/runtime/environment.py +33 -8
- reconcile/utils/runtime/integration.py +28 -12
- reconcile/utils/runtime/meta.py +1 -3
- reconcile/utils/runtime/runner.py +8 -11
- reconcile/utils/runtime/sharding.py +93 -36
- reconcile/utils/saasherder/__init__.py +1 -1
- reconcile/utils/saasherder/interfaces.py +143 -138
- reconcile/utils/saasherder/models.py +201 -43
- reconcile/utils/saasherder/saasherder.py +508 -378
- reconcile/utils/secret_reader.py +22 -27
- reconcile/utils/semver_helper.py +15 -1
- reconcile/utils/slack_api.py +124 -36
- reconcile/utils/smtp_client.py +1 -2
- reconcile/utils/sqs_gateway.py +10 -6
- reconcile/utils/state.py +276 -127
- reconcile/utils/terraform/config_client.py +6 -7
- reconcile/utils/terraform_client.py +284 -125
- reconcile/utils/terrascript/cloudflare_client.py +38 -17
- reconcile/utils/terrascript/cloudflare_resources.py +67 -18
- reconcile/utils/terrascript/models.py +2 -3
- reconcile/utils/terrascript/resources.py +1 -2
- reconcile/utils/terrascript_aws_client.py +1292 -540
- reconcile/utils/three_way_diff_strategy.py +157 -0
- reconcile/utils/unleash/__init__.py +11 -0
- reconcile/utils/{unleash.py → unleash/client.py} +35 -29
- reconcile/utils/unleash/server.py +145 -0
- reconcile/utils/vault.py +42 -32
- reconcile/utils/vaultsecretref.py +2 -4
- reconcile/utils/vcs.py +250 -0
- reconcile/vault_replication.py +38 -31
- reconcile/vpc_peerings_validator.py +82 -13
- tools/app_interface_metrics_exporter.py +70 -0
- tools/app_interface_reporter.py +44 -157
- tools/cli_commands/container_images_report.py +154 -0
- tools/cli_commands/cost_report/__init__.py +0 -0
- tools/cli_commands/cost_report/aws.py +137 -0
- tools/cli_commands/cost_report/cost_management_api.py +155 -0
- tools/cli_commands/cost_report/model.py +49 -0
- tools/cli_commands/cost_report/openshift.py +166 -0
- tools/cli_commands/cost_report/openshift_cost_optimization.py +187 -0
- tools/cli_commands/cost_report/response.py +124 -0
- tools/cli_commands/cost_report/util.py +72 -0
- tools/cli_commands/cost_report/view.py +524 -0
- tools/cli_commands/erv2.py +620 -0
- tools/cli_commands/gpg_encrypt.py +5 -8
- tools/cli_commands/systems_and_tools.py +489 -0
- tools/glitchtip_access_revalidation.py +1 -1
- tools/qontract_cli.py +2301 -673
- tools/saas_metrics_exporter/__init__.py +0 -0
- tools/saas_metrics_exporter/commit_distance/__init__.py +0 -0
- tools/saas_metrics_exporter/commit_distance/channel.py +63 -0
- tools/saas_metrics_exporter/commit_distance/commit_distance.py +103 -0
- tools/saas_metrics_exporter/commit_distance/metrics.py +19 -0
- tools/saas_metrics_exporter/main.py +99 -0
- tools/saas_promotion_state/__init__.py +0 -0
- tools/saas_promotion_state/saas_promotion_state.py +105 -0
- tools/sd_app_sre_alert_report.py +145 -0
- tools/template_validation.py +107 -0
- e2e_tests/cli.py +0 -83
- e2e_tests/create_namespace.py +0 -43
- e2e_tests/dedicated_admin_rolebindings.py +0 -44
- e2e_tests/dedicated_admin_test_base.py +0 -39
- e2e_tests/default_network_policies.py +0 -47
- e2e_tests/default_project_labels.py +0 -52
- e2e_tests/network_policy_test_base.py +0 -17
- e2e_tests/test_base.py +0 -56
- qontract_reconcile-0.10.0.dist-info/LICENSE +0 -201
- qontract_reconcile-0.10.0.dist-info/METADATA +0 -63
- qontract_reconcile-0.10.0.dist-info/RECORD +0 -586
- qontract_reconcile-0.10.0.dist-info/top_level.txt +0 -4
- reconcile/ecr_mirror.py +0 -152
- reconcile/github_scanner.py +0 -74
- reconcile/gitlab_integrations.py +0 -63
- reconcile/gql_definitions/ocm_oidc_idp/clusters.py +0 -195
- reconcile/gql_definitions/ocp_release_mirror/ocp_release_mirror.py +0 -287
- reconcile/integrations_validator.py +0 -18
- reconcile/jenkins_plugins.py +0 -129
- reconcile/kafka_clusters.py +0 -208
- reconcile/ocm_cluster_admin.py +0 -42
- reconcile/ocm_oidc_idp.py +0 -198
- reconcile/ocp_release_mirror.py +0 -373
- reconcile/prometheus_rules_tester_old.py +0 -436
- reconcile/saas_auto_promotions_manager/merge_request_manager/merge_request_manager.py +0 -279
- reconcile/saas_auto_promotions_manager/utils/vcs.py +0 -141
- reconcile/sentry_config.py +0 -613
- reconcile/sentry_helper.py +0 -69
- reconcile/test/conftest.py +0 -187
- reconcile/test/fixtures.py +0 -24
- reconcile/test/saas_auto_promotions_manager/conftest.py +0 -69
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/merge_request_manager/conftest.py +0 -110
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/merge_request_manager/data_keys.py +0 -10
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/merge_request_manager/test_housekeeping.py +0 -200
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/merge_request_manager/test_merge_request_manager.py +0 -151
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/renderer/conftest.py +0 -63
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/renderer/data_keys.py +0 -4
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/renderer/test_content_multiple_namespaces.py +0 -46
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/renderer/test_content_single_namespace.py +0 -94
- reconcile/test/saas_auto_promotions_manager/merge_request_manager/renderer/test_content_single_target.py +0 -44
- reconcile/test/saas_auto_promotions_manager/subscriber/conftest.py +0 -74
- reconcile/test/saas_auto_promotions_manager/subscriber/data_keys.py +0 -11
- reconcile/test/saas_auto_promotions_manager/subscriber/test_content_hash.py +0 -155
- reconcile/test/saas_auto_promotions_manager/subscriber/test_diff.py +0 -173
- reconcile/test/saas_auto_promotions_manager/subscriber/test_multiple_channels_config_hash.py +0 -226
- reconcile/test/saas_auto_promotions_manager/subscriber/test_multiple_channels_moving_ref.py +0 -224
- reconcile/test/saas_auto_promotions_manager/subscriber/test_single_channel_with_single_publisher.py +0 -350
- reconcile/test/saas_auto_promotions_manager/test_integration_test.py +0 -129
- reconcile/test/saas_auto_promotions_manager/utils/saas_files_inventory/test_multiple_publishers_for_single_channel.py +0 -70
- reconcile/test/saas_auto_promotions_manager/utils/saas_files_inventory/test_saas_files_use_target_config_hash.py +0 -63
- reconcile/test/saas_auto_promotions_manager/utils/saas_files_inventory/test_saas_files_with_auto_promote.py +0 -74
- reconcile/test/saas_auto_promotions_manager/utils/saas_files_inventory/test_saas_files_without_auto_promote.py +0 -65
- reconcile/test/test_aggregated_list.py +0 -237
- reconcile/test/test_amtool.py +0 -37
- reconcile/test/test_auto_promoter.py +0 -295
- reconcile/test/test_aws_ami_share.py +0 -68
- reconcile/test/test_aws_iam_keys.py +0 -70
- reconcile/test/test_aws_iam_password_reset.py +0 -35
- reconcile/test/test_aws_support_cases_sos.py +0 -23
- reconcile/test/test_checkpoint.py +0 -178
- reconcile/test/test_cli.py +0 -41
- reconcile/test/test_closedbox_endpoint_monitoring.py +0 -207
- reconcile/test/test_gabi_authorized_users.py +0 -72
- reconcile/test/test_github_org.py +0 -154
- reconcile/test/test_github_repo_invites.py +0 -123
- reconcile/test/test_gitlab_housekeeping.py +0 -88
- reconcile/test/test_gitlab_labeler.py +0 -129
- reconcile/test/test_gitlab_members.py +0 -283
- reconcile/test/test_instrumented_wrappers.py +0 -18
- reconcile/test/test_integrations_manager.py +0 -995
- reconcile/test/test_jenkins_worker_fleets.py +0 -55
- reconcile/test/test_jump_host.py +0 -117
- reconcile/test/test_ldap_users.py +0 -123
- reconcile/test/test_make.py +0 -28
- reconcile/test/test_ocm_additional_routers.py +0 -134
- reconcile/test/test_ocm_addons_upgrade_scheduler_org.py +0 -149
- reconcile/test/test_ocm_clusters.py +0 -598
- reconcile/test/test_ocm_clusters_manifest_updates.py +0 -89
- reconcile/test/test_ocm_oidc_idp.py +0 -315
- reconcile/test/test_ocm_update_recommended_version.py +0 -145
- reconcile/test/test_ocm_upgrade_scheduler.py +0 -614
- reconcile/test/test_ocm_upgrade_scheduler_org_updater.py +0 -129
- reconcile/test/test_openshift_base.py +0 -730
- reconcile/test/test_openshift_namespace_labels.py +0 -345
- reconcile/test/test_openshift_namespaces.py +0 -256
- reconcile/test/test_openshift_resource.py +0 -415
- reconcile/test/test_openshift_resources_base.py +0 -440
- reconcile/test/test_openshift_saas_deploy_change_tester.py +0 -310
- reconcile/test/test_openshift_tekton_resources.py +0 -253
- reconcile/test/test_openshift_upgrade_watcher.py +0 -146
- reconcile/test/test_prometheus_rules_tester.py +0 -151
- reconcile/test/test_prometheus_rules_tester_old.py +0 -77
- reconcile/test/test_quay_membership.py +0 -86
- reconcile/test/test_quay_mirror.py +0 -109
- reconcile/test/test_quay_mirror_org.py +0 -70
- reconcile/test/test_quay_repos.py +0 -59
- reconcile/test/test_queries.py +0 -53
- reconcile/test/test_repo_owners.py +0 -47
- reconcile/test/test_requests_sender.py +0 -139
- reconcile/test/test_saasherder.py +0 -1074
- reconcile/test/test_saasherder_allowed_secret_paths.py +0 -127
- reconcile/test/test_secret_reader.py +0 -153
- reconcile/test/test_slack_base.py +0 -185
- reconcile/test/test_slack_usergroups.py +0 -744
- reconcile/test/test_sql_query.py +0 -19
- reconcile/test/test_terraform_cloudflare_dns.py +0 -117
- reconcile/test/test_terraform_cloudflare_resources.py +0 -106
- reconcile/test/test_terraform_cloudflare_users.py +0 -749
- reconcile/test/test_terraform_resources.py +0 -257
- reconcile/test/test_terraform_tgw_attachments.py +0 -631
- reconcile/test/test_terraform_users.py +0 -57
- reconcile/test/test_terraform_vpc_peerings.py +0 -499
- reconcile/test/test_terraform_vpc_peerings_build_desired_state.py +0 -1061
- reconcile/test/test_unleash.py +0 -138
- reconcile/test/test_utils_aws_api.py +0 -240
- reconcile/test/test_utils_aws_helper.py +0 -80
- reconcile/test/test_utils_cluster_version_data.py +0 -177
- reconcile/test/test_utils_data_structures.py +0 -13
- reconcile/test/test_utils_disabled_integrations.py +0 -86
- reconcile/test/test_utils_expiration.py +0 -109
- reconcile/test/test_utils_external_resource_spec.py +0 -383
- reconcile/test/test_utils_external_resources.py +0 -247
- reconcile/test/test_utils_github_api.py +0 -73
- reconcile/test/test_utils_gitlab_api.py +0 -20
- reconcile/test/test_utils_gpg.py +0 -69
- reconcile/test/test_utils_gql.py +0 -81
- reconcile/test/test_utils_helm.py +0 -306
- reconcile/test/test_utils_helpers.py +0 -55
- reconcile/test/test_utils_imap_client.py +0 -65
- reconcile/test/test_utils_jjb_client.py +0 -52
- reconcile/test/test_utils_jsonpath.py +0 -286
- reconcile/test/test_utils_ldap_client.py +0 -51
- reconcile/test/test_utils_mr.py +0 -226
- reconcile/test/test_utils_mr_clusters_updates.py +0 -77
- reconcile/test/test_utils_oc.py +0 -984
- reconcile/test/test_utils_ocm.py +0 -110
- reconcile/test/test_utils_pagerduty_api.py +0 -251
- reconcile/test/test_utils_parse_dhms_duration.py +0 -34
- reconcile/test/test_utils_password_validator.py +0 -155
- reconcile/test/test_utils_quay_api.py +0 -86
- reconcile/test/test_utils_semver_helper.py +0 -19
- reconcile/test/test_utils_sharding.py +0 -56
- reconcile/test/test_utils_slack_api.py +0 -439
- reconcile/test/test_utils_smtp_client.py +0 -73
- reconcile/test/test_utils_state.py +0 -256
- reconcile/test/test_utils_terraform.py +0 -13
- reconcile/test/test_utils_terraform_client.py +0 -585
- reconcile/test/test_utils_terraform_config_client.py +0 -219
- reconcile/test/test_utils_terrascript_aws_client.py +0 -277
- reconcile/test/test_utils_terrascript_cloudflare_client.py +0 -597
- reconcile/test/test_utils_terrascript_cloudflare_resources.py +0 -26
- reconcile/test/test_vault_replication.py +0 -515
- reconcile/test/test_vault_utils.py +0 -47
- reconcile/test/test_version_bump.py +0 -18
- reconcile/test/test_vpc_peerings_validator.py +0 -103
- reconcile/test/test_wrong_region.py +0 -78
- reconcile/typed_queries/glitchtip_settings.py +0 -18
- reconcile/typed_queries/ocp_release_mirror.py +0 -11
- reconcile/unleash_watcher.py +0 -120
- reconcile/utils/git_secrets.py +0 -63
- reconcile/utils/mr/auto_promoter.py +0 -218
- reconcile/utils/sentry_client.py +0 -383
- release/test_version.py +0 -50
- release/version.py +0 -100
- tools/test/test_qontract_cli.py +0 -60
- tools/test/test_sre_checkpoints.py +0 -79
- /e2e_tests/__init__.py → /reconcile/aus/upgrades.py +0 -0
- /reconcile/{gql_definitions/ocp_release_mirror → aws_account_manager}/__init__.py +0 -0
- /reconcile/{test → aws_ami_cleanup}/__init__.py +0 -0
- /reconcile/{test/saas_auto_promotions_manager → aws_cloudwatch_log_retention}/__init__.py +0 -0
- /reconcile/{test/saas_auto_promotions_manager/merge_request_manager → aws_saml_idp}/__init__.py +0 -0
- /reconcile/{test/saas_auto_promotions_manager/merge_request_manager/merge_request_manager → aws_saml_roles}/__init__.py +0 -0
- /reconcile/{test/saas_auto_promotions_manager/merge_request_manager/renderer → aws_version_sync}/__init__.py +0 -0
- /reconcile/{test/saas_auto_promotions_manager/subscriber → aws_version_sync/merge_request_manager}/__init__.py +0 -0
- /reconcile/{test/saas_auto_promotions_manager/utils → cluster_auth_rhidp}/__init__.py +0 -0
- /reconcile/{test/saas_auto_promotions_manager/utils/saas_files_inventory → dynatrace_token_provider}/__init__.py +0 -0
- {release → reconcile/endpoints_discovery}/__init__.py +0 -0
- {tools/test → reconcile/external_resources}/__init__.py +0 -0
tools/qontract_cli.py
CHANGED
@@ -1,31 +1,40 @@
|
|
1
1
|
#!/usr/bin/env python3
|
2
|
+
# ruff: noqa: PLC0415 - `import` should be at the top-level of a file
|
2
3
|
|
3
4
|
import base64
|
4
5
|
import json
|
6
|
+
import logging
|
5
7
|
import os
|
6
8
|
import re
|
7
9
|
import sys
|
10
|
+
import tempfile
|
11
|
+
import textwrap
|
8
12
|
from collections import defaultdict
|
9
|
-
from datetime import
|
10
|
-
|
11
|
-
|
12
|
-
|
13
|
-
Optional,
|
13
|
+
from datetime import (
|
14
|
+
UTC,
|
15
|
+
datetime,
|
16
|
+
timedelta,
|
14
17
|
)
|
18
|
+
from operator import itemgetter
|
19
|
+
from pathlib import Path
|
20
|
+
from statistics import median
|
21
|
+
from textwrap import dedent
|
22
|
+
from typing import Any
|
15
23
|
|
24
|
+
import boto3
|
16
25
|
import click
|
26
|
+
import click.core
|
17
27
|
import requests
|
18
28
|
import yaml
|
19
29
|
from rich import box
|
20
|
-
from rich
|
21
|
-
|
22
|
-
|
23
|
-
)
|
30
|
+
from rich import print as rich_print
|
31
|
+
from rich.console import Console, Group
|
32
|
+
from rich.prompt import Confirm
|
24
33
|
from rich.table import Table
|
25
34
|
from rich.tree import Tree
|
26
|
-
from sretoolbox.utils import threaded
|
27
35
|
|
28
36
|
import reconcile.aus.base as aus
|
37
|
+
import reconcile.change_owners.change_log_tracking as cl
|
29
38
|
import reconcile.openshift_base as ob
|
30
39
|
import reconcile.openshift_resources_base as orb
|
31
40
|
import reconcile.prometheus_rules_tester.integration as ptr
|
@@ -34,31 +43,59 @@ import reconcile.terraform_users as tfu
|
|
34
43
|
import reconcile.terraform_vpc_peerings as tfvpc
|
35
44
|
from reconcile import queries
|
36
45
|
from reconcile.aus.base import (
|
46
|
+
AbstractUpgradePolicy,
|
37
47
|
AdvancedUpgradeSchedulerBaseIntegration,
|
38
48
|
AdvancedUpgradeSchedulerBaseIntegrationParams,
|
49
|
+
addon_upgrade_policy_soonest_next_run,
|
50
|
+
init_addon_service_version,
|
39
51
|
)
|
52
|
+
from reconcile.aus.models import OrganizationUpgradeSpec
|
40
53
|
from reconcile.change_owners.bundle import NoOpFileDiffResolver
|
54
|
+
from reconcile.change_owners.change_log_tracking import (
|
55
|
+
BUNDLE_DIFFS_OBJ,
|
56
|
+
ChangeLog,
|
57
|
+
ChangeLogItem,
|
58
|
+
)
|
41
59
|
from reconcile.change_owners.change_owners import (
|
42
60
|
fetch_change_type_processors,
|
43
61
|
fetch_self_service_roles,
|
44
62
|
)
|
45
63
|
from reconcile.checkpoint import report_invalid_metadata
|
46
64
|
from reconcile.cli import (
|
65
|
+
TERRAFORM_VERSION,
|
66
|
+
TERRAFORM_VERSION_REGEX,
|
67
|
+
cluster_name,
|
47
68
|
config_file,
|
69
|
+
namespace_name,
|
48
70
|
use_jump_host,
|
49
71
|
)
|
72
|
+
from reconcile.cli import (
|
73
|
+
threaded as thread_pool_size,
|
74
|
+
)
|
75
|
+
from reconcile.gql_definitions.advanced_upgrade_service.aus_clusters import (
|
76
|
+
query as aus_clusters_query,
|
77
|
+
)
|
50
78
|
from reconcile.gql_definitions.common.app_interface_vault_settings import (
|
51
79
|
AppInterfaceSettingsV1,
|
52
80
|
)
|
81
|
+
from reconcile.gql_definitions.fragments.aus_organization import AUSOCMOrganization
|
82
|
+
from reconcile.gql_definitions.integrations import integrations as integrations_gql
|
83
|
+
from reconcile.gql_definitions.maintenance import maintenances as maintenances_gql
|
53
84
|
from reconcile.jenkins_job_builder import init_jjb
|
54
|
-
from reconcile.prometheus_rules_tester_old import get_data_from_jinja_test_template
|
55
85
|
from reconcile.slack_base import slackapi_from_queries
|
86
|
+
from reconcile.status_board import StatusBoardExporterIntegration
|
56
87
|
from reconcile.typed_queries.alerting_services_settings import get_alerting_services
|
88
|
+
from reconcile.typed_queries.app_interface_repo_url import get_app_interface_repo_url
|
57
89
|
from reconcile.typed_queries.app_interface_vault_settings import (
|
58
90
|
get_app_interface_vault_settings,
|
59
91
|
)
|
92
|
+
from reconcile.typed_queries.app_quay_repos_escalation_policies import (
|
93
|
+
get_apps_quay_repos_escalation_policies,
|
94
|
+
)
|
60
95
|
from reconcile.typed_queries.clusters import get_clusters
|
61
96
|
from reconcile.typed_queries.saas_files import get_saas_files
|
97
|
+
from reconcile.typed_queries.slo_documents import get_slo_documents
|
98
|
+
from reconcile.typed_queries.status_board import get_status_board
|
62
99
|
from reconcile.utils import (
|
63
100
|
amtool,
|
64
101
|
config,
|
@@ -67,8 +104,18 @@ from reconcile.utils import (
|
|
67
104
|
promtool,
|
68
105
|
)
|
69
106
|
from reconcile.utils.aws_api import AWSApi
|
70
|
-
from reconcile.utils.
|
107
|
+
from reconcile.utils.binary import (
|
108
|
+
binary,
|
109
|
+
binary_version,
|
110
|
+
)
|
111
|
+
from reconcile.utils.early_exit_cache import (
|
112
|
+
CacheKey,
|
113
|
+
CacheKeyWithDigest,
|
114
|
+
CacheValue,
|
115
|
+
EarlyExitCache,
|
116
|
+
)
|
71
117
|
from reconcile.utils.environ import environ
|
118
|
+
from reconcile.utils.external_resource_spec import ExternalResourceSpec
|
72
119
|
from reconcile.utils.external_resources import (
|
73
120
|
PROVIDER_AWS,
|
74
121
|
get_external_resource_specs,
|
@@ -79,18 +126,29 @@ from reconcile.utils.gitlab_api import (
|
|
79
126
|
MRState,
|
80
127
|
MRStatus,
|
81
128
|
)
|
129
|
+
from reconcile.utils.gql import GqlApiSingleton
|
82
130
|
from reconcile.utils.jjb_client import JJB
|
131
|
+
from reconcile.utils.keycloak import (
|
132
|
+
KeycloakAPI,
|
133
|
+
SSOClient,
|
134
|
+
)
|
83
135
|
from reconcile.utils.mr.labels import (
|
136
|
+
AVS,
|
84
137
|
SAAS_FILE_UPDATE,
|
85
138
|
SELF_SERVICEABLE,
|
139
|
+
SHOW_SELF_SERVICEABLE_IN_REVIEW_QUEUE,
|
86
140
|
)
|
87
141
|
from reconcile.utils.oc import (
|
88
142
|
OC_Map,
|
89
143
|
OCLogMsg,
|
90
144
|
)
|
91
|
-
from reconcile.utils.oc_map import
|
92
|
-
|
145
|
+
from reconcile.utils.oc_map import (
|
146
|
+
init_oc_map_from_clusters,
|
147
|
+
)
|
148
|
+
from reconcile.utils.ocm import OCM_PRODUCT_ROSA, OCMMap
|
149
|
+
from reconcile.utils.ocm_base_client import init_ocm_base_client
|
93
150
|
from reconcile.utils.output import print_output
|
151
|
+
from reconcile.utils.saasherder.models import TargetSpec
|
94
152
|
from reconcile.utils.saasherder.saasherder import SaasHerder
|
95
153
|
from reconcile.utils.secret_reader import (
|
96
154
|
SecretReader,
|
@@ -99,10 +157,22 @@ from reconcile.utils.secret_reader import (
|
|
99
157
|
from reconcile.utils.semver_helper import parse_semver
|
100
158
|
from reconcile.utils.state import init_state
|
101
159
|
from reconcile.utils.terraform_client import TerraformClient as Terraform
|
160
|
+
from tools.cli_commands.cost_report.aws import AwsCostReportCommand
|
161
|
+
from tools.cli_commands.cost_report.openshift import OpenShiftCostReportCommand
|
162
|
+
from tools.cli_commands.cost_report.openshift_cost_optimization import (
|
163
|
+
OpenShiftCostOptimizationReportCommand,
|
164
|
+
)
|
165
|
+
from tools.cli_commands.erv2 import (
|
166
|
+
Erv2Cli,
|
167
|
+
TerraformCli,
|
168
|
+
progress_spinner,
|
169
|
+
task,
|
170
|
+
)
|
102
171
|
from tools.cli_commands.gpg_encrypt import (
|
103
172
|
GPGEncryptCommand,
|
104
173
|
GPGEncryptCommandData,
|
105
174
|
)
|
175
|
+
from tools.cli_commands.systems_and_tools import get_systems_and_tools_inventory
|
106
176
|
from tools.sre_checkpoints import (
|
107
177
|
full_name,
|
108
178
|
get_latest_sre_checkpoints,
|
@@ -143,6 +213,11 @@ def root(ctx, configfile):
|
|
143
213
|
gql.init_from_config()
|
144
214
|
|
145
215
|
|
216
|
+
@root.result_callback()
|
217
|
+
def exit_cli(ctx, configfile):
|
218
|
+
GqlApiSingleton.close()
|
219
|
+
|
220
|
+
|
146
221
|
@root.group()
|
147
222
|
@output
|
148
223
|
@sort
|
@@ -258,26 +333,26 @@ def cluster_upgrades(ctx, name):
|
|
258
333
|
def version_history(ctx):
|
259
334
|
import reconcile.aus.ocm_upgrade_scheduler as ous
|
260
335
|
|
261
|
-
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
|
266
|
-
|
267
|
-
dry_run=True,
|
268
|
-
upgrade_policies=[],
|
269
|
-
ocm_map=ocm_map,
|
270
|
-
integration=ous.QONTRACT_INTEGRATION,
|
271
|
-
)
|
336
|
+
clusters = aus_clusters_query(query_func=gql.get_api().query).clusters or []
|
337
|
+
orgs = {
|
338
|
+
c.ocm.org_id: OrganizationUpgradeSpec(org=c.ocm, specs=[])
|
339
|
+
for c in clusters
|
340
|
+
if c.ocm and c.upgrade_policy
|
341
|
+
}
|
272
342
|
|
273
343
|
results = []
|
274
|
-
for
|
344
|
+
for org_spec in orgs.values():
|
345
|
+
version_data = aus.get_version_data_map(
|
346
|
+
dry_run=True,
|
347
|
+
org_upgrade_spec=org_spec,
|
348
|
+
integration=ous.QONTRACT_INTEGRATION,
|
349
|
+
).get(org_spec.org.environment.name, org_spec.org.org_id)
|
275
350
|
for version, version_history in version_data.versions.items():
|
276
351
|
if not version:
|
277
352
|
continue
|
278
353
|
for workload, workload_data in version_history.workloads.items():
|
279
354
|
item = {
|
280
|
-
"ocm":
|
355
|
+
"ocm": f"{org_spec.org.environment.name}/{org_spec.org.org_id}",
|
281
356
|
"version": parse_semver(version),
|
282
357
|
"workload": workload,
|
283
358
|
"soak_days": round(workload_data.soak_days, 2),
|
@@ -289,51 +364,33 @@ def version_history(ctx):
|
|
289
364
|
print_output(ctx.obj["options"], results, columns)
|
290
365
|
|
291
366
|
|
292
|
-
def soaking_days(
|
293
|
-
version_data_map: dict[str, VersionData],
|
294
|
-
upgrades: list[str],
|
295
|
-
workload: str,
|
296
|
-
only_soaking: bool,
|
297
|
-
) -> dict[str, float]:
|
298
|
-
soaking = {}
|
299
|
-
for version in upgrades:
|
300
|
-
for h in version_data_map.values():
|
301
|
-
workload_history = h.workload_history(version, workload)
|
302
|
-
soaking[version] = round(workload_history.soak_days, 2)
|
303
|
-
if not only_soaking and version not in soaking:
|
304
|
-
soaking[version] = 0
|
305
|
-
return soaking
|
306
|
-
|
307
|
-
|
308
367
|
def get_upgrade_policies_data(
|
309
|
-
|
368
|
+
org_upgrade_specs: list[OrganizationUpgradeSpec],
|
310
369
|
md_output,
|
311
370
|
integration,
|
312
371
|
workload=None,
|
313
372
|
show_only_soaking_upgrades=False,
|
314
373
|
by_workload=False,
|
315
|
-
):
|
316
|
-
if not
|
374
|
+
) -> list:
|
375
|
+
if not org_upgrade_specs:
|
317
376
|
return []
|
318
377
|
|
319
|
-
|
320
|
-
|
321
|
-
clusters=clusters,
|
322
|
-
settings=settings,
|
323
|
-
init_version_gates=True,
|
324
|
-
)
|
325
|
-
current_state = aus.fetch_current_state(clusters, ocm_map)
|
326
|
-
desired_state = aus.fetch_desired_state(clusters, ocm_map)
|
327
|
-
|
328
|
-
version_data_map = aus.get_version_data_map(
|
329
|
-
dry_run=True, upgrade_policies=[], ocm_map=ocm_map, integration=integration
|
330
|
-
)
|
378
|
+
vault_settings = get_app_interface_vault_settings()
|
379
|
+
secret_reader = create_secret_reader(use_vault=vault_settings.vault)
|
331
380
|
|
332
381
|
results = []
|
333
382
|
|
334
|
-
def soaking_str(
|
335
|
-
|
336
|
-
|
383
|
+
def soaking_str(
|
384
|
+
soaking: dict[str, Any],
|
385
|
+
upgrade_policy: AbstractUpgradePolicy | None,
|
386
|
+
upgradeable_version: str | None,
|
387
|
+
) -> str:
|
388
|
+
if upgrade_policy:
|
389
|
+
upgrade_version = upgrade_policy.version
|
390
|
+
upgrade_next_run = upgrade_policy.next_run
|
391
|
+
else:
|
392
|
+
upgrade_version = None
|
393
|
+
upgrade_next_run = None
|
337
394
|
upgrade_emoji = "💫"
|
338
395
|
if upgrade_next_run:
|
339
396
|
dt = datetime.strptime(upgrade_next_run, "%Y-%m-%dT%H:%M:%SZ")
|
@@ -353,88 +410,96 @@ def get_upgrade_policies_data(
|
|
353
410
|
sorted_soaking[i] = (v, f"{s} 🎉")
|
354
411
|
return ", ".join([f"{v} ({s})" for v, s in sorted_soaking])
|
355
412
|
|
356
|
-
for
|
357
|
-
|
358
|
-
|
359
|
-
soakdays = c.get("conditions", {}).get("soakDays")
|
360
|
-
mutexes = c.get("conditions", {}).get("mutexes") or []
|
361
|
-
sector = ""
|
362
|
-
if c.get("conditions", {}).get("sector"):
|
363
|
-
sector = c["conditions"]["sector"].name
|
364
|
-
ocm_org = ocm_map.get(cluster_name)
|
365
|
-
ocm_spec = ocm_org.clusters[cluster_name]
|
366
|
-
item = {
|
367
|
-
"ocm": ocm_org.name,
|
368
|
-
"cluster": cluster_name,
|
369
|
-
"id": ocm_spec.spec.id,
|
370
|
-
"api": ocm_spec.server_url,
|
371
|
-
"console": ocm_spec.console_url,
|
372
|
-
"domain": ocm_spec.domain,
|
373
|
-
"version": version,
|
374
|
-
"channel": channel,
|
375
|
-
"schedule": schedule,
|
376
|
-
"sector": sector,
|
377
|
-
"soak_days": soakdays,
|
378
|
-
"mutexes": ", ".join(mutexes),
|
379
|
-
}
|
413
|
+
for org_spec in org_upgrade_specs:
|
414
|
+
ocm_api = init_ocm_base_client(org_spec.org.environment, secret_reader)
|
415
|
+
current_state = aus.fetch_current_state(ocm_api, org_spec)
|
380
416
|
|
381
|
-
|
382
|
-
|
383
|
-
|
417
|
+
version_data = aus.get_version_data_map(
|
418
|
+
dry_run=True,
|
419
|
+
org_upgrade_spec=org_spec,
|
420
|
+
integration=integration,
|
421
|
+
).get(org_spec.org.environment.name, org_spec.org.org_id)
|
384
422
|
|
385
|
-
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
|
423
|
+
for upgrade_spec in org_spec.specs:
|
424
|
+
cluster = upgrade_spec.cluster
|
425
|
+
item = {
|
426
|
+
"ocm": org_spec.org.name,
|
427
|
+
"cluster": cluster.name,
|
428
|
+
"id": cluster.id,
|
429
|
+
"api": cluster.api_url,
|
430
|
+
"console": cluster.console_url,
|
431
|
+
"domain": cluster.base_domain,
|
432
|
+
"version": cluster.version.raw_id,
|
433
|
+
"channel": cluster.version.channel_group,
|
434
|
+
"schedule": upgrade_spec.upgrade_policy.schedule,
|
435
|
+
"sector": upgrade_spec.upgrade_policy.conditions.sector or "",
|
436
|
+
"soak_days": upgrade_spec.upgrade_policy.conditions.soak_days,
|
437
|
+
"mutexes": ", ".join(
|
438
|
+
upgrade_spec.upgrade_policy.conditions.mutexes or []
|
439
|
+
),
|
440
|
+
}
|
390
441
|
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
upgrade_policy = current[0]
|
442
|
+
if not upgrade_spec.upgrade_policy.workloads:
|
443
|
+
results.append(item)
|
444
|
+
continue
|
395
445
|
|
396
|
-
|
397
|
-
|
398
|
-
|
446
|
+
upgrades = [
|
447
|
+
u
|
448
|
+
for u in cluster.available_upgrades()
|
449
|
+
if not upgrade_spec.version_blocked(u)
|
450
|
+
]
|
451
|
+
|
452
|
+
current = [c for c in current_state if c.cluster.name == cluster.name]
|
453
|
+
upgrade_policy = None
|
454
|
+
if current and current[0].schedule_type == "manual":
|
455
|
+
upgrade_policy = current[0]
|
456
|
+
|
457
|
+
sector = (
|
458
|
+
org_spec.sectors.get(upgrade_spec.upgrade_policy.conditions.sector)
|
459
|
+
if upgrade_spec.upgrade_policy.conditions.sector
|
460
|
+
else None
|
461
|
+
)
|
462
|
+
upgradeable_version = aus.upgradeable_version(
|
463
|
+
upgrade_spec, version_data, sector
|
464
|
+
)
|
399
465
|
|
400
|
-
|
401
|
-
|
402
|
-
|
403
|
-
|
404
|
-
|
405
|
-
|
406
|
-
|
466
|
+
workload_soaking_upgrades = {}
|
467
|
+
for w in upgrade_spec.upgrade_policy.workloads:
|
468
|
+
if not workload or workload == w:
|
469
|
+
s = aus.soaking_days(
|
470
|
+
version_data,
|
471
|
+
upgrades,
|
472
|
+
w,
|
473
|
+
show_only_soaking_upgrades,
|
474
|
+
)
|
475
|
+
workload_soaking_upgrades[w] = s
|
407
476
|
|
408
|
-
|
409
|
-
|
410
|
-
|
411
|
-
|
412
|
-
{
|
477
|
+
if by_workload:
|
478
|
+
for w, soaking in workload_soaking_upgrades.items():
|
479
|
+
i = item.copy()
|
480
|
+
i.update({
|
413
481
|
"workload": w,
|
414
482
|
"soaking_upgrades": soaking_str(
|
415
483
|
soaking, upgrade_policy, upgradeable_version
|
416
484
|
),
|
417
|
-
}
|
418
|
-
|
419
|
-
|
420
|
-
|
421
|
-
|
422
|
-
|
423
|
-
|
424
|
-
|
425
|
-
|
426
|
-
|
427
|
-
|
428
|
-
|
429
|
-
item.update(
|
430
|
-
{
|
485
|
+
})
|
486
|
+
results.append(i)
|
487
|
+
else:
|
488
|
+
workloads = sorted(upgrade_spec.upgrade_policy.workloads)
|
489
|
+
w = ", ".join(workloads)
|
490
|
+
soaking = {}
|
491
|
+
for v in upgrades:
|
492
|
+
soaks = [s.get(v, 0) for s in workload_soaking_upgrades.values()]
|
493
|
+
min_soaks = min(soaks)
|
494
|
+
if not show_only_soaking_upgrades or min_soaks > 0:
|
495
|
+
soaking[v] = min_soaks
|
496
|
+
item.update({
|
431
497
|
"workload": w,
|
432
498
|
"soaking_upgrades": soaking_str(
|
433
499
|
soaking, upgrade_policy, upgradeable_version
|
434
500
|
),
|
435
|
-
}
|
436
|
-
|
437
|
-
results.append(item)
|
501
|
+
})
|
502
|
+
results.append(item)
|
438
503
|
|
439
504
|
return results
|
440
505
|
|
@@ -492,109 +557,15 @@ def cluster_upgrade_policies(
|
|
492
557
|
show_only_soaking_upgrades=False,
|
493
558
|
by_workload=False,
|
494
559
|
):
|
495
|
-
|
496
|
-
|
497
|
-
)
|
498
|
-
|
499
|
-
integration = OCMClusterUpgradeSchedulerIntegration(
|
500
|
-
AdvancedUpgradeSchedulerBaseIntegrationParams()
|
501
|
-
)
|
502
|
-
generate_cluster_upgrade_policies_report(
|
503
|
-
ctx,
|
504
|
-
integration=integration,
|
505
|
-
cluster=cluster,
|
506
|
-
workload=workload,
|
507
|
-
show_only_soaking_upgrades=show_only_soaking_upgrades,
|
508
|
-
by_workload=by_workload,
|
509
|
-
)
|
510
|
-
|
511
|
-
|
512
|
-
def generate_cluster_upgrade_policies_report(
|
513
|
-
ctx,
|
514
|
-
integration: AdvancedUpgradeSchedulerBaseIntegration,
|
515
|
-
cluster: Optional[str],
|
516
|
-
workload: Optional[str],
|
517
|
-
show_only_soaking_upgrades: bool,
|
518
|
-
by_workload: bool,
|
519
|
-
) -> None:
|
520
|
-
md_output = ctx.obj["options"]["output"] == "md"
|
521
|
-
|
522
|
-
upgrade_specs = integration.get_upgrade_specs()
|
523
|
-
clusters = [
|
524
|
-
s.dict(by_alias=True)
|
525
|
-
for org_upgrade_specs in upgrade_specs.values()
|
526
|
-
for org_upgrade_spec in org_upgrade_specs.values()
|
527
|
-
for s in org_upgrade_spec.specs
|
528
|
-
]
|
529
|
-
|
530
|
-
if cluster:
|
531
|
-
clusters = [c for c in clusters if cluster == c["name"]]
|
532
|
-
if workload:
|
533
|
-
clusters = [
|
534
|
-
c for c in clusters if workload in c["upgradePolicy"].get("workloads", [])
|
535
|
-
]
|
536
|
-
|
537
|
-
results = get_upgrade_policies_data(
|
538
|
-
clusters,
|
539
|
-
md_output,
|
540
|
-
integration.name,
|
541
|
-
workload,
|
542
|
-
show_only_soaking_upgrades,
|
543
|
-
by_workload,
|
560
|
+
print(
|
561
|
+
"https://grafana.app-sre.devshift.net/d/ukLXCSwVz/aus-cluster-upgrade-overview"
|
544
562
|
)
|
545
563
|
|
546
|
-
if md_output:
|
547
|
-
fields = [
|
548
|
-
{"key": "cluster", "sortable": True},
|
549
|
-
{"key": "version", "sortable": True},
|
550
|
-
{"key": "channel", "sortable": True},
|
551
|
-
{"key": "schedule"},
|
552
|
-
{"key": "sector", "sortable": True},
|
553
|
-
{"key": "mutexes", "sortable": True},
|
554
|
-
{"key": "soak_days", "sortable": True},
|
555
|
-
{"key": "workload"},
|
556
|
-
{"key": "soaking_upgrades"},
|
557
|
-
]
|
558
|
-
md = """
|
559
|
-
{}
|
560
|
-
|
561
|
-
```json:table
|
562
|
-
{}
|
563
|
-
```
|
564
|
-
"""
|
565
|
-
md = md.format(
|
566
|
-
upgrade_policies_output_description,
|
567
|
-
json.dumps(
|
568
|
-
{"fields": fields, "items": results, "filter": True, "caption": ""},
|
569
|
-
indent=1,
|
570
|
-
),
|
571
|
-
)
|
572
|
-
print(md)
|
573
|
-
else:
|
574
|
-
columns = [
|
575
|
-
"cluster",
|
576
|
-
"version",
|
577
|
-
"channel",
|
578
|
-
"schedule",
|
579
|
-
"sector",
|
580
|
-
"mutexes",
|
581
|
-
"soak_days",
|
582
|
-
"workload",
|
583
|
-
"soaking_upgrades",
|
584
|
-
]
|
585
|
-
ctx.obj["options"]["to_string"] = True
|
586
|
-
print_output(ctx.obj["options"], results, columns)
|
587
|
-
|
588
564
|
|
589
|
-
def inherit_version_data_text(
|
590
|
-
|
591
|
-
if not ocm_specs_for_org:
|
592
|
-
raise ValueError(f"{ocm_org} not found in list of organizations")
|
593
|
-
ocm_spec = ocm_specs_for_org[0]
|
594
|
-
inherit_version_data = ocm_spec["inheritVersionData"]
|
595
|
-
if not inherit_version_data:
|
565
|
+
def inherit_version_data_text(org: AUSOCMOrganization) -> str:
|
566
|
+
if not org.inherit_version_data:
|
596
567
|
return ""
|
597
|
-
inherited_orgs = [f"[{o
|
568
|
+
inherited_orgs = [f"[{o.name}](#{o.name})" for o in org.inherit_version_data]
|
598
569
|
return f"inheriting version data from {', '.join(inherited_orgs)}"
|
599
570
|
|
600
571
|
|
@@ -616,16 +587,37 @@ def ocm_fleet_upgrade_policies(
|
|
616
587
|
|
617
588
|
|
618
589
|
@get.command()
|
590
|
+
@click.option(
|
591
|
+
"--ocm-env",
|
592
|
+
help="The OCM environment AUS should operator on. If none is specified, all environments will be operated on.",
|
593
|
+
required=False,
|
594
|
+
envvar="AUS_OCM_ENV",
|
595
|
+
)
|
596
|
+
@click.option(
|
597
|
+
"--ocm-org-ids",
|
598
|
+
help="A comma seperated list of OCM organization IDs AUS should operator on. If none is specified, all organizations are considered.",
|
599
|
+
required=False,
|
600
|
+
envvar="AUS_OCM_ORG_IDS",
|
601
|
+
)
|
602
|
+
@click.option(
|
603
|
+
"--ignore-sts-clusters",
|
604
|
+
is_flag=True,
|
605
|
+
default=os.environ.get("IGNORE_STS_CLUSTERS", False),
|
606
|
+
help="Ignore STS clusters",
|
607
|
+
)
|
619
608
|
@click.pass_context
|
620
|
-
def aus_fleet_upgrade_policies(
|
621
|
-
ctx,
|
622
|
-
):
|
609
|
+
def aus_fleet_upgrade_policies(ctx, ocm_env, ocm_org_ids, ignore_sts_clusters):
|
623
610
|
from reconcile.aus.advanced_upgrade_service import AdvancedUpgradeServiceIntegration
|
624
611
|
|
612
|
+
parsed_ocm_org_ids = set(ocm_org_ids.split(",")) if ocm_org_ids else None
|
625
613
|
generate_fleet_upgrade_policices_report(
|
626
614
|
ctx,
|
627
615
|
AdvancedUpgradeServiceIntegration(
|
628
|
-
AdvancedUpgradeSchedulerBaseIntegrationParams(
|
616
|
+
AdvancedUpgradeSchedulerBaseIntegrationParams(
|
617
|
+
ocm_environment=ocm_env,
|
618
|
+
ocm_organization_ids=parsed_ocm_org_ids,
|
619
|
+
ignore_sts_clusters=ignore_sts_clusters,
|
620
|
+
)
|
629
621
|
),
|
630
622
|
)
|
631
623
|
|
@@ -633,25 +625,18 @@ def aus_fleet_upgrade_policies(
|
|
633
625
|
def generate_fleet_upgrade_policices_report(
|
634
626
|
ctx, aus_integration: AdvancedUpgradeSchedulerBaseIntegration
|
635
627
|
):
|
636
|
-
|
637
628
|
md_output = ctx.obj["options"]["output"] == "md"
|
638
629
|
|
639
|
-
|
640
|
-
|
641
|
-
|
642
|
-
|
643
|
-
|
644
|
-
for s in org_upgrade_spec.specs
|
645
|
-
]
|
646
|
-
|
647
|
-
ocm_org_specs = [
|
648
|
-
org_upgrade_spec.org.dict(by_alias=True)
|
649
|
-
for org_upgrade_specs in upgrade_specs.values()
|
650
|
-
for org_upgrade_spec in org_upgrade_specs.values()
|
651
|
-
]
|
630
|
+
org_upgrade_specs: dict[str, OrganizationUpgradeSpec] = {}
|
631
|
+
for orgs in aus_integration.get_upgrade_specs().values():
|
632
|
+
for org_spec in orgs.values():
|
633
|
+
if org_spec.specs:
|
634
|
+
org_upgrade_specs[org_spec.org.name] = org_spec
|
652
635
|
|
653
636
|
results = get_upgrade_policies_data(
|
654
|
-
|
637
|
+
list(org_upgrade_specs.values()),
|
638
|
+
md_output,
|
639
|
+
aus_integration.name,
|
655
640
|
)
|
656
641
|
|
657
642
|
if md_output:
|
@@ -684,7 +669,7 @@ def generate_fleet_upgrade_policices_report(
|
|
684
669
|
print(
|
685
670
|
ocm_org_section.format(
|
686
671
|
ocm_org,
|
687
|
-
inherit_version_data_text(ocm_org
|
672
|
+
inherit_version_data_text(org_upgrade_specs[ocm_org].org),
|
688
673
|
json_data,
|
689
674
|
)
|
690
675
|
)
|
@@ -708,9 +693,9 @@ def generate_fleet_upgrade_policices_report(
|
|
708
693
|
|
709
694
|
@get.command()
|
710
695
|
@click.pass_context
|
711
|
-
def ocm_addon_upgrade_policies(ctx):
|
712
|
-
|
696
|
+
def ocm_addon_upgrade_policies(ctx: click.core.Context) -> None:
|
713
697
|
import reconcile.aus.ocm_addons_upgrade_scheduler_org as oauso
|
698
|
+
from reconcile.aus.models import ClusterAddonUpgradeSpec
|
714
699
|
|
715
700
|
integration = oauso.OCMAddonsUpgradeSchedulerOrgIntegration(
|
716
701
|
AdvancedUpgradeSchedulerBaseIntegrationParams()
|
@@ -721,39 +706,36 @@ def ocm_addon_upgrade_policies(ctx):
|
|
721
706
|
print("We only support md output for now")
|
722
707
|
sys.exit(1)
|
723
708
|
|
724
|
-
|
709
|
+
org_upgrade_specs: dict[str, OrganizationUpgradeSpec] = {}
|
710
|
+
for orgs in integration.get_upgrade_specs().values():
|
711
|
+
for org_spec in orgs.values():
|
712
|
+
if org_spec.specs:
|
713
|
+
org_upgrade_specs[org_spec.org.name] = org_spec
|
714
|
+
|
715
|
+
output: dict[str, list] = {}
|
716
|
+
|
717
|
+
for org_upgrade_spec in org_upgrade_specs.values():
|
718
|
+
ocm_output = output.setdefault(org_upgrade_spec.org.name, [])
|
719
|
+
for spec in org_upgrade_spec.specs:
|
720
|
+
if isinstance(spec, ClusterAddonUpgradeSpec):
|
721
|
+
available_upgrades = spec.get_available_upgrades()
|
722
|
+
next_version = (
|
723
|
+
available_upgrades[-1] if len(available_upgrades) > 0 else ""
|
724
|
+
)
|
725
|
+
ocm_output.append({
|
726
|
+
"cluster": spec.cluster.name,
|
727
|
+
"addon_id": spec.addon.id,
|
728
|
+
"current_version": spec.current_version,
|
729
|
+
"schedule": spec.upgrade_policy.schedule,
|
730
|
+
"sector": spec.upgrade_policy.conditions.sector,
|
731
|
+
"mutexes": ", ".join(spec.upgrade_policy.conditions.mutexes or []),
|
732
|
+
"soak_days": spec.upgrade_policy.conditions.soak_days,
|
733
|
+
"workloads": ", ".join(spec.upgrade_policy.workloads),
|
734
|
+
"next_version": next_version
|
735
|
+
if next_version != spec.current_version
|
736
|
+
else "",
|
737
|
+
})
|
725
738
|
|
726
|
-
output = {}
|
727
|
-
for upgrade_policies_per_org in upgrade_specs.values():
|
728
|
-
for org_name, org_spec in upgrade_policies_per_org.items():
|
729
|
-
ocm_map, addon_states = oauso.get_state_for_org_spec_per_addon(
|
730
|
-
org_spec, fetch_current_state=False
|
731
|
-
)
|
732
|
-
ocm = ocm_map[org_name]
|
733
|
-
for addon_state in addon_states:
|
734
|
-
next_version = ocm.get_addon_version(addon_state.addon_id)
|
735
|
-
ocm_output = output.setdefault(org_name, [])
|
736
|
-
for d in addon_state.desired_state:
|
737
|
-
sector = ""
|
738
|
-
conditions = d.get("conditions") or {}
|
739
|
-
if conditions.get("sector"):
|
740
|
-
sector = conditions["sector"].name
|
741
|
-
version = d["current_version"]
|
742
|
-
ocm_output.append(
|
743
|
-
{
|
744
|
-
"cluster": d["cluster"],
|
745
|
-
"addon_id": addon_state.addon_id,
|
746
|
-
"current_version": version,
|
747
|
-
"schedule": d["schedule"],
|
748
|
-
"sector": sector,
|
749
|
-
"mutexes": ", ".join(conditions.get("mutexes") or []),
|
750
|
-
"soak_days": conditions.get("soakDays"),
|
751
|
-
"workloads": ", ".join(d["workloads"]),
|
752
|
-
"next_version": next_version
|
753
|
-
if next_version != version
|
754
|
-
else "",
|
755
|
-
}
|
756
|
-
)
|
757
739
|
fields = [
|
758
740
|
{"key": "cluster", "sortable": True},
|
759
741
|
{"key": "addon_id", "sortable": True},
|
@@ -772,11 +754,6 @@ def ocm_addon_upgrade_policies(ctx):
|
|
772
754
|
{}
|
773
755
|
```
|
774
756
|
"""
|
775
|
-
ocm_org_specs = [
|
776
|
-
org_upgrade_spec.org.dict(by_alias=True)
|
777
|
-
for org_upgrade_specs in upgrade_specs.values()
|
778
|
-
for org_upgrade_spec in org_upgrade_specs.values()
|
779
|
-
]
|
780
757
|
for ocm_name in sorted(output.keys()):
|
781
758
|
json_data = json.dumps(
|
782
759
|
{
|
@@ -789,9 +766,96 @@ def ocm_addon_upgrade_policies(ctx):
|
|
789
766
|
)
|
790
767
|
print(
|
791
768
|
section.format(
|
792
|
-
ocm_name,
|
769
|
+
ocm_name,
|
770
|
+
inherit_version_data_text(org_upgrade_specs[ocm_name].org),
|
771
|
+
json_data,
|
772
|
+
)
|
773
|
+
)
|
774
|
+
|
775
|
+
|
776
|
+
@get.command()
|
777
|
+
@click.option(
|
778
|
+
"--days",
|
779
|
+
help="Days to consider for the report. Cannot be used with timestamp options.",
|
780
|
+
type=int,
|
781
|
+
)
|
782
|
+
@click.option(
|
783
|
+
"--from-timestamp",
|
784
|
+
help="Specifies starting Unix time to consider in the report. It requires "
|
785
|
+
"--to-timestamp to be set. It cannot be used with --days option",
|
786
|
+
type=int,
|
787
|
+
)
|
788
|
+
@click.option(
|
789
|
+
"--to-timestamp",
|
790
|
+
help="Specifies ending Unix time to consider in the report. It requires "
|
791
|
+
"--from-timestamp to be set. It cannot be used with --days option",
|
792
|
+
type=int,
|
793
|
+
)
|
794
|
+
@click.pass_context
|
795
|
+
def sd_app_sre_alert_report(
|
796
|
+
ctx: click.core.Context,
|
797
|
+
days: int | None,
|
798
|
+
from_timestamp: int | None,
|
799
|
+
to_timestamp: int | None,
|
800
|
+
) -> None:
|
801
|
+
import tools.sd_app_sre_alert_report as report
|
802
|
+
|
803
|
+
if days:
|
804
|
+
if from_timestamp or to_timestamp:
|
805
|
+
print(
|
806
|
+
"Please don't specify --days or --from-timestamp and --to_timestamp "
|
807
|
+
"options at the same time"
|
793
808
|
)
|
809
|
+
sys.exit(1)
|
810
|
+
|
811
|
+
now = datetime.utcnow()
|
812
|
+
from_timestamp = int((now - timedelta(days=days)).timestamp())
|
813
|
+
to_timestamp = int(now.timestamp())
|
814
|
+
|
815
|
+
if not days:
|
816
|
+
if not (from_timestamp and to_timestamp):
|
817
|
+
print(
|
818
|
+
"Please specify --from-timestamp and --to-timestamp options if --days "
|
819
|
+
"is not set"
|
820
|
+
)
|
821
|
+
sys.exit(1)
|
822
|
+
|
823
|
+
slack = slackapi_from_queries(
|
824
|
+
integration_name=report.QONTRACT_INTEGRATION, init_usergroups=False
|
825
|
+
)
|
826
|
+
alerts = report.group_alerts(
|
827
|
+
slack.get_flat_conversation_history(
|
828
|
+
from_timestamp=from_timestamp, # type: ignore[arg-type]
|
829
|
+
to_timestamp=to_timestamp,
|
794
830
|
)
|
831
|
+
)
|
832
|
+
alert_stats = report.gen_alert_stats(alerts)
|
833
|
+
|
834
|
+
columns = [
|
835
|
+
"Alert name",
|
836
|
+
"Triggered",
|
837
|
+
"Resolved",
|
838
|
+
"Median time to resolve (h:mm:ss)",
|
839
|
+
]
|
840
|
+
table_data: list[dict[str, str]] = []
|
841
|
+
for alert_name, data in sorted(
|
842
|
+
alert_stats.items(), key=lambda i: i[1].triggered_alerts, reverse=True
|
843
|
+
):
|
844
|
+
median_elapsed = ""
|
845
|
+
if data.elapsed_times:
|
846
|
+
seconds = round(median(data.elapsed_times))
|
847
|
+
median_elapsed = str(timedelta(seconds=seconds))
|
848
|
+
|
849
|
+
table_data.append({
|
850
|
+
"Alert name": alert_name,
|
851
|
+
"Triggered": str(data.triggered_alerts),
|
852
|
+
"Resolved": str(data.resolved_alerts),
|
853
|
+
"Median time to resolve (h:mm:ss)": median_elapsed,
|
854
|
+
})
|
855
|
+
|
856
|
+
# TODO(mafriedm, rporres): Fix this
|
857
|
+
ctx.obj["options"]["sort"] = False
|
858
|
+
print_output(ctx.obj["options"], table_data, columns)
|
795
859
|
|
796
860
|
|
797
861
|
@root.command()
|
@@ -861,14 +925,27 @@ def upgrade_cluster_addon(
|
|
861
925
|
)
|
862
926
|
print(["create", ocm_org, cluster, addon, ocm_addon_version])
|
863
927
|
if not dry_run:
|
864
|
-
|
865
|
-
|
866
|
-
|
867
|
-
"
|
868
|
-
|
869
|
-
|
870
|
-
|
871
|
-
|
928
|
+
# detection addon service version
|
929
|
+
ocm_env_labels = json.loads(ocm_info["environment"].get("labels") or "{}")
|
930
|
+
addon_service_version = (
|
931
|
+
ocm_env_labels.get("feature_flag_addon_service_version") or "v2"
|
932
|
+
)
|
933
|
+
addon_service = init_addon_service_version(addon_service_version)
|
934
|
+
|
935
|
+
addon_service.create_addon_upgrade_policy(
|
936
|
+
ocm_api=ocm._ocm_client,
|
937
|
+
cluster_id=ocm.cluster_ids[cluster],
|
938
|
+
addon_id=ocm_addon["id"],
|
939
|
+
schedule_type="manual",
|
940
|
+
version=ocm_addon_version,
|
941
|
+
next_run=addon_upgrade_policy_soonest_next_run(),
|
942
|
+
)
|
943
|
+
|
944
|
+
|
945
|
+
def has_cluster_account_access(cluster: dict[str, Any]):
|
946
|
+
spec = cluster.get("spec") or {}
|
947
|
+
account = spec.get("account")
|
948
|
+
return account or cluster.get("awsInfrastructureManagementAccounts") is not None
|
872
949
|
|
873
950
|
|
874
951
|
@get.command()
|
@@ -879,8 +956,7 @@ def clusters_network(ctx, name):
|
|
879
956
|
clusters = [
|
880
957
|
c
|
881
958
|
for c in queries.get_clusters()
|
882
|
-
if c.get("ocm") is not None
|
883
|
-
and c.get("awsInfrastructureManagementAccounts") is not None
|
959
|
+
if c.get("ocm") is not None and has_cluster_account_access(c)
|
884
960
|
]
|
885
961
|
if name:
|
886
962
|
clusters = [c for c in clusters if c["name"] == name]
|
@@ -897,17 +973,36 @@ def clusters_network(ctx, name):
|
|
897
973
|
|
898
974
|
for cluster in clusters:
|
899
975
|
cluster_name = cluster["name"]
|
976
|
+
product = cluster.get("spec", {}).get("product", "")
|
900
977
|
management_account = tfvpc._get_default_management_account(cluster)
|
901
|
-
|
902
|
-
|
903
|
-
|
904
|
-
|
905
|
-
|
906
|
-
|
978
|
+
|
979
|
+
# we shouldn't need to check if cluster product is ROSA, but currently to make
|
980
|
+
# accepter side work in a cluster-vpc peering we need to define the
|
981
|
+
# awsInfrastructureManagementAccounts, that make management_account not None
|
982
|
+
# See https://issues.redhat.com/browse/APPSRE-8224
|
983
|
+
if management_account is None or product == "rosa":
|
984
|
+
# This is a CCS/ROSA cluster.
|
985
|
+
# We can access the account directly, without assuming a network-mgmt role
|
986
|
+
account = cluster["spec"]["account"]
|
987
|
+
account.update({
|
988
|
+
"assume_role": "",
|
989
|
+
"assume_region": cluster["spec"]["region"],
|
990
|
+
"assume_cidr": cluster["network"]["vpc"],
|
991
|
+
})
|
992
|
+
else:
|
993
|
+
account = tfvpc._build_infrastructure_assume_role(
|
994
|
+
management_account,
|
995
|
+
cluster,
|
996
|
+
ocm_map.get(cluster_name),
|
997
|
+
provided_assume_role=None,
|
998
|
+
)
|
999
|
+
account["resourcesDefaultRegion"] = management_account[
|
1000
|
+
"resourcesDefaultRegion"
|
1001
|
+
]
|
907
1002
|
with AWSApi(1, [account], settings=settings, init_users=False) as aws_api:
|
908
|
-
vpc_id, _, _ = aws_api.get_cluster_vpc_details(account)
|
1003
|
+
vpc_id, _, _, _ = aws_api.get_cluster_vpc_details(account)
|
909
1004
|
cluster["vpc_id"] = vpc_id
|
910
|
-
egress_ips = aws_api.get_cluster_nat_gateways_egress_ips(account)
|
1005
|
+
egress_ips = aws_api.get_cluster_nat_gateways_egress_ips(account, vpc_id)
|
911
1006
|
cluster["egress_ips"] = ", ".join(sorted(egress_ips))
|
912
1007
|
|
913
1008
|
# TODO(mafriedm): fix this
|
@@ -916,6 +1011,160 @@ def clusters_network(ctx, name):
|
|
916
1011
|
print_output(ctx.obj["options"], clusters, columns)
|
917
1012
|
|
918
1013
|
|
1014
|
+
@get.command()
|
1015
|
+
@click.pass_context
|
1016
|
+
def network_reservations(ctx) -> None:
|
1017
|
+
from reconcile.typed_queries.reserved_networks import get_networks
|
1018
|
+
|
1019
|
+
columns = [
|
1020
|
+
"name",
|
1021
|
+
"network Address",
|
1022
|
+
"parent Network",
|
1023
|
+
"Account Name",
|
1024
|
+
"Account UID",
|
1025
|
+
"Console Login URL",
|
1026
|
+
]
|
1027
|
+
network_table = []
|
1028
|
+
|
1029
|
+
def md_link(url) -> str:
|
1030
|
+
if ctx.obj["options"]["output"] == "md":
|
1031
|
+
return f"[{url}]({url})"
|
1032
|
+
else:
|
1033
|
+
return url
|
1034
|
+
|
1035
|
+
for network in get_networks():
|
1036
|
+
parentAddress = "none"
|
1037
|
+
if network.parent_network:
|
1038
|
+
parentAddress = network.parent_network.network_address
|
1039
|
+
if network.in_use_by and network.in_use_by.vpc:
|
1040
|
+
network_table.append({
|
1041
|
+
"name": network.name,
|
1042
|
+
"network Address": network.network_address,
|
1043
|
+
"parent Network": parentAddress,
|
1044
|
+
"Account Name": network.in_use_by.vpc.account.name,
|
1045
|
+
"Account UID": network.in_use_by.vpc.account.uid,
|
1046
|
+
"Console Login URL": md_link(network.in_use_by.vpc.account.console_url),
|
1047
|
+
})
|
1048
|
+
else:
|
1049
|
+
network_table.append({
|
1050
|
+
"name": network.name,
|
1051
|
+
"network Address": network.network_address,
|
1052
|
+
"parent Network": parentAddress,
|
1053
|
+
"Account Name": "Unclaimed network",
|
1054
|
+
"Account UID": "Unclaimed network",
|
1055
|
+
"Console Login URL": "Unclaimed network",
|
1056
|
+
})
|
1057
|
+
print_output(ctx.obj["options"], network_table, columns)
|
1058
|
+
|
1059
|
+
|
1060
|
+
@get.command()
|
1061
|
+
@click.option(
|
1062
|
+
"--for-cluster",
|
1063
|
+
help="If it is for getting cidr block for a cluster.",
|
1064
|
+
type=bool,
|
1065
|
+
default=False,
|
1066
|
+
)
|
1067
|
+
@click.option(
|
1068
|
+
"--mask",
|
1069
|
+
help="Mask for the latest available CIDR block for AWS resources. A decimal number between 1~32.",
|
1070
|
+
type=int,
|
1071
|
+
default=24,
|
1072
|
+
)
|
1073
|
+
@click.pass_context
|
1074
|
+
def cidr_blocks(ctx, for_cluster: int, mask: int) -> None:
|
1075
|
+
import ipaddress
|
1076
|
+
|
1077
|
+
from reconcile.typed_queries.aws_vpcs import get_aws_vpcs
|
1078
|
+
|
1079
|
+
columns = ["type", "name", "account", "cidr", "from", "to", "hosts", "overlaps"]
|
1080
|
+
|
1081
|
+
clusters = [c for c in queries.get_clusters() if c.get("network")]
|
1082
|
+
cidrs = [
|
1083
|
+
{
|
1084
|
+
"type": "cluster",
|
1085
|
+
"name": c["name"],
|
1086
|
+
"account": ((c.get("spec") or {}).get("account") or {}).get("name"),
|
1087
|
+
"cidr": c["network"]["vpc"],
|
1088
|
+
"from": str(ipaddress.ip_network(c["network"]["vpc"])[0]),
|
1089
|
+
"to": str(ipaddress.ip_network(c["network"]["vpc"])[-1]),
|
1090
|
+
"hosts": str(ipaddress.ip_network(c["network"]["vpc"]).num_addresses),
|
1091
|
+
"description": c.get("description"),
|
1092
|
+
}
|
1093
|
+
for c in clusters
|
1094
|
+
]
|
1095
|
+
|
1096
|
+
tgw_cidrs = [
|
1097
|
+
{
|
1098
|
+
"type": "account-tgw",
|
1099
|
+
"name": connection["account"]["name"],
|
1100
|
+
"account": connection["account"]["name"],
|
1101
|
+
"cidr": cidr,
|
1102
|
+
"from": str(ipaddress.ip_network(cidr)[0]),
|
1103
|
+
"to": str(ipaddress.ip_network(cidr)[-1]),
|
1104
|
+
"hosts": str(ipaddress.ip_network(cidr).num_addresses),
|
1105
|
+
"description": f'CIDR {cidr} routed through account {connection["account"]["name"]} transit gateways',
|
1106
|
+
}
|
1107
|
+
for c in clusters
|
1108
|
+
for connection in (c["peering"] or {}).get("connections") or []
|
1109
|
+
if connection["provider"] == "account-tgw"
|
1110
|
+
for cidr in [connection["cidrBlock"]] + (connection["cidrBlocks"] or [])
|
1111
|
+
if cidr is not None
|
1112
|
+
]
|
1113
|
+
# removing dupes using a set of tuple (since dicts are not hashable)
|
1114
|
+
unique_tgw_cidrs = [dict(t) for t in {tuple(d.items()) for d in tgw_cidrs}]
|
1115
|
+
cidrs.extend(unique_tgw_cidrs)
|
1116
|
+
|
1117
|
+
vpcs = get_aws_vpcs()
|
1118
|
+
cidrs.extend(
|
1119
|
+
{
|
1120
|
+
"type": "vpc",
|
1121
|
+
"name": vpc.name,
|
1122
|
+
"account": vpc.account.name,
|
1123
|
+
"cidr": vpc.cidr_block,
|
1124
|
+
"from": str(ipaddress.ip_network(vpc.cidr_block)[0]),
|
1125
|
+
"to": str(ipaddress.ip_network(vpc.cidr_block)[-1]),
|
1126
|
+
"hosts": str(ipaddress.ip_network(vpc.cidr_block).num_addresses),
|
1127
|
+
"description": vpc.description,
|
1128
|
+
}
|
1129
|
+
for vpc in vpcs
|
1130
|
+
)
|
1131
|
+
|
1132
|
+
for index, cidr in enumerate(cidrs):
|
1133
|
+
network = ipaddress.ip_network(cidr["cidr"])
|
1134
|
+
overlaps = [
|
1135
|
+
f"{c['type']}/{c['name']}"
|
1136
|
+
for i, c in enumerate(cidrs)
|
1137
|
+
if i != index and network.overlaps(ipaddress.ip_network(c["cidr"]))
|
1138
|
+
]
|
1139
|
+
cidr["overlaps"] = ", ".join(overlaps)
|
1140
|
+
|
1141
|
+
cidrs.sort(key=lambda item: ipaddress.ip_network(item["cidr"]))
|
1142
|
+
|
1143
|
+
if for_cluster:
|
1144
|
+
latest_cluster_cidr = next(
|
1145
|
+
(item for item in reversed(cidrs) if item["type"] == "cluster"),
|
1146
|
+
None,
|
1147
|
+
)
|
1148
|
+
|
1149
|
+
if not latest_cluster_cidr:
|
1150
|
+
print("ERROR: Unable to find any existing cluster CIDR block.")
|
1151
|
+
sys.exit(1)
|
1152
|
+
|
1153
|
+
avail_addr = ipaddress.ip_address(latest_cluster_cidr["to"]) + 1
|
1154
|
+
|
1155
|
+
print(f"INFO: Latest available network address: {avail_addr!s}")
|
1156
|
+
try:
|
1157
|
+
result_cidr_block = str(ipaddress.ip_network((avail_addr, mask)))
|
1158
|
+
except ValueError:
|
1159
|
+
print(f"ERROR: Invalid CIDR Mask {mask} Provided.")
|
1160
|
+
sys.exit(1)
|
1161
|
+
print(f"INFO: You are reserving {2 ** (32 - mask)!s} network addresses.")
|
1162
|
+
print(f"\nYou can use: {result_cidr_block!s}")
|
1163
|
+
else:
|
1164
|
+
ctx.obj["options"]["sort"] = False
|
1165
|
+
print_output(ctx.obj["options"], cidrs, columns)
|
1166
|
+
|
1167
|
+
|
919
1168
|
def ocm_aws_infrastructure_access_switch_role_links_data() -> list[dict]:
|
920
1169
|
settings = queries.get_app_interface_settings()
|
921
1170
|
clusters = queries.get_clusters()
|
@@ -972,38 +1221,6 @@ def ocm_aws_infrastructure_access_switch_role_links(ctx):
|
|
972
1221
|
print_output(ctx.obj["options"], by_user[user], columns)
|
973
1222
|
|
974
1223
|
|
975
|
-
@get.command()
|
976
|
-
@click.pass_context
|
977
|
-
def clusters_egress_ips(ctx):
|
978
|
-
settings = queries.get_app_interface_settings()
|
979
|
-
clusters = queries.get_clusters()
|
980
|
-
clusters = [
|
981
|
-
c
|
982
|
-
for c in clusters
|
983
|
-
if c.get("ocm") is not None
|
984
|
-
and c.get("awsInfrastructureManagementAccounts") is not None
|
985
|
-
]
|
986
|
-
ocm_map = OCMMap(clusters=clusters, settings=settings)
|
987
|
-
|
988
|
-
results = []
|
989
|
-
for cluster in clusters:
|
990
|
-
cluster_name = cluster["name"]
|
991
|
-
management_account = tfvpc._get_default_management_account(cluster)
|
992
|
-
account = tfvpc._build_infrastructure_assume_role(
|
993
|
-
management_account, cluster, ocm_map.get(cluster_name)
|
994
|
-
)
|
995
|
-
if not account:
|
996
|
-
continue
|
997
|
-
account["resourcesDefaultRegion"] = management_account["resourcesDefaultRegion"]
|
998
|
-
with AWSApi(1, [account], settings=settings, init_users=False) as aws_api:
|
999
|
-
egress_ips = aws_api.get_cluster_nat_gateways_egress_ips(account)
|
1000
|
-
item = {"cluster": cluster_name, "egress_ips": ", ".join(sorted(egress_ips))}
|
1001
|
-
results.append(item)
|
1002
|
-
|
1003
|
-
columns = ["cluster", "egress_ips"]
|
1004
|
-
print_output(ctx.obj["options"], results, columns)
|
1005
|
-
|
1006
|
-
|
1007
1224
|
@get.command()
|
1008
1225
|
@click.pass_context
|
1009
1226
|
def clusters_aws_account_ids(ctx):
|
@@ -1014,6 +1231,13 @@ def clusters_aws_account_ids(ctx):
|
|
1014
1231
|
results = []
|
1015
1232
|
for cluster in clusters:
|
1016
1233
|
cluster_name = cluster["name"]
|
1234
|
+
if cluster["spec"].get("account"):
|
1235
|
+
item = {
|
1236
|
+
"cluster": cluster_name,
|
1237
|
+
"aws_account_id": cluster["spec"]["account"]["uid"],
|
1238
|
+
}
|
1239
|
+
results.append(item)
|
1240
|
+
continue
|
1017
1241
|
ocm = ocm_map.get(cluster_name)
|
1018
1242
|
aws_account_id = ocm.get_cluster_aws_account_id(cluster_name)
|
1019
1243
|
item = {
|
@@ -1026,71 +1250,8 @@ def clusters_aws_account_ids(ctx):
|
|
1026
1250
|
print_output(ctx.obj["options"], results, columns)
|
1027
1251
|
|
1028
1252
|
|
1029
|
-
@
|
1030
|
-
@click.
|
1031
|
-
def terraform_users_credentials(ctx) -> None:
|
1032
|
-
credentials = []
|
1033
|
-
state = init_state(integration="account-notifier")
|
1034
|
-
|
1035
|
-
skip_accounts, appsre_pgp_key, _ = tfu.get_reencrypt_settings()
|
1036
|
-
|
1037
|
-
if skip_accounts:
|
1038
|
-
accounts, working_dirs, _, aws_api = tfu.setup(
|
1039
|
-
False,
|
1040
|
-
1,
|
1041
|
-
skip_accounts,
|
1042
|
-
account_name=None,
|
1043
|
-
appsre_pgp_key=appsre_pgp_key,
|
1044
|
-
)
|
1045
|
-
|
1046
|
-
tf = Terraform(
|
1047
|
-
tfu.QONTRACT_INTEGRATION,
|
1048
|
-
tfu.QONTRACT_INTEGRATION_VERSION,
|
1049
|
-
tfu.QONTRACT_TF_PREFIX,
|
1050
|
-
accounts,
|
1051
|
-
working_dirs,
|
1052
|
-
10,
|
1053
|
-
aws_api,
|
1054
|
-
init_users=True,
|
1055
|
-
)
|
1056
|
-
for account, output in tf.outputs.items():
|
1057
|
-
if account in skip_accounts:
|
1058
|
-
user_passwords = tf.format_output(output, tf.OUTPUT_TYPE_PASSWORDS)
|
1059
|
-
console_urls = tf.format_output(output, tf.OUTPUT_TYPE_CONSOLEURLS)
|
1060
|
-
for user_name, enc_password in user_passwords.items():
|
1061
|
-
item = {
|
1062
|
-
"account": account,
|
1063
|
-
"console_url": console_urls[account],
|
1064
|
-
"user_name": user_name,
|
1065
|
-
"encrypted_password": enc_password,
|
1066
|
-
}
|
1067
|
-
credentials.append(item)
|
1068
|
-
|
1069
|
-
secrets = state.ls()
|
1070
|
-
|
1071
|
-
def _get_secret(secret_key: str):
|
1072
|
-
if secret_key.startswith("/output/"):
|
1073
|
-
secret_data = state.get(secret_key[1:])
|
1074
|
-
if secret_data["account"] not in skip_accounts:
|
1075
|
-
return secret_data
|
1076
|
-
return None
|
1077
|
-
|
1078
|
-
secret_result = threaded.run(
|
1079
|
-
_get_secret,
|
1080
|
-
secrets,
|
1081
|
-
10,
|
1082
|
-
)
|
1083
|
-
|
1084
|
-
for secret in secret_result:
|
1085
|
-
if secret and secret["account"] not in skip_accounts:
|
1086
|
-
credentials.append(secret)
|
1087
|
-
|
1088
|
-
columns = ["account", "console_url", "user_name", "encrypted_password"]
|
1089
|
-
print_output(ctx.obj["options"], credentials, columns)
|
1090
|
-
|
1091
|
-
|
1092
|
-
@root.command()
|
1093
|
-
@click.argument("account_name")
|
1253
|
+
@root.command()
|
1254
|
+
@click.argument("account_name")
|
1094
1255
|
@click.pass_context
|
1095
1256
|
def user_credentials_migrate_output(ctx, account_name) -> None:
|
1096
1257
|
accounts = queries.get_state_aws_accounts()
|
@@ -1155,8 +1316,9 @@ def aws_route53_zones(ctx):
|
|
1155
1316
|
|
1156
1317
|
@get.command()
|
1157
1318
|
@click.argument("cluster_name")
|
1319
|
+
@click.option("--cluster-admin/--no-cluster-admin", default=False)
|
1158
1320
|
@click.pass_context
|
1159
|
-
def bot_login(ctx, cluster_name):
|
1321
|
+
def bot_login(ctx, cluster_name, cluster_admin):
|
1160
1322
|
settings = queries.get_app_interface_settings()
|
1161
1323
|
secret_reader = SecretReader(settings=settings)
|
1162
1324
|
clusters = queries.get_clusters()
|
@@ -1167,7 +1329,10 @@ def bot_login(ctx, cluster_name):
|
|
1167
1329
|
|
1168
1330
|
cluster = clusters[0]
|
1169
1331
|
server = cluster["serverUrl"]
|
1170
|
-
|
1332
|
+
automation_token_name = (
|
1333
|
+
"clusterAdminAutomationToken" if cluster_admin else "automationToken"
|
1334
|
+
)
|
1335
|
+
token = secret_reader.read(cluster[automation_token_name])
|
1171
1336
|
print(f"oc login --server {server} --token {token}")
|
1172
1337
|
|
1173
1338
|
|
@@ -1218,6 +1383,181 @@ def aws_creds(ctx, account_name):
|
|
1218
1383
|
print(f"export AWS_SECRET_ACCESS_KEY={secret['aws_secret_access_key']}")
|
1219
1384
|
|
1220
1385
|
|
1386
|
+
@root.command()
|
1387
|
+
@click.option(
|
1388
|
+
"--account-uid",
|
1389
|
+
help="account UID of the account that owns the bucket",
|
1390
|
+
required=True,
|
1391
|
+
)
|
1392
|
+
@click.option(
|
1393
|
+
"--source-bucket",
|
1394
|
+
help="aws bucket where the source statefile is stored",
|
1395
|
+
required=True,
|
1396
|
+
)
|
1397
|
+
@click.option(
|
1398
|
+
"--source-object-path",
|
1399
|
+
help="path in the bucket where the statefile is stored",
|
1400
|
+
required=True,
|
1401
|
+
)
|
1402
|
+
@click.option(
|
1403
|
+
"--rename",
|
1404
|
+
help="optionally rename the destination repo, otherwise keep the same name for the new location",
|
1405
|
+
)
|
1406
|
+
@click.option("--region", help="AWS region")
|
1407
|
+
@click.option(
|
1408
|
+
"--force/--no-force",
|
1409
|
+
help="Force the copy even if a statefile already exists at the destination",
|
1410
|
+
default=False,
|
1411
|
+
)
|
1412
|
+
@click.pass_context
|
1413
|
+
def copy_tfstate(
|
1414
|
+
ctx, source_bucket, source_object_path, account_uid, rename, region, force
|
1415
|
+
):
|
1416
|
+
settings = queries.get_app_interface_settings()
|
1417
|
+
secret_reader = SecretReader(settings=settings)
|
1418
|
+
accounts = queries.get_aws_accounts(uid=account_uid, terraform_state=True)
|
1419
|
+
if not accounts:
|
1420
|
+
print(f"{account_uid} not found in App-Interface.")
|
1421
|
+
sys.exit(1)
|
1422
|
+
account = accounts[0]
|
1423
|
+
|
1424
|
+
# terraform repo stores its statefiles within a "folder" in AWS S3 which is defined in App-Interface
|
1425
|
+
dest_folder = [
|
1426
|
+
i
|
1427
|
+
for i in account["terraformState"]["integrations"]
|
1428
|
+
if i["integration"] == "terraform-repo"
|
1429
|
+
]
|
1430
|
+
if not dest_folder:
|
1431
|
+
logging.error(
|
1432
|
+
"terraform-repo is missing a section in this account's '/dependencies/terraform-state-1.yml' file, please add one using the docs in https://gitlab.cee.redhat.com/service/app-interface/-/blob/master/docs/terraform-repo/getting-started.md?ref_type=heads#step-1-setup-aws-account and then try again"
|
1433
|
+
)
|
1434
|
+
return
|
1435
|
+
|
1436
|
+
dest_filename = ""
|
1437
|
+
if rename:
|
1438
|
+
dest_filename = rename.removesuffix(".tfstate")
|
1439
|
+
else:
|
1440
|
+
dest_filename = source_object_path.removesuffix(".tfstate")
|
1441
|
+
|
1442
|
+
dest_key = f"{dest_folder[0]['key']}/{dest_filename}-tf-repo.tfstate"
|
1443
|
+
dest_bucket = account["terraformState"]["bucket"]
|
1444
|
+
|
1445
|
+
with AWSApi(1, accounts, settings, secret_reader) as aws:
|
1446
|
+
session = aws.get_session(account["name"])
|
1447
|
+
s3_client = aws.get_session_client(session, "s3", region)
|
1448
|
+
copy_source = {
|
1449
|
+
"Bucket": source_bucket,
|
1450
|
+
"Key": source_object_path,
|
1451
|
+
}
|
1452
|
+
|
1453
|
+
dest_pretty_path = f"s3://{dest_bucket}/{dest_key}"
|
1454
|
+
# check if dest already exists
|
1455
|
+
response = s3_client.list_objects_v2(
|
1456
|
+
Bucket=dest_bucket, Prefix=dest_key, MaxKeys=1
|
1457
|
+
)
|
1458
|
+
|
1459
|
+
if "Contents" in response:
|
1460
|
+
if force:
|
1461
|
+
logging.warning(
|
1462
|
+
f"Existing object at '{dest_pretty_path}' will be overwritten as --force is set"
|
1463
|
+
)
|
1464
|
+
else:
|
1465
|
+
logging.error(
|
1466
|
+
f"Will not overwrite existing object at '{dest_pretty_path}'. Use --force to overwrite the destination object"
|
1467
|
+
)
|
1468
|
+
return
|
1469
|
+
|
1470
|
+
prompt_text = f"Are you sure you want to copy 's3://{source_bucket}/{source_object_path}' to '{dest_pretty_path}'?"
|
1471
|
+
if click.confirm(prompt_text):
|
1472
|
+
s3_client.copy(copy_source, dest_bucket, dest_key)
|
1473
|
+
print(
|
1474
|
+
textwrap.dedent(f"""
|
1475
|
+
Nicely done! Your tfstate file has been migrated. Now you can create a repo definition in App-Interface like so:
|
1476
|
+
|
1477
|
+
---
|
1478
|
+
$schema: /aws/terraform-repo-1.yml
|
1479
|
+
|
1480
|
+
account:
|
1481
|
+
$ref: {account["path"]}
|
1482
|
+
|
1483
|
+
name: {dest_filename}
|
1484
|
+
repository: <FILL_IN>
|
1485
|
+
projectPath: <FILL_IN>
|
1486
|
+
tfVersion: <FILL_IN>
|
1487
|
+
ref: <FILL_IN>""")
|
1488
|
+
)
|
1489
|
+
|
1490
|
+
|
1491
|
+
@get.command(short_help='obtain "rosa create cluster" command by cluster name')
@click.argument("cluster_name")
@click.pass_context
def rosa_create_cluster_command(ctx, cluster_name):
    """Reconstruct and print the `rosa create cluster` invocation for a cluster.

    Looks the cluster up in app-interface, resolves the billing account
    (explicit spec value first, AWS organization lookup otherwise) and prints
    the full command line. Exits non-zero when the cluster is unknown or is
    not a ROSA cluster.
    """
    matching = [c for c in get_clusters() if c.name == cluster_name]
    if not matching:
        print(f"{cluster_name} not found.")
        sys.exit(1)
    cluster = matching[0]

    if cluster.spec.product != OCM_PRODUCT_ROSA:
        print("must be a rosa cluster.")
        sys.exit(1)

    settings = queries.get_app_interface_settings()
    account = cluster.spec.account

    # Prefer the explicitly configured billing account; otherwise ask AWS.
    if account.billing_account:
        billing_account = account.billing_account.uid
    else:
        with AWSApi(
            1, [account.dict(by_alias=True)], settings=settings, init_users=False
        ) as aws_api:
            billing_account = aws_api.get_organization_billing_account(account.name)

    spec = cluster.spec
    # Conditional flags resolve to "" when not applicable; the join keeps
    # them (matching the previous output exactly).
    flags = [
        "rosa create cluster",
        f"--billing-account {billing_account}",
        f"--cluster-name {cluster.name}",
        "--sts",
        "--private" if spec.private else "",
        "--hosted-cp" if spec.hypershift else "",
        "--private-link" if spec.private and not spec.hypershift else "",
        "--multi-az" if spec.multi_az and not spec.hypershift else "",
        f"--operator-roles-prefix {cluster.name}",
        f"--oidc-config-id {spec.oidc_endpoint_url.split('/')[-1]}",
        f"--subnet-ids {','.join(spec.subnet_ids)}",
        f"--region {spec.region}",
        f"--version {spec.initial_version}",
        f"--machine-cidr {cluster.network.vpc}",
        f"--service-cidr {cluster.network.service}",
        f"--pod-cidr {cluster.network.pod}",
        "--host-prefix 23",
        "--replicas 3",
        f"--compute-machine-type {cluster.machine_pools[0].instance_type}",
        "--disable-workload-monitoring"
        if spec.disable_user_workload_monitoring
        else "",
        f"--channel-group {spec.channel}",
        f"--properties provision_shard_id:{spec.provision_shard_id}"
        if spec.provision_shard_id
        else "",
    ]
    print(" ".join(flags))
|
1559
|
+
|
1560
|
+
|
1221
1561
|
@get.command(
|
1222
1562
|
short_help="obtain sshuttle command for "
|
1223
1563
|
"connecting to private clusters via a jump host. "
|
@@ -1227,9 +1567,7 @@ def aws_creds(ctx, account_name):
|
|
1227
1567
|
@click.argument("jumphost_hostname", required=False)
|
1228
1568
|
@click.argument("cluster_name", required=False)
|
1229
1569
|
@click.pass_context
|
1230
|
-
def sshuttle_command(
|
1231
|
-
ctx, jumphost_hostname: Optional[str], cluster_name: Optional[str]
|
1232
|
-
):
|
1570
|
+
def sshuttle_command(ctx, jumphost_hostname: str | None, cluster_name: str | None):
|
1233
1571
|
jumphosts_query_data = queries.get_jumphosts(hostname=jumphost_hostname)
|
1234
1572
|
jumphosts = jumphosts_query_data.jumphosts or []
|
1235
1573
|
for jh in jumphosts:
|
@@ -1350,6 +1688,190 @@ def aws_terraform_resources(ctx):
|
|
1350
1688
|
print_output(ctx.obj["options"], results.values(), columns)
|
1351
1689
|
|
1352
1690
|
|
1691
|
+
def rds_attr(
|
1692
|
+
attr: str, overrides: dict[str, str], defaults: dict[str, str]
|
1693
|
+
) -> str | None:
|
1694
|
+
return overrides.get(attr) or defaults.get(attr)
|
1695
|
+
|
1696
|
+
|
1697
|
+
def region_from_az(az: str | None) -> str | None:
|
1698
|
+
if not az:
|
1699
|
+
return None
|
1700
|
+
return az[:-1]
|
1701
|
+
|
1702
|
+
|
1703
|
+
def rds_region(
    spec: ExternalResourceSpec,
    overrides: dict[str, str],
    defaults: dict[str, str],
    accounts: dict[str, Any],
) -> str | None:
    """Resolve the effective region of an RDS spec.

    Checked in order: explicit region on the resource, region from
    overrides/defaults, the region implied by the resource's availability
    zone, the region implied by the overrides/defaults availability zone,
    and finally the account's default resources region.
    """
    # Lazy candidates preserve the original short-circuit behavior: later
    # lookups (including the account dict access) only happen when needed.
    lazy_candidates = (
        lambda: spec.resource.get("region"),
        lambda: rds_attr("region", overrides, defaults),
        lambda: region_from_az(spec.resource.get("availability_zone")),
        lambda: region_from_az(rds_attr("availability_zone", overrides, defaults)),
    )
    for candidate in lazy_candidates:
        if region := candidate():
            return region
    # Last resort: the account-level default region (may be None).
    return accounts[spec.provisioner_name].get("resourcesDefaultRegion")
|
1716
|
+
|
1717
|
+
|
1718
|
+
@get.command
@click.pass_context
def rds(ctx):
    """Report all app-interface managed RDS instances and their key attributes."""
    # One column list drives both the markdown json:table and plain output.
    report_columns = [
        "identifier",
        "account",
        "account_uid",
        "region",
        "engine",
        "engine_version",
        "instance_class",
        "storage_type",
        "ca_cert_identifier",
    ]

    accounts = {a["name"]: a for a in queries.get_aws_accounts()}
    results = []
    for namespace in tfr.get_namespaces():
        for spec in get_external_resource_specs(
            namespace.dict(by_alias=True), provision_provider=PROVIDER_AWS
        ):
            if spec.provider != "rds":
                continue
            # Effective attributes come from the defaults file merged with
            # the per-resource JSON overrides.
            defaults = yaml.safe_load(
                gql.get_resource(spec.resource["defaults"])["content"]
            )
            overrides = json.loads(spec.resource.get("overrides") or "{}")
            results.append({
                "identifier": spec.identifier,
                "account": spec.provisioner_name,
                "account_uid": accounts[spec.provisioner_name]["uid"],
                "region": rds_region(spec, overrides, defaults, accounts),
                "engine": rds_attr("engine", overrides, defaults),
                "engine_version": rds_attr("engine_version", overrides, defaults),
                "instance_class": rds_attr("instance_class", overrides, defaults),
                "storage_type": rds_attr("storage_type", overrides, defaults),
                "ca_cert_identifier": rds_attr(
                    "ca_cert_identifier", overrides, defaults
                ),
            })

    if ctx.obj["options"]["output"] == "md":
        json_table = {
            "filter": True,
            "fields": [{"key": c, "sortable": True} for c in report_columns],
            "items": results,
        }

        print(
            f"""
You can view the source of this Markdown to extract the JSON data.

{len(results)} RDS instances found.

```json:table
{json.dumps(json_table)}
```
"""
        )
    else:
        ctx.obj["options"]["sort"] = False
        print_output(ctx.obj["options"], results, report_columns)
|
1794
|
+
|
1795
|
+
|
1796
|
+
@get.command
@click.pass_context
def rds_recommendations(ctx):
    """Print AWS RDS recommendations per account/region as markdown sections.

    Only AWS accounts that actually own app-interface RDS resources are
    queried. Resolved recommendations and informational severities are
    filtered out.
    """
    IGNORED_STATUSES = ("resolved",)
    IGNORED_SEVERITIES = ("informational",)

    settings = queries.get_app_interface_settings()

    # Only check AWS accounts for which we have RDS resources defined.
    # Fix: use a set instead of a list — O(1) membership tests below and
    # natural deduplication of accounts provisioning multiple RDS instances.
    targeted_accounts: set[str] = set()
    for namespace_info in queries.get_namespaces():
        if not managed_external_resources(namespace_info):
            continue
        for spec in get_external_resource_specs(namespace_info):
            if spec.provider == "rds":
                targeted_accounts.add(spec.provisioner_name)

    accounts = [
        a for a in queries.get_aws_accounts() if a["name"] in targeted_accounts
    ]
    accounts.sort(key=lambda a: a["name"])

    columns = [
        # 'RecommendationId',
        # 'TypeId',
        # 'ResourceArn',
        "ResourceName",  # Non-AWS field
        "Severity",
        "Category",
        "Impact",
        "Status",
        "Detection",
        "Recommendation",
        "Description",
        # 'Source',
        # 'TypeDetection',
        # 'TypeRecommendation',
        # 'AdditionalInfo'
    ]

    ctx.obj["options"]["sort"] = False

    print("[TOC]")
    for account in accounts:
        account_name = account.get("name")
        account_deployment_regions = account.get("supportedDeploymentRegions")
        for region in account_deployment_regions or []:
            with AWSApi(1, [account], settings=settings, init_users=False) as aws:
                try:
                    data = aws.describe_rds_recommendations(account_name, region)
                    recommendations = data.get("DBRecommendations", [])
                except Exception as e:
                    logging.error(f"Error describing RDS recommendations: {e}")
                    continue

            # Add field ResourceName inferred from ResourceArn
            recommendations = [
                {**rec, "ResourceName": rec["ResourceArn"].split(":")[-1]}
                for rec in recommendations
                if rec.get("Status") not in IGNORED_STATUSES
                and rec.get("Severity") not in IGNORED_SEVERITIES
            ]
            # The Description field has \n that are causing issues with the markdown table
            recommendations = [
                {**rec, "Description": rec["Description"].replace("\n", " ")}
                for rec in recommendations
            ]
            # If we have no recommendations to show, skip
            if not recommendations:
                continue
            # Sort by ResourceName
            recommendations.sort(key=lambda r: r["ResourceName"])

            print(f"# {account_name} - {region}")
            print("Note: Severity informational is not shown.")
            print_output(ctx.obj["options"], recommendations, columns)
|
1873
|
+
|
1874
|
+
|
1353
1875
|
@get.command()
|
1354
1876
|
@click.pass_context
|
1355
1877
|
def products(ctx):
|
@@ -1434,17 +1956,8 @@ def roles(ctx, org_username):
|
|
1434
1956
|
|
1435
1957
|
user = users[0]
|
1436
1958
|
|
1437
|
-
|
1438
|
-
|
1439
|
-
def add(d):
|
1440
|
-
for i, r in enumerate(roles):
|
1441
|
-
if all(d[k] == r[k] for k in ("type", "name", "resource")):
|
1442
|
-
roles.insert(
|
1443
|
-
i + 1, {"type": "", "name": "", "resource": "", "ref": d["ref"]}
|
1444
|
-
)
|
1445
|
-
return
|
1446
|
-
|
1447
|
-
roles.append(d)
|
1959
|
+
# type, name, resource, [ref]
|
1960
|
+
roles: dict[(str, str, str), set] = defaultdict(set)
|
1448
1961
|
|
1449
1962
|
for role in user["roles"]:
|
1450
1963
|
role_name = role["path"]
|
@@ -1461,63 +1974,38 @@ def roles(ctx, org_username):
|
|
1461
1974
|
if "team" in p:
|
1462
1975
|
r_name += "/" + p["team"]
|
1463
1976
|
|
1464
|
-
add(
|
1465
|
-
{
|
1466
|
-
"type": "permission",
|
1467
|
-
"name": p["name"],
|
1468
|
-
"resource": r_name,
|
1469
|
-
"ref": role_name,
|
1470
|
-
}
|
1471
|
-
)
|
1977
|
+
roles["permission", p["name"], r_name].add(role_name)
|
1472
1978
|
|
1473
1979
|
for aws in role.get("aws_groups") or []:
|
1474
1980
|
for policy in aws["policies"]:
|
1475
|
-
add(
|
1476
|
-
{
|
1477
|
-
"type": "aws",
|
1478
|
-
"name": policy,
|
1479
|
-
"resource": aws["account"]["name"],
|
1480
|
-
"ref": aws["path"],
|
1481
|
-
}
|
1482
|
-
)
|
1981
|
+
roles["aws", policy, aws["account"]["name"]].add(aws["path"])
|
1483
1982
|
|
1484
1983
|
for a in role.get("access") or []:
|
1485
1984
|
if a["cluster"]:
|
1486
1985
|
cluster_name = a["cluster"]["name"]
|
1487
|
-
add(
|
1488
|
-
{
|
1489
|
-
"type": "cluster",
|
1490
|
-
"name": a["clusterRole"],
|
1491
|
-
"resource": cluster_name,
|
1492
|
-
"ref": role_name,
|
1493
|
-
}
|
1494
|
-
)
|
1986
|
+
roles["cluster", a["clusterRole"], cluster_name].add(role_name)
|
1495
1987
|
elif a["namespace"]:
|
1496
1988
|
ns_name = a["namespace"]["name"]
|
1497
|
-
add(
|
1498
|
-
{
|
1499
|
-
"type": "namespace",
|
1500
|
-
"name": a["role"],
|
1501
|
-
"resource": ns_name,
|
1502
|
-
"ref": role_name,
|
1503
|
-
}
|
1504
|
-
)
|
1989
|
+
roles["namespace", a["role"], ns_name].add(role_name)
|
1505
1990
|
|
1506
1991
|
for s in role.get("self_service") or []:
|
1507
1992
|
for d in s.get("datafiles") or []:
|
1508
1993
|
name = d.get("name")
|
1509
1994
|
if name:
|
1510
|
-
add(
|
1511
|
-
{
|
1512
|
-
"type": "saas_file",
|
1513
|
-
"name": "owner",
|
1514
|
-
"resource": name,
|
1515
|
-
"ref": role_name,
|
1516
|
-
}
|
1517
|
-
)
|
1995
|
+
roles["saas_file", "owner", name].add(role_name)
|
1518
1996
|
|
1519
1997
|
columns = ["type", "name", "resource", "ref"]
|
1520
|
-
|
1998
|
+
rows = [
|
1999
|
+
{
|
2000
|
+
"type": k[0],
|
2001
|
+
"name": k[1],
|
2002
|
+
"resource": k[2],
|
2003
|
+
"ref": ref,
|
2004
|
+
}
|
2005
|
+
for k, v in roles.items()
|
2006
|
+
for ref in v
|
2007
|
+
]
|
2008
|
+
print_output(ctx.obj["options"], rows, columns)
|
1521
2009
|
|
1522
2010
|
|
1523
2011
|
@get.command()
|
@@ -1564,13 +2052,11 @@ def quay_mirrors(ctx):
|
|
1564
2052
|
url = item["mirror"]["url"]
|
1565
2053
|
public = item["public"]
|
1566
2054
|
|
1567
|
-
mirrors.append(
|
1568
|
-
{
|
1569
|
-
|
1570
|
-
|
1571
|
-
|
1572
|
-
}
|
1573
|
-
)
|
2055
|
+
mirrors.append({
|
2056
|
+
"repo": f"quay.io/{org_name}/{name}",
|
2057
|
+
"public": public,
|
2058
|
+
"upstream": url,
|
2059
|
+
})
|
1574
2060
|
|
1575
2061
|
columns = ["repo", "upstream", "public"]
|
1576
2062
|
print_output(ctx.obj["options"], mirrors, columns)
|
@@ -1689,7 +2175,7 @@ def app_interface_merge_queue(ctx):
|
|
1689
2175
|
).total_seconds()
|
1690
2176
|
/ 60,
|
1691
2177
|
"approved_by": mr["approved_by"],
|
1692
|
-
"labels": ", ".join(mr["
|
2178
|
+
"labels": ", ".join(mr["mr"].labels),
|
1693
2179
|
}
|
1694
2180
|
merge_queue_data.append(item)
|
1695
2181
|
|
@@ -1727,14 +2213,14 @@ def app_interface_review_queue(ctx) -> None:
|
|
1727
2213
|
|
1728
2214
|
queue_data = []
|
1729
2215
|
for mr in merge_requests:
|
1730
|
-
if mr.
|
2216
|
+
if mr.draft:
|
1731
2217
|
continue
|
1732
2218
|
if len(mr.commits()) == 0:
|
1733
2219
|
continue
|
1734
|
-
if mr.merge_status in
|
2220
|
+
if mr.merge_status in {
|
1735
2221
|
MRStatus.CANNOT_BE_MERGED,
|
1736
2222
|
MRStatus.CANNOT_BE_MERGED_RECHECK,
|
1737
|
-
|
2223
|
+
}:
|
1738
2224
|
continue
|
1739
2225
|
|
1740
2226
|
labels = mr.attributes.get("labels")
|
@@ -1744,10 +2230,14 @@ def app_interface_review_queue(ctx) -> None:
|
|
1744
2230
|
continue
|
1745
2231
|
if SAAS_FILE_UPDATE in labels:
|
1746
2232
|
continue
|
1747
|
-
if
|
2233
|
+
if (
|
2234
|
+
SELF_SERVICEABLE in labels
|
2235
|
+
and SHOW_SELF_SERVICEABLE_IN_REVIEW_QUEUE not in labels
|
2236
|
+
and AVS not in labels
|
2237
|
+
):
|
1748
2238
|
continue
|
1749
2239
|
|
1750
|
-
pipelines =
|
2240
|
+
pipelines = gl.get_merge_request_pipelines(mr)
|
1751
2241
|
if not pipelines:
|
1752
2242
|
continue
|
1753
2243
|
running_pipelines = [p for p in pipelines if p["status"] == "running"]
|
@@ -1799,7 +2289,10 @@ def app_interface_review_queue(ctx) -> None:
|
|
1799
2289
|
|
1800
2290
|
queue_data.sort(key=itemgetter("updated_at"))
|
1801
2291
|
ctx.obj["options"]["sort"] = False # do not sort
|
1802
|
-
print_output(ctx.obj["options"], queue_data, columns)
|
2292
|
+
text = print_output(ctx.obj["options"], queue_data, columns)
|
2293
|
+
if text:
|
2294
|
+
slack = slackapi_from_queries("app-interface-review-queue")
|
2295
|
+
slack.chat_post_message("```\n" + text + "\n```")
|
1803
2296
|
|
1804
2297
|
|
1805
2298
|
@get.command()
|
@@ -1819,7 +2312,7 @@ def app_interface_open_selfserviceable_mr_queue(ctx):
|
|
1819
2312
|
]
|
1820
2313
|
queue_data = []
|
1821
2314
|
for mr in merge_requests:
|
1822
|
-
if mr.
|
2315
|
+
if mr.draft:
|
1823
2316
|
continue
|
1824
2317
|
if len(mr.commits()) == 0:
|
1825
2318
|
continue
|
@@ -1841,7 +2334,7 @@ def app_interface_open_selfserviceable_mr_queue(ctx):
|
|
1841
2334
|
continue
|
1842
2335
|
|
1843
2336
|
# skip MRs where the pipeline is still running or where it failed
|
1844
|
-
pipelines =
|
2337
|
+
pipelines = gl.get_merge_request_pipelines(mr)
|
1845
2338
|
if not pipelines:
|
1846
2339
|
continue
|
1847
2340
|
running_pipelines = [p for p in pipelines if p["status"] == "running"]
|
@@ -1879,14 +2372,12 @@ def change_types(ctx) -> None:
|
|
1879
2372
|
usage_statistics[ss.change_type.name] += nr_files
|
1880
2373
|
data = []
|
1881
2374
|
for ct in change_types:
|
1882
|
-
data.append(
|
1883
|
-
|
1884
|
-
|
1885
|
-
|
1886
|
-
|
1887
|
-
|
1888
|
-
}
|
1889
|
-
)
|
2375
|
+
data.append({
|
2376
|
+
"name": ct.name,
|
2377
|
+
"description": ct.description,
|
2378
|
+
"applicable to": f"{ct.context_type.value} {ct.context_schema or ''}",
|
2379
|
+
"# usages": usage_statistics[ct.name],
|
2380
|
+
})
|
1890
2381
|
columns = ["name", "description", "applicable to", "# usages"]
|
1891
2382
|
print_output(ctx.obj["options"], data, columns)
|
1892
2383
|
|
@@ -1897,7 +2388,11 @@ def app_interface_merge_history(ctx):
|
|
1897
2388
|
settings = queries.get_app_interface_settings()
|
1898
2389
|
instance = queries.get_gitlab_instance()
|
1899
2390
|
gl = GitLabApi(instance, project_url=settings["repoUrl"], settings=settings)
|
1900
|
-
merge_requests = gl.project.mergerequests.list(
|
2391
|
+
merge_requests = gl.project.mergerequests.list(
|
2392
|
+
state=MRState.MERGED,
|
2393
|
+
per_page=100,
|
2394
|
+
get_all=False,
|
2395
|
+
)
|
1901
2396
|
|
1902
2397
|
columns = [
|
1903
2398
|
"id",
|
@@ -2043,69 +2538,637 @@ def selectorsyncset_managed_hypershift_resources(ctx, use_jump_host):
|
|
2043
2538
|
print_output(ctx.obj["options"], data, columns)
|
2044
2539
|
|
2045
2540
|
|
2046
|
-
@
|
2047
|
-
@
|
2541
|
+
@get.command()
@click.option(
    "--aws-access-key-id",
    help="AWS access key id",
    default=os.environ.get("QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_ACCESS_KEY_ID", None),
)
@click.option(
    "--aws-secret-access-key",
    help="AWS secret access key",
    default=os.environ.get(
        "QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_SECRET_ACCESS_KEY", None
    ),
)
@click.option(
    "--aws-region",
    help="AWS region",
    default=os.environ.get("QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_REGION", "us-east-1"),
)
@click.pass_context
def ec2_jenkins_workers(ctx, aws_access_key_id, aws_secret_access_key, aws_region):
    """Prints a list of jenkins workers and their status."""
    if not aws_access_key_id or not aws_secret_access_key:
        raise click.ClickException(
            "AWS credentials not provided. Either set them in the environment "
            "QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_ACCESS_KEY_ID "
            "and QONTRACT_CLI_EC2_JENKINS_WORKER_AWS_SECRET_ACCESS_KEY "
            "or pass them as arguments."
        )

    boto3.setup_default_session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        region_name=aws_region,
    )
    autoscaling = boto3.client("autoscaling")
    ec2 = boto3.resource("ec2")
    now = datetime.now(UTC)
    DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
    columns = [
        "type",
        "id",
        "IP",
        "instance type",
        "launch time (utc)",
        "OS",
        "AMI",
    ]
    results = []

    for asg in autoscaling.describe_auto_scaling_groups()["AutoScalingGroups"]:
        for member in asg["Instances"]:
            lifecycle_state = member["LifecycleState"]
            if lifecycle_state != "InService":
                logging.info(
                    f"instance is in lifecycle state {lifecycle_state} - ignoring instance"
                )
                continue
            instance = ec2.Instance(member["InstanceId"])
            if instance.state["Name"] != "running":
                continue
            # Derive the worker OS and Jenkins controller URL from instance
            # tags (local names avoid shadowing the `os` module).
            worker_os = ""
            controller_url = ""
            for tag in instance.tags:
                if tag.get("Key") == "os":
                    worker_os = tag.get("Value")
                if tag.get("Key") == "jenkins_controller":
                    controller_url = f"https://{tag.get('Value').replace('-', '.')}.devshift.net/computer/{instance.id}"
            image = ec2.Image(instance.image_id)
            commit_url = ""
            for tag in image.tags:
                if tag.get("Key") == "infra_commit":
                    commit_url = f"https://gitlab.cee.redhat.com/app-sre/infra/-/tree/{tag.get('Value')}"
            # Workers older than a day get an alarm-clock marker.
            launch_hours = (now - instance.launch_time).total_seconds() / 3600
            launch_emoji = "⏰" if launch_hours > 24 else "💫"
            results.append({
                "type": asg["AutoScalingGroupName"],
                "id": f"[{instance.id}]({controller_url})",
                "IP": instance.private_ip_address,
                "instance type": instance.instance_type,
                "launch time (utc)": f"{instance.launch_time.strftime(DATE_FORMAT)} {launch_emoji}",
                "OS": worker_os,
                "AMI": f"[{image.name}]({commit_url})",
            })

    print_output(ctx.obj["options"], results, columns)
|
2073
2631
|
|
2074
|
-
|
2075
|
-
@
|
2076
|
-
@click.argument("
|
2632
|
+
|
2633
|
+
@get.command()
@click.argument("status-board-instance")
@click.pass_context
def slo_document_services(ctx, status_board_instance):
    """Print SLO Documents Services

    Lists every SLO of every SLO document whose (product, app) pair is part
    of the given status-board instance's desired inventory.
    """
    columns = [
        "slo_doc_name",
        "product",
        "app",
        "slo",
        "sli_type",
        "sli_specification",
        "slo_details",
        "target",
        "target_unit",
        "window",
        "statusBoardEnabled",
    ]

    try:
        [sb] = [sb for sb in get_status_board() if sb.name == status_board_instance]
    except ValueError:
        print(f"Status-board instance '{status_board_instance}' not found.")
        sys.exit(1)

    desired_product_apps: dict[str, set[str]] = (
        StatusBoardExporterIntegration.get_product_apps(sb)
    )

    slodocs = []
    for slodoc in get_slo_documents():
        products = [ns.namespace.environment.product.name for ns in slodoc.namespaces]
        # Fix: the composed app name depends only on the document, so compute
        # it once instead of recomputing it inside the per-SLO/per-product loops.
        if slodoc.app.parent_app:
            app = f"{slodoc.app.parent_app.name}-{slodoc.app.name}"
        else:
            app = slodoc.app.name
        for slo in slodoc.slos:
            for product in products:
                # Skip if the (product, app) is not being generated by the status-board inventory
                if app not in desired_product_apps.get(product, set()):
                    continue

                item = {
                    "slo_doc_name": slodoc.name,
                    "product": product,
                    "app": app,
                    "slo": slo.name,
                    "sli_type": slo.sli_type,
                    "sli_specification": slo.sli_specification,
                    "slo_details": slo.slo_details,
                    "target": slo.slo_target,
                    "target_unit": slo.slo_target_unit,
                    "window": slo.slo_parameters.window,
                    "statusBoardService": f"{product}/{slodoc.app.name}/{slo.name}",
                    "statusBoardEnabled": "statusBoard" in slodoc.labels,
                }
                slodocs.append(item)

    print_output(ctx.obj["options"], slodocs, columns)
|
2696
|
+
|
2697
|
+
|
2698
|
+
@get.command()
|
2699
|
+
@click.argument("file_path")
|
2099
2700
|
@click.pass_context
|
2100
|
-
def
|
2101
|
-
|
2102
|
-
|
2103
|
-
|
2104
|
-
|
2105
|
-
|
2106
|
-
|
2107
|
-
|
2108
|
-
|
2701
|
+
def alerts(ctx, file_path):
|
2702
|
+
BIG_NUMBER = 10
|
2703
|
+
|
2704
|
+
def sort_by_threshold(item: dict[str, str]) -> int:
|
2705
|
+
threshold = item["threshold"]
|
2706
|
+
if not threshold:
|
2707
|
+
return BIG_NUMBER * 60 * 24
|
2708
|
+
value = int(threshold[:-1])
|
2709
|
+
unit = threshold[-1]
|
2710
|
+
match unit:
|
2711
|
+
case "m":
|
2712
|
+
return value
|
2713
|
+
case "h":
|
2714
|
+
return value * 60
|
2715
|
+
case "d":
|
2716
|
+
return value * 60 * 24
|
2717
|
+
case _:
|
2718
|
+
return BIG_NUMBER * 60 * 24
|
2719
|
+
|
2720
|
+
def sort_by_severity(item: dict[str, str]) -> int:
|
2721
|
+
match item["severity"].lower():
|
2722
|
+
case "critical":
|
2723
|
+
return 0
|
2724
|
+
case "warning":
|
2725
|
+
return 1
|
2726
|
+
case "info":
|
2727
|
+
return 2
|
2728
|
+
case _:
|
2729
|
+
return BIG_NUMBER
|
2730
|
+
|
2731
|
+
with open(file_path, encoding="locale") as f:
|
2732
|
+
content = json.loads(f.read())
|
2733
|
+
|
2734
|
+
columns = [
|
2735
|
+
"name",
|
2736
|
+
"summary",
|
2737
|
+
"severity",
|
2738
|
+
"threshold",
|
2739
|
+
"description",
|
2740
|
+
]
|
2741
|
+
data = []
|
2742
|
+
prometheus_rules = content["items"]
|
2743
|
+
for prom_rule in prometheus_rules:
|
2744
|
+
groups = prom_rule["spec"]["groups"]
|
2745
|
+
for group in groups:
|
2746
|
+
rules = group["rules"]
|
2747
|
+
for rule in rules:
|
2748
|
+
name = rule.get("alert")
|
2749
|
+
summary = rule.get("annotations", {}).get("summary")
|
2750
|
+
message = rule.get("annotations", {}).get("message")
|
2751
|
+
severity = rule.get("labels", {}).get("severity")
|
2752
|
+
description = rule.get("annotations", {}).get("description")
|
2753
|
+
threshold = rule.get("for")
|
2754
|
+
if name:
|
2755
|
+
data.append({
|
2756
|
+
"name": name,
|
2757
|
+
"summary": "`" + (summary or message).replace("\n", " ") + "`"
|
2758
|
+
if summary or message
|
2759
|
+
else "",
|
2760
|
+
"severity": severity,
|
2761
|
+
"threshold": threshold,
|
2762
|
+
"description": "`" + description.replace("\n", " ") + "`"
|
2763
|
+
if description
|
2764
|
+
else "",
|
2765
|
+
})
|
2766
|
+
ctx.obj["options"]["sort"] = False
|
2767
|
+
data = sorted(data, key=sort_by_threshold)
|
2768
|
+
data = sorted(data, key=sort_by_severity)
|
2769
|
+
print_output(ctx.obj["options"], data, columns)
|
2770
|
+
|
2771
|
+
|
2772
|
+
@get.command()
@click.pass_context
def aws_cost_report(ctx):
    """Build and print the AWS cost report."""
    print(AwsCostReportCommand.create().execute())
|
2777
|
+
|
2778
|
+
|
2779
|
+
@get.command()
@click.pass_context
def openshift_cost_report(ctx):
    """Build and print the OpenShift cost report."""
    print(OpenShiftCostReportCommand.create().execute())
|
2784
|
+
|
2785
|
+
|
2786
|
+
@get.command()
@click.pass_context
def openshift_cost_optimization_report(ctx):
    """Build and print the OpenShift cost optimization report."""
    print(OpenShiftCostOptimizationReportCommand.create().execute())
|
2791
|
+
|
2792
|
+
|
2793
|
+
@get.command()
@click.pass_context
def osd_component_versions(ctx):
    """List the deployed ref of every saas target in an OSDv4 environment."""
    # Fix: a set instead of a list — the membership test below runs once per
    # saas target, so O(1) lookups instead of a linear scan each time.
    osd_environments = {
        e["name"] for e in queries.get_environments() if e["product"]["name"] == "OSDv4"
    }
    data = []
    for sf in get_saas_files():
        for rt in sf.resource_templates:
            for t in rt.targets:
                if t.namespace.environment.name not in osd_environments:
                    continue
                data.append({
                    "environment": t.namespace.environment.name,
                    "namespace": t.namespace.name,
                    "cluster": t.namespace.cluster.name,
                    "app": sf.app.name,
                    "saas_file": sf.name,
                    "resource_template": rt.name,
                    # markdown link to the deployed ref in the repo
                    "ref": f"[{t.ref}]({rt.url}/blob/{t.ref}{rt.path})",
                })

    columns = [
        "environment",
        "namespace",
        "cluster",
        "app",
        "saas_file",
        "resource_template",
        "ref",
    ]
    print_output(ctx.obj["options"], data, columns)
|
2827
|
+
|
2828
|
+
|
2829
|
+
@get.command()
@click.pass_context
def maintenances(ctx):
    """List announced maintenances that have not started yet."""
    now = datetime.now(UTC)
    announced = maintenances_gql.query(gql.get_api().query).maintenances or []
    data = []
    for m in announced:
        # only future maintenances are of interest
        if datetime.fromisoformat(m.scheduled_start) <= now:
            continue
        row = m.dict()
        row["services"] = ", ".join(a.name for a in m.affected_services)
        data.append(row)
    columns = [
        "name",
        "scheduled_start",
        "scheduled_end",
        "services",
    ]
    print_output(ctx.obj["options"], data, columns)
|
2849
|
+
|
2850
|
+
|
2851
|
+
class MigrationStatusCount:
|
2852
|
+
def __init__(self, app: str) -> None:
|
2853
|
+
self.app = app
|
2854
|
+
self._source = 0
|
2855
|
+
self._target = 0
|
2856
|
+
|
2857
|
+
def inc(self, source_or_target: str) -> None:
|
2858
|
+
match source_or_target:
|
2859
|
+
case "source":
|
2860
|
+
self._source += 1
|
2861
|
+
case "target":
|
2862
|
+
self._target += 1
|
2863
|
+
case _:
|
2864
|
+
raise ValueError("hcp migration label must be source or target")
|
2865
|
+
|
2866
|
+
@property
|
2867
|
+
def classic(self) -> int:
|
2868
|
+
return self._source
|
2869
|
+
|
2870
|
+
@property
|
2871
|
+
def hcp(self) -> int:
|
2872
|
+
return self._target
|
2873
|
+
|
2874
|
+
@property
|
2875
|
+
def total(self) -> int:
|
2876
|
+
return self.classic + self.hcp
|
2877
|
+
|
2878
|
+
@property
|
2879
|
+
def progress(self) -> float:
|
2880
|
+
return round(self.hcp / self.total * 100, 0)
|
2881
|
+
|
2882
|
+
@property
|
2883
|
+
def item(self) -> dict[str, Any]:
|
2884
|
+
return {
|
2885
|
+
"app": self.app,
|
2886
|
+
"classic": self.classic or "0",
|
2887
|
+
"hcp": self.hcp or "0",
|
2888
|
+
"progress": self.progress or "0",
|
2889
|
+
}
|
2890
|
+
|
2891
|
+
|
2892
|
+
@get.command()
@click.pass_context
def hcp_migration_status(ctx):
    """Summarize HCP migration progress per app across all saas file targets."""
    counts: dict[str, MigrationStatusCount] = {}
    total_count = MigrationStatusCount("total")
    for sf in get_saas_files():
        # ignore post deployment test saas files
        if sf.publish_job_logs:
            continue
        for rt in sf.resource_templates:
            # ignore grafana dashboards
            if rt.provider == "directory" or "dashboard" in rt.name:
                continue
            for t in rt.targets:
                # ignore openshift namespaces
                if t.namespace.name.startswith("openshift-"):
                    continue
                # ignore per-cluster namespaces
                if t.namespace.path.startswith("/openshift/"):
                    continue
                if t.delete:
                    continue
                hcp_migration = t.namespace.cluster.labels.get("hcp_migration")
                if not hcp_migration:
                    continue
                app = sf.app.parent_app.name if sf.app.parent_app else sf.app.name
                counts.setdefault(app, MigrationStatusCount(app)).inc(hcp_migration)
                total_count.inc(hcp_migration)

    data = [c.item for c in counts.values()]
    print(
        f"SUMMARY: {total_count.hcp} / {total_count.total} COMPLETED ({total_count.progress}%)"
    )
    columns = ["app", "classic", "hcp", "progress"]
    print_output(ctx.obj["options"], data, columns)
|
2927
|
+
|
2928
|
+
|
2929
|
+
@get.command()
|
2930
|
+
@click.pass_context
|
2931
|
+
def systems_and_tools(ctx):
|
2932
|
+
print(
|
2933
|
+
f"This report is obtained from app-interface Graphql endpoint available at: {config.get_config()['graphql']['server']}"
|
2934
|
+
)
|
2935
|
+
inventory = get_systems_and_tools_inventory()
|
2936
|
+
print_output(ctx.obj["options"], inventory.data, inventory.columns)
|
2937
|
+
|
2938
|
+
|
2939
|
+
@get.command(short_help="get integration logs")
|
2940
|
+
@click.argument("integration_name")
|
2941
|
+
@click.option(
|
2942
|
+
"--environment_name", default="production", help="environment to get logs from"
|
2943
|
+
)
|
2944
|
+
@click.pass_context
|
2945
|
+
def logs(ctx, integration_name: str, environment_name: str):
|
2946
|
+
integrations = [
|
2947
|
+
i
|
2948
|
+
for i in integrations_gql.query(query_func=gql.get_api().query).integrations
|
2949
|
+
or []
|
2950
|
+
if i.name == integration_name
|
2951
|
+
]
|
2952
|
+
if not integrations:
|
2953
|
+
print("integration not found")
|
2954
|
+
return
|
2955
|
+
integration = integrations[0]
|
2956
|
+
vault_settings = get_app_interface_vault_settings()
|
2957
|
+
secret_reader = create_secret_reader(use_vault=vault_settings.vault)
|
2958
|
+
managed = integration.managed
|
2959
|
+
if not managed:
|
2960
|
+
print("integration is not managed")
|
2961
|
+
return
|
2962
|
+
namespaces = [
|
2963
|
+
m.namespace
|
2964
|
+
for m in managed
|
2965
|
+
if m.namespace.cluster.labels
|
2966
|
+
and m.namespace.cluster.labels.get("environment") == environment_name
|
2967
|
+
]
|
2968
|
+
if not namespaces:
|
2969
|
+
print(f"no managed {environment_name} namespace found")
|
2970
|
+
return
|
2971
|
+
namespace = namespaces[0]
|
2972
|
+
cluster = namespaces[0].cluster
|
2973
|
+
if not cluster.automation_token:
|
2974
|
+
print("cluster automation token not found")
|
2975
|
+
return
|
2976
|
+
token = secret_reader.read_secret(cluster.automation_token)
|
2977
|
+
|
2978
|
+
command = f"oc --server {cluster.server_url} --token {token} --namespace {namespace.name} logs -c int -l app=qontract-reconcile-{integration.name}"
|
2979
|
+
print(command)
|
2980
|
+
|
2981
|
+
|
2982
|
+
@get.command
|
2983
|
+
@click.pass_context
|
2984
|
+
def jenkins_jobs(ctx):
|
2985
|
+
jenkins_configs = queries.get_jenkins_configs()
|
2986
|
+
|
2987
|
+
# stats dicts
|
2988
|
+
apps = {}
|
2989
|
+
totals = {"rhel8": 0, "other": 0}
|
2990
|
+
|
2991
|
+
for jc in jenkins_configs:
|
2992
|
+
app_name = jc["app"]["name"]
|
2993
|
+
|
2994
|
+
if app_name not in apps:
|
2995
|
+
apps[app_name] = {"rhel8": 0, "other": 0}
|
2996
|
+
|
2997
|
+
config = json.loads(jc["config"]) if jc["config"] else []
|
2998
|
+
for c in config:
|
2999
|
+
if "project" not in c:
|
3000
|
+
continue
|
3001
|
+
|
3002
|
+
project = c["project"]
|
3003
|
+
root_node = project.get("node") or ""
|
3004
|
+
if "jobs" not in project:
|
3005
|
+
continue
|
3006
|
+
|
3007
|
+
for pj in project["jobs"]:
|
3008
|
+
for job in pj.values():
|
3009
|
+
node = job.get("node", root_node)
|
3010
|
+
if node in {"rhel8", "rhel8-app-interface"}:
|
3011
|
+
apps[app_name]["rhel8"] += 1
|
3012
|
+
totals["rhel8"] += 1
|
3013
|
+
else:
|
3014
|
+
apps[app_name]["other"] += 1
|
3015
|
+
totals["other"] += 1
|
3016
|
+
|
3017
|
+
results = [
|
3018
|
+
{"app": app} | stats
|
3019
|
+
for app, stats in sorted(apps.items(), key=lambda i: i[0].lower())
|
3020
|
+
if not (stats["other"] == 0 and stats["rhel8"] == 0)
|
3021
|
+
]
|
3022
|
+
results.append({"app": "TOTALS"} | totals)
|
3023
|
+
|
3024
|
+
if ctx.obj["options"]["output"] == "md":
|
3025
|
+
json_table = {
|
3026
|
+
"filter": True,
|
3027
|
+
"fields": [
|
3028
|
+
{"key": "app"},
|
3029
|
+
{"key": "other"},
|
3030
|
+
{"key": "rhel8"},
|
3031
|
+
],
|
3032
|
+
"items": results,
|
3033
|
+
}
|
3034
|
+
|
3035
|
+
print(
|
3036
|
+
f"""
|
3037
|
+
You can view the source of this Markdown to extract the JSON data.
|
3038
|
+
|
3039
|
+
{len(results)} apps with Jenkins jobs
|
3040
|
+
|
3041
|
+
```json:table
|
3042
|
+
{json.dumps(json_table)}
|
3043
|
+
```
|
3044
|
+
"""
|
3045
|
+
)
|
3046
|
+
else:
|
3047
|
+
columns = ["app", "other", "rhel8"]
|
3048
|
+
ctx.obj["options"]["sort"] = False
|
3049
|
+
print_output(ctx.obj["options"], results, columns)
|
3050
|
+
|
3051
|
+
|
3052
|
+
@get.command
|
3053
|
+
@click.pass_context
|
3054
|
+
def container_image_details(ctx):
|
3055
|
+
apps = get_apps_quay_repos_escalation_policies()
|
3056
|
+
data: list[dict[str, str]] = []
|
3057
|
+
for app in apps:
|
3058
|
+
app_name = f"{app.parent_app.name}/{app.name}" if app.parent_app else app.name
|
3059
|
+
ep_channels = app.escalation_policy.channels
|
3060
|
+
email = ep_channels.email
|
3061
|
+
slack = ep_channels.slack_user_group[0].handle
|
3062
|
+
for org_items in app.quay_repos or []:
|
3063
|
+
org_name = org_items.org.name
|
3064
|
+
for repo in org_items.items or []:
|
3065
|
+
if repo.mirror:
|
3066
|
+
continue
|
3067
|
+
repository = f"quay.io/{org_name}/{repo.name}"
|
3068
|
+
item = {
|
3069
|
+
"app": app_name,
|
3070
|
+
"repository": repository,
|
3071
|
+
"email": email,
|
3072
|
+
"slack": slack,
|
3073
|
+
}
|
3074
|
+
data.append(item)
|
3075
|
+
columns = ["app", "repository", "email", "slack"]
|
3076
|
+
print_output(ctx.obj["options"], data, columns)
|
3077
|
+
|
3078
|
+
|
3079
|
+
@get.command
|
3080
|
+
@click.pass_context
|
3081
|
+
def change_log_tracking(ctx):
|
3082
|
+
repo_url = get_app_interface_repo_url()
|
3083
|
+
change_types = fetch_change_type_processors(gql.get_api(), NoOpFileDiffResolver())
|
3084
|
+
state = init_state(integration=cl.QONTRACT_INTEGRATION)
|
3085
|
+
change_log = ChangeLog(**state.get(BUNDLE_DIFFS_OBJ))
|
3086
|
+
data: list[dict[str, str]] = []
|
3087
|
+
for item in change_log.items:
|
3088
|
+
change_log_item = ChangeLogItem(**item)
|
3089
|
+
commit = change_log_item.commit
|
3090
|
+
covered_change_types_descriptions = [
|
3091
|
+
ct.description
|
3092
|
+
for ct in change_types
|
3093
|
+
if ct.name in change_log_item.change_types
|
3094
|
+
]
|
3095
|
+
item = {
|
3096
|
+
"commit": f"[{commit[:7]}]({repo_url}/commit/{commit})",
|
3097
|
+
"merged_at": change_log_item.merged_at,
|
3098
|
+
"apps": ", ".join(change_log_item.apps),
|
3099
|
+
"changes": ", ".join(covered_change_types_descriptions),
|
3100
|
+
}
|
3101
|
+
data.append(item)
|
3102
|
+
|
3103
|
+
# TODO(mafriedm): Fix this
|
3104
|
+
ctx.obj["options"]["sort"] = False
|
3105
|
+
columns = ["commit", "merged_at", "apps", "changes"]
|
3106
|
+
print_output(ctx.obj["options"], data, columns)
|
3107
|
+
|
3108
|
+
|
3109
|
+
@root.group(name="set")
|
3110
|
+
@output
|
3111
|
+
@click.pass_context
|
3112
|
+
def set_command(ctx, output):
|
3113
|
+
ctx.obj["output"] = output
|
3114
|
+
|
3115
|
+
|
3116
|
+
@set_command.command()
|
3117
|
+
@click.argument("workspace")
|
3118
|
+
@click.argument("usergroup")
|
3119
|
+
@click.argument("username")
|
3120
|
+
@click.pass_context
|
3121
|
+
def slack_usergroup(ctx, workspace, usergroup, username):
|
3122
|
+
"""Update users in a slack usergroup.
|
3123
|
+
Use an org_username as the username.
|
3124
|
+
To empty a slack usergroup, pass '' (empty string) as the username.
|
3125
|
+
"""
|
3126
|
+
settings = queries.get_app_interface_settings()
|
3127
|
+
slack = slackapi_from_queries("qontract-cli")
|
3128
|
+
ugid = slack.get_usergroup_id(usergroup)
|
3129
|
+
if username:
|
3130
|
+
mail_address = settings["smtp"]["mailAddress"]
|
3131
|
+
users = [slack.get_user_id_by_name(username, mail_address)]
|
3132
|
+
else:
|
3133
|
+
users = [slack.get_random_deleted_user()]
|
3134
|
+
slack.update_usergroup_users(ugid, users)
|
3135
|
+
|
3136
|
+
|
3137
|
+
@set_command.command()
|
3138
|
+
@click.argument("org_name")
|
3139
|
+
@click.argument("cluster_name")
|
3140
|
+
@click.pass_context
|
3141
|
+
def cluster_admin(ctx, org_name, cluster_name):
|
3142
|
+
settings = queries.get_app_interface_settings()
|
3143
|
+
ocms = [
|
3144
|
+
o for o in queries.get_openshift_cluster_managers() if o["name"] == org_name
|
3145
|
+
]
|
3146
|
+
ocm_map = OCMMap(ocms=ocms, settings=settings)
|
3147
|
+
ocm = ocm_map[org_name]
|
3148
|
+
enabled = ocm.is_cluster_admin_enabled(cluster_name)
|
3149
|
+
if not enabled:
|
3150
|
+
ocm.enable_cluster_admin(cluster_name)
|
3151
|
+
|
3152
|
+
|
3153
|
+
@root.group()
|
3154
|
+
@environ(["APP_INTERFACE_STATE_BUCKET"])
|
3155
|
+
@click.pass_context
|
3156
|
+
def state(ctx):
|
3157
|
+
pass
|
3158
|
+
|
3159
|
+
|
3160
|
+
@state.command()
|
3161
|
+
@click.argument("integration", default="")
|
3162
|
+
@click.pass_context
|
3163
|
+
def ls(ctx, integration):
|
3164
|
+
state = init_state(integration=integration)
|
3165
|
+
keys = state.ls()
|
3166
|
+
# if integration in not defined the 2th token will be the integration name
|
3167
|
+
key_index = 1 if integration else 2
|
3168
|
+
table_content = [
|
3169
|
+
{
|
3170
|
+
"integration": integration or k.split("/")[1],
|
3171
|
+
"key": "/".join(k.split("/")[key_index:]),
|
2109
3172
|
}
|
2110
3173
|
for k in keys
|
2111
3174
|
]
|
@@ -2114,11 +3177,11 @@ def ls(ctx, integration):
|
|
2114
3177
|
)
|
2115
3178
|
|
2116
3179
|
|
2117
|
-
@state.command()
|
3180
|
+
@state.command(name="get")
|
2118
3181
|
@click.argument("integration")
|
2119
3182
|
@click.argument("key")
|
2120
3183
|
@click.pass_context
|
2121
|
-
def
|
3184
|
+
def state_get(ctx, integration, key):
|
2122
3185
|
state = init_state(integration=integration)
|
2123
3186
|
value = state.get(key)
|
2124
3187
|
print(value)
|
@@ -2133,12 +3196,12 @@ def add(ctx, integration, key):
|
|
2133
3196
|
state.add(key)
|
2134
3197
|
|
2135
3198
|
|
2136
|
-
@state.command()
|
3199
|
+
@state.command(name="set")
|
2137
3200
|
@click.argument("integration")
|
2138
3201
|
@click.argument("key")
|
2139
3202
|
@click.argument("value")
|
2140
3203
|
@click.pass_context
|
2141
|
-
def
|
3204
|
+
def state_set(ctx, integration, key, value):
|
2142
3205
|
state = init_state(integration=integration)
|
2143
3206
|
state.add(key, value=value, force=True)
|
2144
3207
|
|
@@ -2152,6 +3215,259 @@ def rm(ctx, integration, key):
|
|
2152
3215
|
state.rm(key)
|
2153
3216
|
|
2154
3217
|
|
3218
|
+
@root.group()
|
3219
|
+
@environ(["APP_INTERFACE_STATE_BUCKET"])
|
3220
|
+
@click.pass_context
|
3221
|
+
def early_exit_cache(ctx):
|
3222
|
+
pass
|
3223
|
+
|
3224
|
+
|
3225
|
+
@early_exit_cache.command(name="head")
|
3226
|
+
@click.option(
|
3227
|
+
"-i",
|
3228
|
+
"--integration",
|
3229
|
+
help="Integration name.",
|
3230
|
+
required=True,
|
3231
|
+
)
|
3232
|
+
@click.option(
|
3233
|
+
"-v",
|
3234
|
+
"--integration-version",
|
3235
|
+
help="Integration version.",
|
3236
|
+
required=True,
|
3237
|
+
)
|
3238
|
+
@click.option(
|
3239
|
+
"--dry-run/--no-dry-run",
|
3240
|
+
help="",
|
3241
|
+
default=False,
|
3242
|
+
)
|
3243
|
+
@click.option(
|
3244
|
+
"-c",
|
3245
|
+
"--cache-source",
|
3246
|
+
help="Cache source. It should be a JSON string.",
|
3247
|
+
required=True,
|
3248
|
+
)
|
3249
|
+
@click.option(
|
3250
|
+
"-s",
|
3251
|
+
"--shard",
|
3252
|
+
help="Shard",
|
3253
|
+
default="",
|
3254
|
+
)
|
3255
|
+
@click.pass_context
|
3256
|
+
def early_exit_cache_head(
|
3257
|
+
ctx,
|
3258
|
+
integration,
|
3259
|
+
integration_version,
|
3260
|
+
dry_run,
|
3261
|
+
cache_source,
|
3262
|
+
shard,
|
3263
|
+
):
|
3264
|
+
with EarlyExitCache.build() as cache:
|
3265
|
+
cache_key = CacheKey(
|
3266
|
+
integration=integration,
|
3267
|
+
integration_version=integration_version,
|
3268
|
+
dry_run=dry_run,
|
3269
|
+
cache_source=json.loads(cache_source),
|
3270
|
+
shard=shard,
|
3271
|
+
)
|
3272
|
+
print(f"cache_source_digest: {cache_key.cache_source_digest}")
|
3273
|
+
result = cache.head(cache_key)
|
3274
|
+
print(result)
|
3275
|
+
|
3276
|
+
|
3277
|
+
@early_exit_cache.command(name="get")
|
3278
|
+
@click.option(
|
3279
|
+
"-i",
|
3280
|
+
"--integration",
|
3281
|
+
help="Integration name.",
|
3282
|
+
required=True,
|
3283
|
+
)
|
3284
|
+
@click.option(
|
3285
|
+
"-v",
|
3286
|
+
"--integration-version",
|
3287
|
+
help="Integration version.",
|
3288
|
+
required=True,
|
3289
|
+
)
|
3290
|
+
@click.option(
|
3291
|
+
"--dry-run/--no-dry-run",
|
3292
|
+
help="",
|
3293
|
+
default=False,
|
3294
|
+
)
|
3295
|
+
@click.option(
|
3296
|
+
"-c",
|
3297
|
+
"--cache-source",
|
3298
|
+
help="Cache source. It should be a JSON string.",
|
3299
|
+
required=True,
|
3300
|
+
)
|
3301
|
+
@click.option(
|
3302
|
+
"-s",
|
3303
|
+
"--shard",
|
3304
|
+
help="Shard",
|
3305
|
+
default="",
|
3306
|
+
)
|
3307
|
+
@click.pass_context
|
3308
|
+
def early_exit_cache_get(
|
3309
|
+
ctx,
|
3310
|
+
integration,
|
3311
|
+
integration_version,
|
3312
|
+
dry_run,
|
3313
|
+
cache_source,
|
3314
|
+
shard,
|
3315
|
+
):
|
3316
|
+
with EarlyExitCache.build() as cache:
|
3317
|
+
cache_key = CacheKey(
|
3318
|
+
integration=integration,
|
3319
|
+
integration_version=integration_version,
|
3320
|
+
dry_run=dry_run,
|
3321
|
+
cache_source=json.loads(cache_source),
|
3322
|
+
shard=shard,
|
3323
|
+
)
|
3324
|
+
value = cache.get(cache_key)
|
3325
|
+
print(value)
|
3326
|
+
|
3327
|
+
|
3328
|
+
@early_exit_cache.command(name="set")
|
3329
|
+
@click.option(
|
3330
|
+
"-i",
|
3331
|
+
"--integration",
|
3332
|
+
help="Integration name.",
|
3333
|
+
required=True,
|
3334
|
+
)
|
3335
|
+
@click.option(
|
3336
|
+
"-v",
|
3337
|
+
"--integration-version",
|
3338
|
+
help="Integration version.",
|
3339
|
+
required=True,
|
3340
|
+
)
|
3341
|
+
@click.option(
|
3342
|
+
"--dry-run/--no-dry-run",
|
3343
|
+
help="",
|
3344
|
+
default=False,
|
3345
|
+
)
|
3346
|
+
@click.option(
|
3347
|
+
"-c",
|
3348
|
+
"--cache-source",
|
3349
|
+
help="Cache source. It should be a JSON string.",
|
3350
|
+
required=True,
|
3351
|
+
)
|
3352
|
+
@click.option(
|
3353
|
+
"-s",
|
3354
|
+
"--shard",
|
3355
|
+
help="Shard",
|
3356
|
+
default="",
|
3357
|
+
)
|
3358
|
+
@click.option(
|
3359
|
+
"-p",
|
3360
|
+
"--payload",
|
3361
|
+
help="Payload in Cache value. It should be a JSON string.",
|
3362
|
+
required=True,
|
3363
|
+
)
|
3364
|
+
@click.option(
|
3365
|
+
"-l",
|
3366
|
+
"--log-output",
|
3367
|
+
help="Log output.",
|
3368
|
+
default="",
|
3369
|
+
)
|
3370
|
+
@click.option(
|
3371
|
+
"-a",
|
3372
|
+
"--applied-count",
|
3373
|
+
help="Log output.",
|
3374
|
+
default=0,
|
3375
|
+
type=int,
|
3376
|
+
)
|
3377
|
+
@click.option(
|
3378
|
+
"-t",
|
3379
|
+
"--ttl",
|
3380
|
+
help="TTL, in seconds.",
|
3381
|
+
default=60,
|
3382
|
+
type=int,
|
3383
|
+
)
|
3384
|
+
@click.option(
|
3385
|
+
"-d",
|
3386
|
+
"--latest-cache-source-digest",
|
3387
|
+
help="Latest cache source digest.",
|
3388
|
+
default="",
|
3389
|
+
)
|
3390
|
+
@click.pass_context
|
3391
|
+
def early_exit_cache_set(
|
3392
|
+
ctx,
|
3393
|
+
integration,
|
3394
|
+
integration_version,
|
3395
|
+
dry_run,
|
3396
|
+
cache_source,
|
3397
|
+
shard,
|
3398
|
+
payload,
|
3399
|
+
log_output,
|
3400
|
+
applied_count,
|
3401
|
+
ttl,
|
3402
|
+
latest_cache_source_digest,
|
3403
|
+
):
|
3404
|
+
with EarlyExitCache.build() as cache:
|
3405
|
+
cache_key = CacheKey(
|
3406
|
+
integration=integration,
|
3407
|
+
integration_version=integration_version,
|
3408
|
+
dry_run=dry_run,
|
3409
|
+
cache_source=json.loads(cache_source),
|
3410
|
+
shard=shard,
|
3411
|
+
)
|
3412
|
+
cache_value = CacheValue(
|
3413
|
+
payload=json.loads(payload),
|
3414
|
+
log_output=log_output,
|
3415
|
+
applied_count=applied_count,
|
3416
|
+
)
|
3417
|
+
cache.set(cache_key, cache_value, ttl, latest_cache_source_digest)
|
3418
|
+
|
3419
|
+
|
3420
|
+
@early_exit_cache.command(name="delete")
|
3421
|
+
@click.option(
|
3422
|
+
"-i",
|
3423
|
+
"--integration",
|
3424
|
+
help="Integration name.",
|
3425
|
+
required=True,
|
3426
|
+
)
|
3427
|
+
@click.option(
|
3428
|
+
"-v",
|
3429
|
+
"--integration-version",
|
3430
|
+
help="Integration version.",
|
3431
|
+
required=True,
|
3432
|
+
)
|
3433
|
+
@click.option(
|
3434
|
+
"--dry-run/--no-dry-run",
|
3435
|
+
help="",
|
3436
|
+
default=False,
|
3437
|
+
)
|
3438
|
+
@click.option(
|
3439
|
+
"-d",
|
3440
|
+
"--cache-source-digest",
|
3441
|
+
help="Cache source digest.",
|
3442
|
+
required=True,
|
3443
|
+
)
|
3444
|
+
@click.option(
|
3445
|
+
"-s",
|
3446
|
+
"--shard",
|
3447
|
+
help="Shard",
|
3448
|
+
default="",
|
3449
|
+
)
|
3450
|
+
@click.pass_context
|
3451
|
+
def early_exit_cache_delete(
|
3452
|
+
ctx,
|
3453
|
+
integration,
|
3454
|
+
integration_version,
|
3455
|
+
dry_run,
|
3456
|
+
cache_source_digest,
|
3457
|
+
shard,
|
3458
|
+
):
|
3459
|
+
with EarlyExitCache.build() as cache:
|
3460
|
+
cache_key_with_digest = CacheKeyWithDigest(
|
3461
|
+
integration=integration,
|
3462
|
+
integration_version=integration_version,
|
3463
|
+
dry_run=dry_run,
|
3464
|
+
cache_source_digest=cache_source_digest,
|
3465
|
+
shard=shard,
|
3466
|
+
)
|
3467
|
+
cache.delete(cache_key_with_digest)
|
3468
|
+
print("deleted")
|
3469
|
+
|
3470
|
+
|
2155
3471
|
@root.command()
|
2156
3472
|
@click.argument("cluster")
|
2157
3473
|
@click.argument("namespace")
|
@@ -2205,6 +3521,13 @@ def template(ctx, cluster, namespace, kind, name, path, secret_reader):
|
|
2205
3521
|
|
2206
3522
|
|
2207
3523
|
@root.command()
|
3524
|
+
@binary(["promtool"])
|
3525
|
+
@binary_version(
|
3526
|
+
"promtool",
|
3527
|
+
["--version"],
|
3528
|
+
promtool.PROMTOOL_VERSION_REGEX,
|
3529
|
+
promtool.PROMTOOL_VERSION,
|
3530
|
+
)
|
2208
3531
|
@click.argument("path")
|
2209
3532
|
@click.argument("cluster")
|
2210
3533
|
@click.option(
|
@@ -2231,7 +3554,7 @@ def run_prometheus_test(ctx, path, cluster, namespace, secret_reader):
|
|
2231
3554
|
|
2232
3555
|
namespace_with_prom_rules, _ = orb.get_namespaces(
|
2233
3556
|
["prometheus-rule"],
|
2234
|
-
|
3557
|
+
cluster_names=[cluster] if cluster else [],
|
2235
3558
|
namespace_name=namespace,
|
2236
3559
|
)
|
2237
3560
|
|
@@ -2260,113 +3583,10 @@ def run_prometheus_test(ctx, path, cluster, namespace, secret_reader):
|
|
2260
3583
|
|
2261
3584
|
|
2262
3585
|
@root.command()
|
2263
|
-
@
|
2264
|
-
@
|
2265
|
-
|
2266
|
-
"-n",
|
2267
|
-
"--namespace",
|
2268
|
-
default="openshift-customer-monitoring",
|
2269
|
-
help="Cluster namespace where the rules are deployed. It defaults to "
|
2270
|
-
"openshift-customer-monitoring.",
|
3586
|
+
@binary(["amtool"])
|
3587
|
+
@binary_version(
|
3588
|
+
"amtool", ["--version"], amtool.AMTOOL_VERSION_REGEX, amtool.AMTOOL_VERSION
|
2271
3589
|
)
|
2272
|
-
@click.option(
|
2273
|
-
"-s",
|
2274
|
-
"--secret-reader",
|
2275
|
-
default="vault",
|
2276
|
-
help="Location to read secrets.",
|
2277
|
-
type=click.Choice(["config", "vault"]),
|
2278
|
-
)
|
2279
|
-
@click.pass_context
|
2280
|
-
def run_prometheus_test_old(ctx, path, cluster, namespace, secret_reader):
|
2281
|
-
"""Run prometheus tests in PATH loading associated rules from CLUSTER."""
|
2282
|
-
gqlapi = gql.get_api()
|
2283
|
-
|
2284
|
-
if path.startswith("resources"):
|
2285
|
-
path = path.replace("resources", "", 1)
|
2286
|
-
|
2287
|
-
try:
|
2288
|
-
resource = gqlapi.get_resource(path)
|
2289
|
-
except gql.GqlGetResourceError as e:
|
2290
|
-
print(f"Error in provided PATH: {e}.")
|
2291
|
-
sys.exit(1)
|
2292
|
-
|
2293
|
-
test = resource["content"]
|
2294
|
-
data = get_data_from_jinja_test_template(test, ["rule_files", "target_clusters"])
|
2295
|
-
target_clusters = data["target_clusters"]
|
2296
|
-
if len(target_clusters) > 0 and cluster not in target_clusters:
|
2297
|
-
print(
|
2298
|
-
f"Skipping test: {path}, cluster {cluster} not in target_clusters {target_clusters}"
|
2299
|
-
)
|
2300
|
-
rule_files = data["rule_files"]
|
2301
|
-
if not rule_files:
|
2302
|
-
print(f"Cannot parse test in {path}.")
|
2303
|
-
sys.exit(1)
|
2304
|
-
|
2305
|
-
if len(rule_files) > 1:
|
2306
|
-
print("Only 1 rule file per test")
|
2307
|
-
sys.exit(1)
|
2308
|
-
|
2309
|
-
rule_file_path = rule_files[0]
|
2310
|
-
|
2311
|
-
namespace_info = [
|
2312
|
-
n
|
2313
|
-
for n in gqlapi.query(orb.NAMESPACES_QUERY)["namespaces"]
|
2314
|
-
if n["cluster"]["name"] == cluster and n["name"] == namespace
|
2315
|
-
]
|
2316
|
-
if len(namespace_info) != 1:
|
2317
|
-
print(f"{cluster}/{namespace} does not exist.")
|
2318
|
-
sys.exit(1)
|
2319
|
-
|
2320
|
-
settings = queries.get_app_interface_settings()
|
2321
|
-
settings["vault"] = secret_reader == "vault"
|
2322
|
-
|
2323
|
-
ni = namespace_info[0]
|
2324
|
-
ob.aggregate_shared_resources(ni, "openshiftResources")
|
2325
|
-
openshift_resources = ni.get("openshiftResources")
|
2326
|
-
rule_spec = {}
|
2327
|
-
for r in openshift_resources:
|
2328
|
-
resource_path = r.get("resource", {}).get("path")
|
2329
|
-
if resource_path != rule_file_path:
|
2330
|
-
continue
|
2331
|
-
|
2332
|
-
if "add_path_to_prom_rules" not in r:
|
2333
|
-
r["add_path_to_prom_rules"] = False
|
2334
|
-
|
2335
|
-
openshift_resource = orb.fetch_openshift_resource(r, ni, settings)
|
2336
|
-
if openshift_resource.kind.lower() != "prometheusrule":
|
2337
|
-
print(f"Object in {rule_file_path} is not a PrometheusRule.")
|
2338
|
-
sys.exit(1)
|
2339
|
-
|
2340
|
-
rule_spec = openshift_resource.body["spec"]
|
2341
|
-
variables = json.loads(r.get("variables") or "{}")
|
2342
|
-
variables["resource"] = r
|
2343
|
-
break
|
2344
|
-
|
2345
|
-
if not rule_spec:
|
2346
|
-
print(
|
2347
|
-
f"Rules file referenced in {path} does not exist in namespace "
|
2348
|
-
f"{namespace} from cluster {cluster}."
|
2349
|
-
)
|
2350
|
-
sys.exit(1)
|
2351
|
-
|
2352
|
-
test_yaml_spec = yaml.safe_load(
|
2353
|
-
orb.process_extracurlyjinja2_template(
|
2354
|
-
body=test, vars=variables, settings=settings
|
2355
|
-
)
|
2356
|
-
)
|
2357
|
-
test_yaml_spec.pop("$schema")
|
2358
|
-
|
2359
|
-
result = promtool.run_test(
|
2360
|
-
test_yaml_spec=test_yaml_spec, rule_files={rule_file_path: rule_spec}
|
2361
|
-
)
|
2362
|
-
|
2363
|
-
print(result)
|
2364
|
-
|
2365
|
-
if not result:
|
2366
|
-
sys.exit(1)
|
2367
|
-
|
2368
|
-
|
2369
|
-
@root.command()
|
2370
3590
|
@click.argument("cluster")
|
2371
3591
|
@click.argument("namespace")
|
2372
3592
|
@click.argument("rules_path")
|
@@ -2422,7 +3642,6 @@ def alert_to_receiver(
|
|
2422
3642
|
secret_reader,
|
2423
3643
|
additional_label,
|
2424
3644
|
):
|
2425
|
-
|
2426
3645
|
additional_labels = {}
|
2427
3646
|
for al in additional_label:
|
2428
3647
|
try:
|
@@ -2500,28 +3719,26 @@ def alert_to_receiver(
|
|
2500
3719
|
for group in rule_spec["groups"]:
|
2501
3720
|
for rule in group["rules"]:
|
2502
3721
|
try:
|
3722
|
+
# alertname label is added automatically by Prometheus.
|
2503
3723
|
alert_labels.append(
|
2504
|
-
{
|
2505
|
-
"name": rule["alert"],
|
2506
|
-
"labels": rule["labels"] | additional_labels,
|
2507
|
-
}
|
3724
|
+
{"alertname": rule["alert"]} | rule["labels"] | additional_labels
|
2508
3725
|
)
|
2509
3726
|
except KeyError:
|
2510
3727
|
print("Skipping rule with no alert and/or labels", file=sys.stderr)
|
2511
3728
|
|
2512
3729
|
if alert_name:
|
2513
|
-
alert_labels = [al for al in alert_labels if al["
|
3730
|
+
alert_labels = [al for al in alert_labels if al["alertname"] == alert_name]
|
2514
3731
|
|
2515
3732
|
if not alert_labels:
|
2516
3733
|
print(f"Cannot find alert {alert_name} in rules {rules_path}")
|
2517
3734
|
sys.exit(1)
|
2518
3735
|
|
2519
3736
|
for al in alert_labels:
|
2520
|
-
result = amtool.config_routes_test(am_config, al
|
3737
|
+
result = amtool.config_routes_test(am_config, al)
|
2521
3738
|
if not result:
|
2522
3739
|
print(f"Error running amtool: {result}")
|
2523
3740
|
sys.exit(1)
|
2524
|
-
print("|".join([al["
|
3741
|
+
print("|".join([al["alertname"], str(result)]))
|
2525
3742
|
|
2526
3743
|
|
2527
3744
|
@root.command()
|
@@ -2530,7 +3747,7 @@ def alert_to_receiver(
|
|
2530
3747
|
@click.option("--env-name", default=None, help="environment to use for parameters.")
|
2531
3748
|
@click.pass_context
|
2532
3749
|
def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
|
2533
|
-
if env_name
|
3750
|
+
if not env_name:
|
2534
3751
|
print("env-name must be defined")
|
2535
3752
|
return
|
2536
3753
|
saas_files = get_saas_files(saas_file_name, env_name, app_name)
|
@@ -2544,21 +3761,17 @@ def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
|
|
2544
3761
|
if target.namespace.environment.name != env_name:
|
2545
3762
|
continue
|
2546
3763
|
|
2547
|
-
parameters
|
2548
|
-
|
2549
|
-
|
2550
|
-
|
2551
|
-
|
2552
|
-
|
2553
|
-
|
2554
|
-
|
2555
|
-
|
2556
|
-
|
2557
|
-
|
2558
|
-
if not isinstance(v, str):
|
2559
|
-
continue
|
2560
|
-
if replace_pattern in v:
|
2561
|
-
parameters[k] = v.replace(replace_pattern, replace_value)
|
3764
|
+
parameters = TargetSpec(
|
3765
|
+
saas_file=saas_file,
|
3766
|
+
resource_template=rt,
|
3767
|
+
target=target,
|
3768
|
+
# process_template options
|
3769
|
+
image_auth=None, # type: ignore[arg-type]
|
3770
|
+
hash_length=None, # type: ignore[arg-type]
|
3771
|
+
github=None, # type: ignore[arg-type]
|
3772
|
+
target_config_hash=None, # type: ignore[arg-type]
|
3773
|
+
secret_reader=None, # type: ignore[arg-type]
|
3774
|
+
).parameters()
|
2562
3775
|
|
2563
3776
|
parameters_cmd = ""
|
2564
3777
|
for k, v in parameters.items():
|
@@ -2581,7 +3794,7 @@ def saas_dev(ctx, app_name=None, saas_file_name=None, env_name=None) -> None:
|
|
2581
3794
|
@click.option("--app-name", default=None, help="app to act on.")
|
2582
3795
|
@click.pass_context
|
2583
3796
|
def saas_targets(
|
2584
|
-
ctx, saas_file_name:
|
3797
|
+
ctx, saas_file_name: str | None = None, app_name: str | None = None
|
2585
3798
|
) -> None:
|
2586
3799
|
"""Resolve namespaceSelectors and print all resulting targets of a saas file."""
|
2587
3800
|
console = Console()
|
@@ -2613,7 +3826,7 @@ def saas_targets(
|
|
2613
3826
|
if target.parameters:
|
2614
3827
|
param_table = Table("Key", "Value", box=box.MINIMAL)
|
2615
3828
|
for k, v in target.parameters.items():
|
2616
|
-
param_table.add_row(k, v)
|
3829
|
+
param_table.add_row(k, str(v))
|
2617
3830
|
info.add_row("Parameters", param_table)
|
2618
3831
|
|
2619
3832
|
if target.secret_parameters:
|
@@ -2770,6 +3983,51 @@ def gpg_encrypt(
|
|
2770
3983
|
).execute()
|
2771
3984
|
|
2772
3985
|
|
3986
|
+
@root.command()
|
3987
|
+
@click.option("--channel", help="the channel that state is part of")
|
3988
|
+
@click.option("--sha", help="the commit sha we want state for")
|
3989
|
+
@environ(["APP_INTERFACE_STATE_BUCKET"])
|
3990
|
+
def get_promotion_state(channel: str, sha: str):
|
3991
|
+
from tools.saas_promotion_state.saas_promotion_state import (
|
3992
|
+
SaasPromotionState,
|
3993
|
+
)
|
3994
|
+
|
3995
|
+
bucket = os.environ.get("APP_INTERFACE_STATE_BUCKET")
|
3996
|
+
region = os.environ.get("APP_INTERFACE_STATE_BUCKET_REGION", "us-east-1")
|
3997
|
+
promotion_state = SaasPromotionState.create(promotion_state=None, saas_files=None)
|
3998
|
+
for publisher_id, state in promotion_state.get(channel=channel, sha=sha).items():
|
3999
|
+
print()
|
4000
|
+
if not state:
|
4001
|
+
print(f"No state found for {publisher_id=}")
|
4002
|
+
else:
|
4003
|
+
print(f"{publisher_id=}")
|
4004
|
+
print(
|
4005
|
+
f"State link: https://{region}.console.aws.amazon.com/s3/object/{bucket}?region={region}&bucketType=general&prefix=state/openshift-saas-deploy/promotions_v2/{channel}/{publisher_id}/{sha}"
|
4006
|
+
)
|
4007
|
+
print(f"Content: {state}")
|
4008
|
+
|
4009
|
+
|
4010
|
+
@root.command()
|
4011
|
+
@click.option("--channel", help="the channel that state is part of")
|
4012
|
+
@click.option("--sha", help="the commit sha we want state for")
|
4013
|
+
@click.option("--publisher-id", help="the publisher id we want state for")
|
4014
|
+
@environ(["APP_INTERFACE_STATE_BUCKET"])
|
4015
|
+
def mark_promotion_state_successful(channel: str, sha: str, publisher_id: str):
|
4016
|
+
from tools.saas_promotion_state.saas_promotion_state import (
|
4017
|
+
SaasPromotionState,
|
4018
|
+
)
|
4019
|
+
|
4020
|
+
promotion_state = SaasPromotionState.create(promotion_state=None, saas_files=None)
|
4021
|
+
print(f"Current states for {publisher_id=}")
|
4022
|
+
print(promotion_state.get(channel=channel, sha=sha).get(publisher_id, None))
|
4023
|
+
print()
|
4024
|
+
print("Pushing new state ...")
|
4025
|
+
promotion_state.set_successful(channel=channel, sha=sha, publisher_uid=publisher_id)
|
4026
|
+
print()
|
4027
|
+
print(f"New state for {publisher_id=}")
|
4028
|
+
print(promotion_state.get(channel=channel, sha=sha).get(publisher_id, None))
|
4029
|
+
|
4030
|
+
|
2773
4031
|
@root.command()
|
2774
4032
|
@click.option("--change-type-name")
|
2775
4033
|
@click.option("--role-name")
|
@@ -2785,5 +4043,375 @@ def test_change_type(change_type_name: str, role_name: str, app_interface_path:
|
|
2785
4043
|
tester.test_change_type_in_context(change_type_name, role_name, app_interface_path)
|
2786
4044
|
|
2787
4045
|
|
4046
|
+
@root.group()
|
4047
|
+
@click.pass_context
|
4048
|
+
def sso_client(ctx):
|
4049
|
+
"""SSO client commands"""
|
4050
|
+
|
4051
|
+
|
4052
|
+
@sso_client.command()
|
4053
|
+
@click.argument("client-name", required=True)
|
4054
|
+
@click.option(
|
4055
|
+
"--contact-email",
|
4056
|
+
default="sd-app-sre+auth@redhat.com",
|
4057
|
+
help="Specify the contact email address",
|
4058
|
+
required=True,
|
4059
|
+
show_default=True,
|
4060
|
+
)
|
4061
|
+
@click.option(
|
4062
|
+
"--keycloak-instance-vault-path",
|
4063
|
+
help="Path to the keycloak secret in vault",
|
4064
|
+
default="app-sre/creds/rhidp/auth.redhat.com",
|
4065
|
+
required=True,
|
4066
|
+
show_default=True,
|
4067
|
+
)
|
4068
|
+
@click.option(
|
4069
|
+
"--request-uri",
|
4070
|
+
help="Specify an allowed request URL; first one will be used as the initial one URL. Can be specified multiple times",
|
4071
|
+
multiple=True,
|
4072
|
+
required=True,
|
4073
|
+
prompt=True,
|
4074
|
+
)
|
4075
|
+
@click.option(
|
4076
|
+
"--redirect-uri",
|
4077
|
+
help="Specify an allowed redirect URL. Can be specified multiple times",
|
4078
|
+
multiple=True,
|
4079
|
+
required=True,
|
4080
|
+
prompt=True,
|
4081
|
+
)
|
4082
|
+
@click.pass_context
|
4083
|
+
def create(
|
4084
|
+
ctx,
|
4085
|
+
client_name: str,
|
4086
|
+
contact_email: str,
|
4087
|
+
keycloak_instance_vault_path: str,
|
4088
|
+
request_uri: tuple[str],
|
4089
|
+
redirect_uri: tuple[str],
|
4090
|
+
) -> None:
|
4091
|
+
"""Create a new SSO client"""
|
4092
|
+
vault_settings = get_app_interface_vault_settings()
|
4093
|
+
secret_reader = create_secret_reader(use_vault=vault_settings.vault)
|
4094
|
+
|
4095
|
+
keycloak_secret = secret_reader.read_all({"path": keycloak_instance_vault_path})
|
4096
|
+
keycloak_api = KeycloakAPI(
|
4097
|
+
url=keycloak_secret["url"],
|
4098
|
+
initial_access_token=keycloak_secret["initial-access-token"],
|
4099
|
+
)
|
4100
|
+
sso_client = keycloak_api.register_client(
|
4101
|
+
client_name=client_name,
|
4102
|
+
redirect_uris=redirect_uri,
|
4103
|
+
initiate_login_uri=request_uri[0],
|
4104
|
+
request_uris=request_uri,
|
4105
|
+
contacts=[contact_email],
|
4106
|
+
)
|
4107
|
+
click.secho(
|
4108
|
+
"SSO client created successfully. Please save the following JSON in Vault!",
|
4109
|
+
bg="red",
|
4110
|
+
fg="white",
|
4111
|
+
)
|
4112
|
+
print(sso_client.json(by_alias=True, indent=2))
|
4113
|
+
|
4114
|
+
|
4115
|
+
@sso_client.command()
|
4116
|
+
@click.argument("sso-client-vault-secret-path", required=True)
|
4117
|
+
@click.pass_context
|
4118
|
+
def remove(ctx, sso_client_vault_secret_path: str):
|
4119
|
+
"""Remove an existing SSO client"""
|
4120
|
+
vault_settings = get_app_interface_vault_settings()
|
4121
|
+
secret_reader = create_secret_reader(use_vault=vault_settings.vault)
|
4122
|
+
|
4123
|
+
sso_client = SSOClient(
|
4124
|
+
**secret_reader.read_all({"path": sso_client_vault_secret_path})
|
4125
|
+
)
|
4126
|
+
keycloak_api = KeycloakAPI()
|
4127
|
+
keycloak_api.delete_client(
|
4128
|
+
registration_client_uri=sso_client.registration_client_uri,
|
4129
|
+
registration_access_token=sso_client.registration_access_token,
|
4130
|
+
)
|
4131
|
+
click.secho(
|
4132
|
+
"SSO client removed successfully. Please remove the secret from Vault!",
|
4133
|
+
bg="red",
|
4134
|
+
fg="white",
|
4135
|
+
)
|
4136
|
+
|
4137
|
+
|
4138
|
+
@root.group()
|
4139
|
+
@click.option(
|
4140
|
+
"--provision-provider",
|
4141
|
+
required=True,
|
4142
|
+
help="externalResources.provider",
|
4143
|
+
default="aws",
|
4144
|
+
)
|
4145
|
+
@click.option(
|
4146
|
+
"--provisioner",
|
4147
|
+
required=True,
|
4148
|
+
help="externalResources.provisioner.name. E.g. app-sre-stage",
|
4149
|
+
prompt=True,
|
4150
|
+
)
|
4151
|
+
@click.option(
|
4152
|
+
"--provider",
|
4153
|
+
required=True,
|
4154
|
+
help="externalResources.resources.provider. E.g. rds, msk, ...",
|
4155
|
+
prompt=True,
|
4156
|
+
)
|
4157
|
+
@click.option(
|
4158
|
+
"--identifier",
|
4159
|
+
required=True,
|
4160
|
+
help="externalResources.resources.identifier. E.g. erv2-example",
|
4161
|
+
prompt=True,
|
4162
|
+
)
|
4163
|
+
@click.pass_context
|
4164
|
+
def external_resources(
|
4165
|
+
ctx, provision_provider: str, provisioner: str, provider: str, identifier: str
|
4166
|
+
):
|
4167
|
+
"""External resources commands"""
|
4168
|
+
ctx.obj["provision_provider"] = provision_provider
|
4169
|
+
ctx.obj["provisioner"] = provisioner
|
4170
|
+
ctx.obj["provider"] = provider
|
4171
|
+
ctx.obj["identifier"] = identifier
|
4172
|
+
vault_settings = get_app_interface_vault_settings()
|
4173
|
+
ctx.obj["secret_reader"] = create_secret_reader(use_vault=vault_settings.vault)
|
4174
|
+
|
4175
|
+
|
4176
|
+
@external_resources.command()
|
4177
|
+
@click.pass_context
|
4178
|
+
def get_input(ctx):
|
4179
|
+
"""Gets the input data for an external resource asset. Input data is what is used
|
4180
|
+
in the Reconciliation Job to manage the resource."""
|
4181
|
+
erv2cli = Erv2Cli(
|
4182
|
+
provision_provider=ctx.obj["provision_provider"],
|
4183
|
+
provisioner=ctx.obj["provisioner"],
|
4184
|
+
provider=ctx.obj["provider"],
|
4185
|
+
identifier=ctx.obj["identifier"],
|
4186
|
+
secret_reader=ctx.obj["secret_reader"],
|
4187
|
+
)
|
4188
|
+
print(erv2cli.input_data)
|
4189
|
+
|
4190
|
+
|
4191
|
+
@external_resources.command()
|
4192
|
+
@click.pass_context
|
4193
|
+
def request_reconciliation(ctx):
|
4194
|
+
"""Marks a resource as it needs to get reconciled. The itegration will reconcile the resource at
|
4195
|
+
its next iteration."""
|
4196
|
+
erv2cli = Erv2Cli(
|
4197
|
+
provision_provider=ctx.obj["provision_provider"],
|
4198
|
+
provisioner=ctx.obj["provisioner"],
|
4199
|
+
provider=ctx.obj["provider"],
|
4200
|
+
identifier=ctx.obj["identifier"],
|
4201
|
+
secret_reader=ctx.obj["secret_reader"],
|
4202
|
+
)
|
4203
|
+
erv2cli.reconcile()
|
4204
|
+
|
4205
|
+
|
4206
|
+
@external_resources.command()
|
4207
|
+
@binary(["terraform", "docker"])
|
4208
|
+
@binary_version("terraform", ["version"], TERRAFORM_VERSION_REGEX, TERRAFORM_VERSION)
|
4209
|
+
@click.option(
|
4210
|
+
"--dry-run/--no-dry-run",
|
4211
|
+
help="Enable/Disable dry-run. Default: dry-run enabled!",
|
4212
|
+
default=True,
|
4213
|
+
)
|
4214
|
+
@click.option(
|
4215
|
+
"--skip-build/--no-skip-build",
|
4216
|
+
help="Skip/Do not skip the terraform and CDKTF builds. Default: build everything!",
|
4217
|
+
default=False,
|
4218
|
+
)
|
4219
|
+
@click.pass_context
|
4220
|
+
def migrate(ctx, dry_run: bool, skip_build: bool) -> None:
|
4221
|
+
"""Migrate an existing external resource managed by terraform-resources to ERv2.
|
4222
|
+
|
4223
|
+
|
4224
|
+
E.g: qontract-reconcile --config=<config> external-resources migrate aws app-sre-stage rds dashdotdb-stage
|
4225
|
+
"""
|
4226
|
+
if ctx.obj["provider"] == "rds":
|
4227
|
+
# The "random_password" is not an AWS resource. It's just in the outputs and can't be migrated :(
|
4228
|
+
raise NotImplementedError("RDS migration is not supported yet!")
|
4229
|
+
|
4230
|
+
if not Confirm.ask(
|
4231
|
+
dedent("""
|
4232
|
+
Please ensure [red]terraform-resources[/] is disabled before proceeding!
|
4233
|
+
|
4234
|
+
Do you want to proceed?"""),
|
4235
|
+
default=True,
|
4236
|
+
):
|
4237
|
+
sys.exit(0)
|
4238
|
+
|
4239
|
+
# use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
|
4240
|
+
tempdir = Path.home() / ".erv2-migration"
|
4241
|
+
rich_print(f"Using temporary directory: [b]{tempdir}[/]")
|
4242
|
+
tempdir.mkdir(exist_ok=True)
|
4243
|
+
temp_erv2 = Path(tempdir) / "erv2"
|
4244
|
+
temp_erv2.mkdir(exist_ok=True)
|
4245
|
+
temp_tfr = tempdir / "terraform-resources"
|
4246
|
+
temp_tfr.mkdir(exist_ok=True)
|
4247
|
+
|
4248
|
+
with progress_spinner() as progress:
|
4249
|
+
with task(progress, "Preparing AWS credentials for CDKTF and local terraform"):
|
4250
|
+
# prepare AWS credentials for CDKTF and local terraform
|
4251
|
+
credentials_file = tempdir / "credentials"
|
4252
|
+
credentials_file.write_text(
|
4253
|
+
ctx.obj["secret_reader"].read_with_parameters(
|
4254
|
+
path=f"app-sre/external-resources/{ctx.obj['provisioner']}",
|
4255
|
+
field="credentials",
|
4256
|
+
format=None,
|
4257
|
+
version=None,
|
4258
|
+
)
|
4259
|
+
)
|
4260
|
+
os.environ["AWS_SHARED_CREDENTIALS_FILE"] = str(credentials_file)
|
4261
|
+
|
4262
|
+
erv2cli = Erv2Cli(
|
4263
|
+
provision_provider=ctx.obj["provision_provider"],
|
4264
|
+
provisioner=ctx.obj["provisioner"],
|
4265
|
+
provider=ctx.obj["provider"],
|
4266
|
+
identifier=ctx.obj["identifier"],
|
4267
|
+
secret_reader=ctx.obj["secret_reader"],
|
4268
|
+
temp_dir=temp_erv2,
|
4269
|
+
progress_spinner=progress,
|
4270
|
+
)
|
4271
|
+
|
4272
|
+
with task(progress, "(erv2) Building the terraform configuration"):
|
4273
|
+
if not skip_build:
|
4274
|
+
# build the CDKTF output
|
4275
|
+
erv2cli.build_cdktf(credentials_file)
|
4276
|
+
erv2_tf_cli = TerraformCli(
|
4277
|
+
temp_erv2, dry_run=dry_run, progress_spinner=progress
|
4278
|
+
)
|
4279
|
+
if not skip_build:
|
4280
|
+
erv2_tf_cli.init()
|
4281
|
+
|
4282
|
+
with task(
|
4283
|
+
progress, "(terraform-resources) Building the terraform configuration"
|
4284
|
+
):
|
4285
|
+
# build the terraform-resources output
|
4286
|
+
conf_tf = temp_tfr / "conf.tf.json"
|
4287
|
+
if not skip_build:
|
4288
|
+
tfr.run(
|
4289
|
+
dry_run=True,
|
4290
|
+
print_to_file=str(conf_tf),
|
4291
|
+
account_name=[ctx.obj["provisioner"]],
|
4292
|
+
)
|
4293
|
+
# remove comments
|
4294
|
+
conf_tf.write_text(
|
4295
|
+
"\n".join(
|
4296
|
+
line
|
4297
|
+
for line in conf_tf.read_text().splitlines()
|
4298
|
+
if not line.startswith("#")
|
4299
|
+
)
|
4300
|
+
)
|
4301
|
+
tfr_tf_cli = TerraformCli(
|
4302
|
+
temp_tfr, dry_run=dry_run, progress_spinner=progress
|
4303
|
+
)
|
4304
|
+
if not skip_build:
|
4305
|
+
tfr_tf_cli.init()
|
4306
|
+
|
4307
|
+
with progress_spinner() as progress:
|
4308
|
+
# start a new spinner instance for clean output
|
4309
|
+
erv2_tf_cli.progress_spinner = progress
|
4310
|
+
with task(
|
4311
|
+
progress,
|
4312
|
+
"Migrating the resources from terraform-resources to ERv2",
|
4313
|
+
):
|
4314
|
+
if ctx.obj["provider"] == "elasticache":
|
4315
|
+
# Elasticache migration is a bit different
|
4316
|
+
erv2_tf_cli.migrate_elasticache_resources(source=tfr_tf_cli)
|
4317
|
+
else:
|
4318
|
+
erv2_tf_cli.migrate_resources(source=tfr_tf_cli)
|
4319
|
+
|
4320
|
+
rich_print(f"[b red]Please remove the temporary directory ({tempdir}) manually!")
|
4321
|
+
|
4322
|
+
|
4323
|
+
@external_resources.command()
|
4324
|
+
@binary(["docker"])
|
4325
|
+
@click.pass_context
|
4326
|
+
def debug_shell(ctx) -> None:
|
4327
|
+
"""Enter an ERv2 debug shell to manually migrate resources."""
|
4328
|
+
# use a temporary directory in $HOME. The MacOS colima default configuration allows docker mounts from $HOME.
|
4329
|
+
with tempfile.TemporaryDirectory(dir=Path.home(), prefix="erv2-debug.") as _tempdir:
|
4330
|
+
tempdir = Path(_tempdir)
|
4331
|
+
with progress_spinner() as progress:
|
4332
|
+
with task(progress, "Preparing environment ..."):
|
4333
|
+
credentials_file = tempdir / "credentials"
|
4334
|
+
credentials_file.write_text(
|
4335
|
+
ctx.obj["secret_reader"].read_with_parameters(
|
4336
|
+
path=f"app-sre/external-resources/{ctx.obj['provisioner']}",
|
4337
|
+
field="credentials",
|
4338
|
+
format=None,
|
4339
|
+
version=None,
|
4340
|
+
)
|
4341
|
+
)
|
4342
|
+
os.environ["AWS_SHARED_CREDENTIALS_FILE"] = str(credentials_file)
|
4343
|
+
|
4344
|
+
erv2cli = Erv2Cli(
|
4345
|
+
provision_provider=ctx.obj["provision_provider"],
|
4346
|
+
provisioner=ctx.obj["provisioner"],
|
4347
|
+
provider=ctx.obj["provider"],
|
4348
|
+
identifier=ctx.obj["identifier"],
|
4349
|
+
secret_reader=ctx.obj["secret_reader"],
|
4350
|
+
temp_dir=tempdir,
|
4351
|
+
progress_spinner=progress,
|
4352
|
+
)
|
4353
|
+
erv2cli.enter_shell(credentials_file)
|
4354
|
+
|
4355
|
+
|
4356
|
+
@get.command(help="Get all container images in app-interface defined namespaces")
|
4357
|
+
@cluster_name
|
4358
|
+
@namespace_name
|
4359
|
+
@thread_pool_size()
|
4360
|
+
@use_jump_host()
|
4361
|
+
@click.option("--exclude-pattern", help="Exclude images that match this pattern")
|
4362
|
+
@click.option("--include-pattern", help="Only include images that match this pattern")
|
4363
|
+
@click.pass_context
|
4364
|
+
def container_images(
|
4365
|
+
ctx,
|
4366
|
+
cluster_name,
|
4367
|
+
namespace_name,
|
4368
|
+
thread_pool_size,
|
4369
|
+
use_jump_host,
|
4370
|
+
exclude_pattern,
|
4371
|
+
include_pattern,
|
4372
|
+
):
|
4373
|
+
from tools.cli_commands.container_images_report import get_all_pods_images
|
4374
|
+
|
4375
|
+
results = get_all_pods_images(
|
4376
|
+
cluster_name=cluster_name,
|
4377
|
+
namespace_name=namespace_name,
|
4378
|
+
thread_pool_size=thread_pool_size,
|
4379
|
+
use_jump_host=use_jump_host,
|
4380
|
+
exclude_pattern=exclude_pattern,
|
4381
|
+
include_pattern=include_pattern,
|
4382
|
+
)
|
4383
|
+
|
4384
|
+
if ctx.obj["options"]["output"] == "md":
|
4385
|
+
json_table = {
|
4386
|
+
"filter": True,
|
4387
|
+
"fields": [
|
4388
|
+
{"key": "name", "sortable": True},
|
4389
|
+
{"key": "namespaces", "sortable": True},
|
4390
|
+
{"key": "count", "sortable": True},
|
4391
|
+
],
|
4392
|
+
"items": results,
|
4393
|
+
}
|
4394
|
+
|
4395
|
+
print(
|
4396
|
+
f"""
|
4397
|
+
You can view the source of this Markdown to extract the JSON data.
|
4398
|
+
|
4399
|
+
{len(results)} container images found.
|
4400
|
+
|
4401
|
+
```json:table
|
4402
|
+
{json.dumps(json_table)}
|
4403
|
+
```
|
4404
|
+
"""
|
4405
|
+
)
|
4406
|
+
else:
|
4407
|
+
columns = [
|
4408
|
+
"name",
|
4409
|
+
"namespaces",
|
4410
|
+
"count",
|
4411
|
+
]
|
4412
|
+
ctx.obj["options"]["sort"] = False
|
4413
|
+
print_output(ctx.obj["options"], results, columns)
|
4414
|
+
|
4415
|
+
|
2788
4416
|
if __name__ == "__main__":
|
2789
4417
|
root() # pylint: disable=no-value-for-parameter
|