prowler-cloud 5.13.1__py3-none-any.whl → 5.14.0__py3-none-any.whl
This diff compares the contents of publicly available package versions released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- dashboard/__main__.py +2 -1
- dashboard/compliance/c5_azure.py +43 -0
- dashboard/compliance/fedramp_20x_ksi_low_aws.py +46 -0
- dashboard/compliance/fedramp_20x_ksi_low_azure.py +46 -0
- dashboard/compliance/fedramp_20x_ksi_low_gcp.py +46 -0
- dashboard/compliance/hipaa_gcp.py +25 -0
- dashboard/compliance/nist_csf_2_0_aws.py +24 -0
- dashboard/compliance/prowler_threatscore_kubernetes.py +28 -0
- prowler/AGENTS.md +366 -0
- prowler/CHANGELOG.md +85 -2
- prowler/__main__.py +54 -7
- prowler/compliance/aws/ens_rd2022_aws.json +1 -1
- prowler/compliance/aws/fedramp_20x_ksi_low_aws.json +347 -0
- prowler/compliance/aws/nis2_aws.json +1 -1
- prowler/compliance/aws/nist_csf_2.0_aws.json +1781 -0
- prowler/compliance/azure/c5_azure.json +9471 -0
- prowler/compliance/azure/ens_rd2022_azure.json +1 -1
- prowler/compliance/azure/fedramp_20x_ksi_low_azure.json +358 -0
- prowler/compliance/azure/nis2_azure.json +1 -1
- prowler/compliance/gcp/c5_gcp.json +9401 -0
- prowler/compliance/gcp/ens_rd2022_gcp.json +1 -1
- prowler/compliance/gcp/fedramp_20x_ksi_low_gcp.json +293 -0
- prowler/compliance/gcp/hipaa_gcp.json +415 -0
- prowler/compliance/gcp/nis2_gcp.json +1 -1
- prowler/compliance/github/cis_1.0_github.json +6 -2
- prowler/compliance/kubernetes/prowler_threatscore_kubernetes.json +1269 -0
- prowler/compliance/m365/prowler_threatscore_m365.json +6 -6
- prowler/compliance/{oci/cis_3.0_oci.json → oraclecloud/cis_3.0_oraclecloud.json} +1 -1
- prowler/config/config.py +59 -5
- prowler/config/config.yaml +3 -0
- prowler/lib/check/check.py +1 -9
- prowler/lib/check/checks_loader.py +65 -1
- prowler/lib/check/models.py +12 -2
- prowler/lib/check/utils.py +1 -7
- prowler/lib/cli/parser.py +17 -7
- prowler/lib/mutelist/mutelist.py +15 -7
- prowler/lib/outputs/compliance/c5/c5_azure.py +92 -0
- prowler/lib/outputs/compliance/c5/c5_gcp.py +92 -0
- prowler/lib/outputs/compliance/c5/models.py +54 -0
- prowler/lib/outputs/compliance/cis/{cis_oci.py → cis_oraclecloud.py} +7 -7
- prowler/lib/outputs/compliance/cis/models.py +3 -3
- prowler/lib/outputs/compliance/prowler_threatscore/models.py +29 -0
- prowler/lib/outputs/compliance/prowler_threatscore/prowler_threatscore_kubernetes.py +98 -0
- prowler/lib/outputs/finding.py +16 -5
- prowler/lib/outputs/html/html.py +10 -8
- prowler/lib/outputs/outputs.py +1 -1
- prowler/lib/outputs/summary_table.py +1 -1
- prowler/lib/powershell/powershell.py +12 -11
- prowler/lib/scan/scan.py +105 -24
- prowler/lib/utils/utils.py +1 -1
- prowler/providers/aws/aws_regions_by_service.json +73 -15
- prowler/providers/aws/lib/quick_inventory/quick_inventory.py +1 -1
- prowler/providers/aws/lib/security_hub/security_hub.py +1 -1
- prowler/providers/aws/services/account/account_service.py +1 -1
- prowler/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.metadata.json +1 -3
- prowler/providers/aws/services/cloudwatch/cloudwatch_alarm_actions_alarm_state_configured/cloudwatch_alarm_actions_alarm_state_configured.metadata.json +23 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_alarm_actions_enabled/cloudwatch_alarm_actions_enabled.metadata.json +21 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.metadata.json +23 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.metadata.json +24 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.metadata.json +21 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.metadata.json +17 -11
- prowler/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.metadata.json +20 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.metadata.json +22 -13
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_no_secrets_in_logs/cloudwatch_log_group_no_secrets_in_logs.metadata.json +22 -17
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_not_publicly_accessible/cloudwatch_log_group_not_publicly_accessible.metadata.json +18 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.metadata.json +27 -13
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.metadata.json +20 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.metadata.json +22 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.metadata.json +25 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.metadata.json +23 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.metadata.json +17 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.metadata.json +21 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.metadata.json +21 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.metadata.json +27 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.metadata.json +22 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.metadata.json +26 -12
- prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.metadata.json +25 -12
- prowler/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.metadata.json +20 -11
- prowler/providers/aws/services/codebuild/codebuild_project_logging_enabled/codebuild_project_logging_enabled.metadata.json +22 -12
- prowler/providers/aws/services/codebuild/codebuild_project_no_secrets_in_variables/codebuild_project_no_secrets_in_variables.metadata.json +28 -12
- prowler/providers/aws/services/codebuild/codebuild_project_not_publicly_accessible/codebuild_project_not_publicly_accessible.metadata.json +22 -12
- prowler/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.metadata.json +15 -10
- prowler/providers/aws/services/codebuild/codebuild_project_s3_logs_encrypted/codebuild_project_s3_logs_encrypted.metadata.json +19 -11
- prowler/providers/aws/services/codebuild/codebuild_project_source_repo_url_no_sensitive_credentials/codebuild_project_source_repo_url_no_sensitive_credentials.metadata.json +21 -12
- prowler/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.metadata.json +19 -12
- prowler/providers/aws/services/codebuild/codebuild_project_uses_allowed_github_organizations/codebuild_project_uses_allowed_github_organizations.metadata.json +24 -13
- prowler/providers/aws/services/codebuild/codebuild_report_group_export_encrypted/codebuild_report_group_export_encrypted.metadata.json +35 -13
- prowler/providers/aws/services/codepipeline/__init__.py +0 -0
- prowler/providers/aws/services/codepipeline/codepipeline_client.py +6 -0
- prowler/providers/aws/services/codepipeline/codepipeline_project_repo_private/__init__.py +0 -0
- prowler/providers/aws/services/codepipeline/codepipeline_project_repo_private/codepipeline_project_repo_private.metadata.json +30 -0
- prowler/providers/aws/services/codepipeline/codepipeline_project_repo_private/codepipeline_project_repo_private.py +95 -0
- prowler/providers/aws/services/codepipeline/codepipeline_service.py +164 -0
- prowler/providers/aws/services/directconnect/directconnect_connection_redundancy/directconnect_connection_redundancy.metadata.json +18 -12
- prowler/providers/aws/services/directconnect/directconnect_virtual_interface_redundancy/directconnect_virtual_interface_redundancy.metadata.json +18 -12
- prowler/providers/aws/services/documentdb/documentdb_cluster_backup_enabled/documentdb_cluster_backup_enabled.metadata.json +24 -13
- prowler/providers/aws/services/documentdb/documentdb_cluster_cloudwatch_log_export/documentdb_cluster_cloudwatch_log_export.metadata.json +23 -13
- prowler/providers/aws/services/documentdb/documentdb_cluster_deletion_protection/documentdb_cluster_deletion_protection.metadata.json +24 -13
- prowler/providers/aws/services/documentdb/documentdb_cluster_multi_az_enabled/documentdb_cluster_multi_az_enabled.metadata.json +19 -13
- prowler/providers/aws/services/documentdb/documentdb_cluster_public_snapshot/documentdb_cluster_public_snapshot.metadata.json +20 -10
- prowler/providers/aws/services/documentdb/documentdb_cluster_storage_encrypted/documentdb_cluster_storage_encrypted.metadata.json +26 -13
- prowler/providers/aws/services/drs/drs_job_exist/drs_job_exist.metadata.json +20 -10
- prowler/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.metadata.json +18 -11
- prowler/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_in_transit_encryption_enabled/dynamodb_accelerator_cluster_in_transit_encryption_enabled.metadata.json +16 -11
- prowler/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_multi_az/dynamodb_accelerator_cluster_multi_az.metadata.json +21 -13
- prowler/providers/aws/services/dynamodb/dynamodb_table_autoscaling_enabled/dynamodb_table_autoscaling_enabled.metadata.json +20 -12
- prowler/providers/aws/services/dynamodb/dynamodb_table_cross_account_access/dynamodb_table_cross_account_access.metadata.json +17 -10
- prowler/providers/aws/services/dynamodb/dynamodb_table_deletion_protection_enabled/dynamodb_table_deletion_protection_enabled.metadata.json +21 -13
- prowler/providers/aws/services/dynamodb/dynamodb_table_protected_by_backup_plan/dynamodb_table_protected_by_backup_plan.metadata.json +18 -12
- prowler/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.metadata.json +18 -12
- prowler/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.metadata.json +19 -12
- prowler/providers/aws/services/ecr/ecr_registry_scan_images_on_push_enabled/ecr_registry_scan_images_on_push_enabled.metadata.json +16 -11
- prowler/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.metadata.json +22 -13
- prowler/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.metadata.json +19 -13
- prowler/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.metadata.json +21 -13
- prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.metadata.json +22 -12
- prowler/providers/aws/services/ecr/ecr_repositories_tag_immutability/ecr_repositories_tag_immutability.metadata.json +20 -12
- prowler/providers/aws/services/ecs/ecs_cluster_container_insights_enabled/ecs_cluster_container_insights_enabled.metadata.json +21 -11
- prowler/providers/aws/services/ecs/ecs_service_fargate_latest_platform_version/ecs_service_fargate_latest_platform_version.metadata.json +20 -11
- prowler/providers/aws/services/ecs/ecs_service_no_assign_public_ip/ecs_service_no_assign_public_ip.metadata.json +18 -12
- prowler/providers/aws/services/ecs/ecs_task_definitions_containers_readonly_access/ecs_task_definitions_containers_readonly_access.metadata.json +20 -13
- prowler/providers/aws/services/ecs/ecs_task_definitions_host_namespace_not_shared/ecs_task_definitions_host_namespace_not_shared.metadata.json +21 -13
- prowler/providers/aws/services/ecs/ecs_task_definitions_host_networking_mode_users/ecs_task_definitions_host_networking_mode_users.metadata.json +26 -13
- prowler/providers/aws/services/ecs/ecs_task_definitions_logging_block_mode/ecs_task_definitions_logging_block_mode.metadata.json +19 -12
- prowler/providers/aws/services/ecs/ecs_task_definitions_logging_enabled/ecs_task_definitions_logging_enabled.metadata.json +18 -12
- prowler/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.metadata.json +16 -12
- prowler/providers/aws/services/ecs/ecs_task_definitions_no_privileged_containers/ecs_task_definitions_no_privileged_containers.metadata.json +21 -14
- prowler/providers/aws/services/ecs/ecs_task_set_no_assign_public_ip/ecs_task_set_no_assign_public_ip.metadata.json +19 -13
- prowler/providers/aws/services/eks/eks_cluster_deletion_protection_enabled/eks_cluster_deletion_protection_enabled.metadata.json +20 -13
- prowler/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json +20 -13
- prowler/providers/aws/services/eks/eks_cluster_network_policy_enabled/eks_cluster_network_policy_enabled.metadata.json +20 -14
- prowler/providers/aws/services/eks/eks_cluster_not_publicly_accessible/eks_cluster_not_publicly_accessible.metadata.json +22 -13
- prowler/providers/aws/services/eks/eks_cluster_private_nodes_enabled/eks_cluster_private_nodes_enabled.metadata.json +19 -13
- prowler/providers/aws/services/eks/eks_cluster_uses_a_supported_version/eks_cluster_uses_a_supported_version.metadata.json +21 -12
- prowler/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json +20 -13
- prowler/providers/aws/services/elasticache/elasticache_cluster_uses_public_subnet/elasticache_cluster_uses_public_subnet.metadata.json +20 -12
- prowler/providers/aws/services/elasticache/elasticache_redis_cluster_auto_minor_version_upgrades/elasticache_redis_cluster_auto_minor_version_upgrades.metadata.json +21 -12
- prowler/providers/aws/services/elasticache/elasticache_redis_cluster_automatic_failover_enabled/elasticache_redis_cluster_automatic_failover_enabled.metadata.json +20 -13
- prowler/providers/aws/services/elasticache/elasticache_redis_cluster_backup_enabled/elasticache_redis_cluster_backup_enabled.metadata.json +23 -13
- prowler/providers/aws/services/elasticache/elasticache_redis_cluster_in_transit_encryption_enabled/elasticache_redis_cluster_in_transit_encryption_enabled.metadata.json +21 -12
- prowler/providers/aws/services/elasticache/elasticache_redis_cluster_multi_az_enabled/elasticache_redis_cluster_multi_az_enabled.metadata.json +22 -14
- prowler/providers/aws/services/elasticache/elasticache_redis_cluster_rest_encryption_enabled/elasticache_redis_cluster_rest_encryption_enabled.metadata.json +20 -11
- prowler/providers/aws/services/elasticache/elasticache_redis_replication_group_auth_enabled/elasticache_redis_replication_group_auth_enabled.metadata.json +23 -13
- prowler/providers/aws/services/elasticbeanstalk/elasticbeanstalk_environment_cloudwatch_logging_enabled/elasticbeanstalk_environment_cloudwatch_logging_enabled.metadata.json +18 -12
- prowler/providers/aws/services/elasticbeanstalk/elasticbeanstalk_environment_enhanced_health_reporting/elasticbeanstalk_environment_enhanced_health_reporting.metadata.json +17 -12
- prowler/providers/aws/services/elasticbeanstalk/elasticbeanstalk_environment_managed_updates_enabled/elasticbeanstalk_environment_managed_updates_enabled.metadata.json +17 -11
- prowler/providers/aws/services/elb/elb_connection_draining_enabled/elb_connection_draining_enabled.metadata.json +22 -13
- prowler/providers/aws/services/elb/elb_cross_zone_load_balancing_enabled/elb_cross_zone_load_balancing_enabled.metadata.json +24 -13
- prowler/providers/aws/services/elb/elb_desync_mitigation_mode/elb_desync_mitigation_mode.metadata.json +20 -11
- prowler/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.metadata.json +20 -10
- prowler/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.metadata.json +20 -11
- prowler/providers/aws/services/elb/elb_is_in_multiple_az/elb_is_in_multiple_az.metadata.json +20 -12
- prowler/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.metadata.json +19 -12
- prowler/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.metadata.json +19 -11
- prowler/providers/aws/services/elb/elb_ssl_listeners_use_acm_certificate/elb_ssl_listeners_use_acm_certificate.metadata.json +17 -12
- prowler/providers/aws/services/elbv2/elbv2_cross_zone_load_balancing_enabled/elbv2_cross_zone_load_balancing_enabled.metadata.json +21 -13
- prowler/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.metadata.json +19 -11
- prowler/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.metadata.json +21 -12
- prowler/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.metadata.json +18 -11
- prowler/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.metadata.json +17 -10
- prowler/providers/aws/services/elbv2/elbv2_is_in_multiple_az/elbv2_is_in_multiple_az.metadata.json +22 -13
- prowler/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.metadata.json +18 -12
- prowler/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.metadata.json +17 -12
- prowler/providers/aws/services/elbv2/elbv2_nlb_tls_termination_enabled/elbv2_nlb_tls_termination_enabled.metadata.json +18 -11
- prowler/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.metadata.json +18 -12
- prowler/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.metadata.json +16 -11
- prowler/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.metadata.json +21 -13
- prowler/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.metadata.json +24 -11
- prowler/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.metadata.json +18 -11
- prowler/providers/aws/services/eventbridge/eventbridge_bus_cross_account_access/eventbridge_bus_cross_account_access.metadata.json +26 -13
- prowler/providers/aws/services/eventbridge/eventbridge_bus_exposed/eventbridge_bus_exposed.metadata.json +21 -11
- prowler/providers/aws/services/eventbridge/eventbridge_global_endpoint_event_replication_enabled/eventbridge_global_endpoint_event_replication_enabled.metadata.json +24 -13
- prowler/providers/aws/services/eventbridge/eventbridge_schema_registry_cross_account_access/eventbridge_schema_registry_cross_account_access.metadata.json +26 -14
- prowler/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest.metadata.json +26 -15
- prowler/providers/aws/services/firehose/firehose_stream_encrypted_at_rest/firehose_stream_encrypted_at_rest.py +15 -16
- prowler/providers/aws/services/fms/fms_policy_compliant/fms_policy_compliant.metadata.json +23 -11
- prowler/providers/aws/services/fsx/fsx_file_system_copy_tags_to_backups_enabled/fsx_file_system_copy_tags_to_backups_enabled.metadata.json +19 -12
- prowler/providers/aws/services/fsx/fsx_file_system_copy_tags_to_volumes_enabled/fsx_file_system_copy_tags_to_volumes_enabled.metadata.json +17 -12
- prowler/providers/aws/services/fsx/fsx_windows_file_system_multi_az_enabled/fsx_windows_file_system_multi_az_enabled.metadata.json +22 -13
- prowler/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.metadata.json +21 -12
- prowler/providers/aws/services/iam/lib/policy.py +24 -16
- prowler/providers/aws/services/kinesis/kinesis_stream_data_retention_period/kinesis_stream_data_retention_period.metadata.json +21 -13
- prowler/providers/aws/services/kinesis/kinesis_stream_encrypted_at_rest/kinesis_stream_encrypted_at_rest.metadata.json +22 -13
- prowler/providers/azure/services/cosmosdb/cosmosdb_service.py +7 -2
- prowler/providers/azure/services/defender/defender_service.py +4 -2
- prowler/providers/azure/services/postgresql/postgresql_flexible_server_entra_id_authentication_enabled/__init__.py +0 -0
- prowler/providers/azure/services/postgresql/postgresql_flexible_server_entra_id_authentication_enabled/postgresql_flexible_server_entra_id_authentication_enabled.metadata.json +36 -0
- prowler/providers/azure/services/postgresql/postgresql_flexible_server_entra_id_authentication_enabled/postgresql_flexible_server_entra_id_authentication_enabled.py +43 -0
- prowler/providers/azure/services/postgresql/postgresql_service.py +66 -9
- prowler/providers/azure/services/storage/storage_service.py +13 -4
- prowler/providers/azure/services/vm/vm_service.py +4 -7
- prowler/providers/common/arguments.py +19 -16
- prowler/providers/common/provider.py +2 -18
- prowler/providers/gcp/services/artifacts/artifacts_container_analysis_enabled/artifacts_container_analysis_enabled.metadata.json +16 -15
- prowler/providers/gcp/services/cloudresourcemanager/cloudresourcemanager_service.py +30 -4
- prowler/providers/gcp/services/cloudstorage/cloudstorage_audit_logs_enabled/__init__.py +0 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_audit_logs_enabled/cloudstorage_audit_logs_enabled.metadata.json +36 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_audit_logs_enabled/cloudstorage_audit_logs_enabled.py +61 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.metadata.json +12 -9
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.py +10 -3
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_logging_enabled/__init__.py +0 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_logging_enabled/cloudstorage_bucket_logging_enabled.metadata.json +36 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_logging_enabled/cloudstorage_bucket_logging_enabled.py +40 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_soft_delete_enabled/__init__.py +0 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_soft_delete_enabled/cloudstorage_bucket_soft_delete_enabled.metadata.json +36 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_soft_delete_enabled/cloudstorage_bucket_soft_delete_enabled.py +31 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_sufficient_retention_period/__init__.py +0 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_sufficient_retention_period/cloudstorage_bucket_sufficient_retention_period.metadata.json +35 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_sufficient_retention_period/cloudstorage_bucket_sufficient_retention_period.py +55 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_versioning_enabled/__init__.py +0 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_versioning_enabled/cloudstorage_bucket_versioning_enabled.metadata.json +36 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_versioning_enabled/cloudstorage_bucket_versioning_enabled.py +30 -0
- prowler/providers/gcp/services/cloudstorage/cloudstorage_service.py +48 -2
- prowler/providers/github/services/organization/organization_default_repository_permission_strict/__init__.py +0 -0
- prowler/providers/github/services/organization/organization_default_repository_permission_strict/organization_default_repository_permission_strict.metadata.json +35 -0
- prowler/providers/github/services/organization/organization_default_repository_permission_strict/organization_default_repository_permission_strict.py +36 -0
- prowler/providers/github/services/organization/organization_members_mfa_required/organization_members_mfa_required.metadata.json +14 -8
- prowler/providers/github/services/organization/organization_repository_creation_limited/__init__.py +0 -0
- prowler/providers/github/services/organization/organization_repository_creation_limited/organization_repository_creation_limited.metadata.json +30 -0
- prowler/providers/github/services/organization/organization_repository_creation_limited/organization_repository_creation_limited.py +106 -0
- prowler/providers/github/services/organization/organization_service.py +84 -10
- prowler/providers/iac/iac_provider.py +279 -55
- prowler/providers/kubernetes/services/etcd/etcd_client_cert_auth/etcd_client_cert_auth.metadata.json +18 -13
- prowler/providers/kubernetes/services/etcd/etcd_no_auto_tls/etcd_no_auto_tls.metadata.json +16 -11
- prowler/providers/kubernetes/services/etcd/etcd_no_peer_auto_tls/etcd_no_peer_auto_tls.metadata.json +16 -11
- prowler/providers/kubernetes/services/etcd/etcd_peer_client_cert_auth/etcd_peer_client_cert_auth.metadata.json +18 -13
- prowler/providers/kubernetes/services/etcd/etcd_peer_tls_config/etcd_peer_tls_config.metadata.json +16 -12
- prowler/providers/kubernetes/services/etcd/etcd_tls_encryption/etcd_tls_encryption.metadata.json +16 -11
- prowler/providers/kubernetes/services/etcd/etcd_unique_ca/etcd_unique_ca.metadata.json +16 -10
- prowler/providers/m365/lib/powershell/m365_powershell.py +80 -93
- prowler/providers/m365/m365_provider.py +1 -6
- prowler/providers/mongodbatlas/exceptions/exceptions.py +16 -0
- prowler/providers/mongodbatlas/mongodbatlas_provider.py +15 -3
- prowler/providers/mongodbatlas/services/projects/projects_auditing_enabled/projects_auditing_enabled.metadata.json +20 -9
- prowler/providers/mongodbatlas/services/projects/projects_network_access_list_exposed_to_internet/projects_network_access_list_exposed_to_internet.metadata.json +14 -9
- prowler/providers/oraclecloud/lib/arguments/arguments.py +4 -13
- prowler/providers/oraclecloud/lib/service/service.py +3 -3
- prowler/providers/oraclecloud/{oci_provider.py → oraclecloud_provider.py} +15 -15
- prowler/providers/oraclecloud/services/analytics/analytics_instance_access_restricted/analytics_instance_access_restricted.metadata.json +20 -16
- prowler/providers/oraclecloud/services/audit/audit_log_retention_period_365_days/audit_log_retention_period_365_days.metadata.json +17 -17
- prowler/providers/oraclecloud/services/blockstorage/blockstorage_block_volume_encrypted_with_cmk/blockstorage_block_volume_encrypted_with_cmk.metadata.json +17 -19
- prowler/providers/oraclecloud/services/blockstorage/blockstorage_boot_volume_encrypted_with_cmk/blockstorage_boot_volume_encrypted_with_cmk.metadata.json +18 -18
- prowler/providers/oraclecloud/services/cloudguard/cloudguard_enabled/cloudguard_enabled.metadata.json +17 -18
- prowler/providers/oraclecloud/services/compute/compute_instance_in_transit_encryption_enabled/compute_instance_in_transit_encryption_enabled.metadata.json +1 -1
- prowler/providers/oraclecloud/services/compute/compute_instance_legacy_metadata_endpoint_disabled/compute_instance_legacy_metadata_endpoint_disabled.metadata.json +1 -1
- prowler/providers/oraclecloud/services/compute/compute_instance_secure_boot_enabled/compute_instance_secure_boot_enabled.metadata.json +1 -1
- prowler/providers/oraclecloud/services/database/database_autonomous_database_access_restricted/database_autonomous_database_access_restricted.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_notification_topic_and_subscription_exists/events_notification_topic_and_subscription_exists.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_cloudguard_problems/events_rule_cloudguard_problems.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_iam_group_changes/events_rule_iam_group_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_iam_policy_changes/events_rule_iam_policy_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_identity_provider_changes/events_rule_identity_provider_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_idp_group_mapping_changes/events_rule_idp_group_mapping_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_local_user_authentication/events_rule_local_user_authentication.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_network_gateway_changes/events_rule_network_gateway_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_network_security_group_changes/events_rule_network_security_group_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_route_table_changes/events_rule_route_table_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_security_list_changes/events_rule_security_list_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_user_changes/events_rule_user_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/events/events_rule_vcn_changes/events_rule_vcn_changes.metadata.json +1 -1
- prowler/providers/oraclecloud/services/filestorage/filestorage_file_system_encrypted_with_cmk/filestorage_file_system_encrypted_with_cmk.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_iam_admins_cannot_update_tenancy_admins/identity_iam_admins_cannot_update_tenancy_admins.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_instance_principal_used/identity_instance_principal_used.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_no_resources_in_root_compartment/identity_no_resources_in_root_compartment.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_non_root_compartment_exists/identity_non_root_compartment_exists.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_password_policy_expires_within_365_days/identity_password_policy_expires_within_365_days.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_password_policy_minimum_length_14/identity_password_policy_minimum_length_14.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_password_policy_prevents_reuse/identity_password_policy_prevents_reuse.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_service_level_admins_exist/identity_service_level_admins_exist.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_tenancy_admin_permissions_limited/identity_tenancy_admin_permissions_limited.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_tenancy_admin_users_no_api_keys/identity_tenancy_admin_users_no_api_keys.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_user_api_keys_rotated_90_days/identity_user_api_keys_rotated_90_days.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_user_auth_tokens_rotated_90_days/identity_user_auth_tokens_rotated_90_days.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_user_customer_secret_keys_rotated_90_days/identity_user_customer_secret_keys_rotated_90_days.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_user_db_passwords_rotated_90_days/identity_user_db_passwords_rotated_90_days.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_user_mfa_enabled_console_access/identity_user_mfa_enabled_console_access.metadata.json +1 -1
- prowler/providers/oraclecloud/services/identity/identity_user_valid_email_address/identity_user_valid_email_address.metadata.json +1 -1
- prowler/providers/oraclecloud/services/integration/integration_instance_access_restricted/integration_instance_access_restricted.metadata.json +1 -1
- prowler/providers/oraclecloud/services/kms/kms_key_rotation_enabled/kms_key_rotation_enabled.metadata.json +1 -1
- prowler/providers/oraclecloud/services/network/network_default_security_list_restricts_traffic/network_default_security_list_restricts_traffic.metadata.json +1 -1
- prowler/providers/oraclecloud/services/network/network_security_group_ingress_from_internet_to_rdp_port/network_security_group_ingress_from_internet_to_rdp_port.metadata.json +1 -1
- prowler/providers/oraclecloud/services/network/network_security_group_ingress_from_internet_to_ssh_port/network_security_group_ingress_from_internet_to_ssh_port.metadata.json +1 -1
- prowler/providers/oraclecloud/services/network/network_security_list_ingress_from_internet_to_rdp_port/network_security_list_ingress_from_internet_to_rdp_port.metadata.json +1 -1
- prowler/providers/oraclecloud/services/network/network_security_list_ingress_from_internet_to_ssh_port/network_security_list_ingress_from_internet_to_ssh_port.metadata.json +1 -1
- prowler/providers/oraclecloud/services/network/network_vcn_subnet_flow_logs_enabled/network_vcn_subnet_flow_logs_enabled.metadata.json +1 -1
- prowler/providers/oraclecloud/services/objectstorage/objectstorage_bucket_encrypted_with_cmk/objectstorage_bucket_encrypted_with_cmk.metadata.json +1 -1
- prowler/providers/oraclecloud/services/objectstorage/objectstorage_bucket_logging_enabled/objectstorage_bucket_logging_enabled.metadata.json +1 -1
- prowler/providers/oraclecloud/services/objectstorage/objectstorage_bucket_not_publicly_accessible/objectstorage_bucket_not_publicly_accessible.metadata.json +1 -1
- prowler/providers/oraclecloud/services/objectstorage/objectstorage_bucket_versioning_enabled/objectstorage_bucket_versioning_enabled.metadata.json +1 -1
- {prowler_cloud-5.13.1.dist-info → prowler_cloud-5.14.0.dist-info}/METADATA +17 -16
- {prowler_cloud-5.13.1.dist-info → prowler_cloud-5.14.0.dist-info}/RECORD +295 -246
- /prowler/compliance/{oci → oraclecloud}/__init__.py +0 -0
- {prowler_cloud-5.13.1.dist-info → prowler_cloud-5.14.0.dist-info}/LICENSE +0 -0
- {prowler_cloud-5.13.1.dist-info → prowler_cloud-5.14.0.dist-info}/WHEEL +0 -0
- {prowler_cloud-5.13.1.dist-info → prowler_cloud-5.14.0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,1269 @@
|
|
|
1
|
+
{
|
|
2
|
+
"Framework": "ProwlerThreatScore",
|
|
3
|
+
"Name": "Prowler ThreatScore Compliance Framework for Kubernetes",
|
|
4
|
+
"Version": "1.0",
|
|
5
|
+
"Provider": "Kubernetes",
|
|
6
|
+
"Description": "Prowler ThreatScore Compliance Framework for Kubernetes ensures that Kubernetes clusters are compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Logging and Monitoring, and Encryption. This framework provides a comprehensive security assessment for Kubernetes environments focusing on workload security, RBAC configurations, container isolation, audit logging, and encryption controls that are directly controllable by users in both managed and self-managed Kubernetes deployments.",
|
|
7
|
+
"Requirements": [
|
|
8
|
+
{
|
|
9
|
+
"Id": "1.1.1",
|
|
10
|
+
"Description": "Ensure anonymous authentication is disabled for the API server",
|
|
11
|
+
"Checks": [
|
|
12
|
+
"apiserver_anonymous_requests"
|
|
13
|
+
],
|
|
14
|
+
"Attributes": [
|
|
15
|
+
{
|
|
16
|
+
"Title": "API Server anonymous authentication disabled",
|
|
17
|
+
"Section": "1. IAM",
|
|
18
|
+
"SubSection": "1.1 Authentication",
|
|
19
|
+
"AttributeDescription": "The Kubernetes API server is the central control plane component that exposes the Kubernetes API. Anonymous authentication allows unauthenticated users to make requests to the API server, which can expose the cluster to unauthorized access. The --anonymous-auth argument should be set to false to ensure that all requests to the API server are authenticated.",
|
|
20
|
+
"AdditionalInformation": "Enabling anonymous authentication on the API server allows unauthenticated users to discover information about the cluster, potentially including sensitive configuration details and resources. Attackers can exploit this to gather intelligence for further attacks or to access resources without proper authorization. Disabling anonymous authentication ensures that all API requests are properly authenticated, reducing the attack surface and enforcing the principle of least privilege.",
|
|
21
|
+
"LevelOfRisk": 5,
|
|
22
|
+
"Weight": 1000
|
|
23
|
+
}
|
|
24
|
+
]
|
|
25
|
+
},
|
|
26
|
+
{
|
|
27
|
+
"Id": "1.1.2",
|
|
28
|
+
"Description": "Ensure that the API server authorization mode is not set to AlwaysAllow",
|
|
29
|
+
"Checks": [
|
|
30
|
+
"apiserver_auth_mode_not_always_allow"
|
|
31
|
+
],
|
|
32
|
+
"Attributes": [
|
|
33
|
+
{
|
|
34
|
+
"Title": "API Server authorization mode not AlwaysAllow",
|
|
35
|
+
"Section": "1. IAM",
|
|
36
|
+
"SubSection": "1.1 Authentication",
|
|
37
|
+
"AttributeDescription": "The Kubernetes API server supports multiple authorization modes to control access to cluster resources. The AlwaysAllow mode bypasses all authorization checks, granting full access to all requests without any validation. This mode should never be used in production environments as it completely disables access control mechanisms.",
|
|
38
|
+
"AdditionalInformation": "Using the AlwaysAllow authorization mode effectively disables all authorization checks on the API server, allowing any authenticated or unauthenticated user to perform any action on the cluster. This poses an extreme security risk, as it removes all access control barriers and allows unrestricted access to sensitive cluster resources, secrets, and configuration. Ensuring that AlwaysAllow is not used is critical for maintaining cluster security and implementing proper access controls.",
|
|
39
|
+
"LevelOfRisk": 5,
|
|
40
|
+
"Weight": 1000
|
|
41
|
+
}
|
|
42
|
+
]
|
|
43
|
+
},
|
|
44
|
+
{
|
|
45
|
+
"Id": "1.1.3",
|
|
46
|
+
"Description": "Ensure that the --client-ca-file argument is set for the API server",
|
|
47
|
+
"Checks": [
|
|
48
|
+
"apiserver_client_ca_file_set"
|
|
49
|
+
],
|
|
50
|
+
"Attributes": [
|
|
51
|
+
{
|
|
52
|
+
"Title": "API Server client CA file configured",
|
|
53
|
+
"Section": "1. IAM",
|
|
54
|
+
"SubSection": "1.1 Authentication",
|
|
55
|
+
"AttributeDescription": "The API server uses client certificates to authenticate requests from users and components. The --client-ca-file argument specifies the Certificate Authority (CA) bundle that the API server uses to validate client certificates. When this is properly configured, the API server can verify the authenticity of client certificates and ensure that only authorized entities can access the cluster.",
|
|
56
|
+
"AdditionalInformation": "Without proper client certificate authentication, the API server cannot verify the identity of clients making requests, potentially allowing unauthorized access to cluster resources. Client certificate authentication provides strong mutual TLS authentication, ensuring that both the server and client can verify each other's identity. This is essential for securing communication between cluster components and preventing man-in-the-middle attacks.",
|
|
57
|
+
"LevelOfRisk": 5,
|
|
58
|
+
"Weight": 1000
|
|
59
|
+
}
|
|
60
|
+
]
|
|
61
|
+
},
|
|
62
|
+
{
|
|
63
|
+
"Id": "1.1.4",
|
|
64
|
+
"Description": "Ensure that the --token-auth-file parameter is not set for the API server",
|
|
65
|
+
"Checks": [
|
|
66
|
+
"apiserver_no_token_auth_file"
|
|
67
|
+
],
|
|
68
|
+
"Attributes": [
|
|
69
|
+
{
|
|
70
|
+
"Title": "API Server token authentication file not used",
|
|
71
|
+
"Section": "1. IAM",
|
|
72
|
+
"SubSection": "1.1 Authentication",
|
|
73
|
+
"AttributeDescription": "The --token-auth-file parameter allows the API server to authenticate requests using static tokens defined in a CSV file. This authentication method is insecure because tokens are long-lived, stored in plaintext, and cannot be rotated without restarting the API server. Modern Kubernetes clusters should use more secure authentication methods such as service account tokens, OIDC, or client certificates.",
|
|
74
|
+
"AdditionalInformation": "Using static token files for authentication creates significant security risks. Tokens cannot be easily rotated, and if the token file is compromised, attackers gain persistent access to the cluster. Additionally, token files are often stored without proper encryption, making them vulnerable to unauthorized access. Disabling static token authentication and using modern authentication mechanisms like service account tokens with automatic rotation or OIDC integration significantly improves cluster security.",
|
|
75
|
+
"LevelOfRisk": 5,
|
|
76
|
+
"Weight": 1000
|
|
77
|
+
}
|
|
78
|
+
]
|
|
79
|
+
},
|
|
80
|
+
{
|
|
81
|
+
"Id": "1.1.5",
|
|
82
|
+
"Description": "Ensure that the --service-account-key-file argument is set for the API server",
|
|
83
|
+
"Checks": [
|
|
84
|
+
"apiserver_service_account_key_file_set"
|
|
85
|
+
],
|
|
86
|
+
"Attributes": [
|
|
87
|
+
{
|
|
88
|
+
"Title": "API Server service account key file configured",
|
|
89
|
+
"Section": "1. IAM",
|
|
90
|
+
"SubSection": "1.1 Authentication",
|
|
91
|
+
"AttributeDescription": "Service accounts provide an identity for processes running in pods to authenticate with the API server. The --service-account-key-file argument specifies the public key used to verify service account tokens. When properly configured, the API server uses this key to validate that service account tokens are legitimate and have not been tampered with.",
|
|
92
|
+
"AdditionalInformation": "Service account token validation is critical for ensuring that only authorized pods can access cluster resources. Without proper key validation, attackers could forge service account tokens and gain unauthorized access to the cluster. Using a dedicated key file for service account validation, separate from the TLS certificate key, follows security best practices and allows for independent key rotation without affecting TLS communication.",
|
|
93
|
+
"LevelOfRisk": 4,
|
|
94
|
+
"Weight": 100
|
|
95
|
+
}
|
|
96
|
+
]
|
|
97
|
+
},
|
|
98
|
+
{
|
|
99
|
+
"Id": "1.1.6",
|
|
100
|
+
"Description": "Ensure kubelet certificate authority is configured for the API server",
|
|
101
|
+
"Checks": [
|
|
102
|
+
"apiserver_kubelet_cert_auth"
|
|
103
|
+
],
|
|
104
|
+
"Attributes": [
|
|
105
|
+
{
|
|
106
|
+
"Title": "API Server kubelet certificate authority configured",
|
|
107
|
+
"Section": "1. IAM",
|
|
108
|
+
"SubSection": "1.1 Authentication",
|
|
109
|
+
"AttributeDescription": "The API server communicates with kubelet on worker nodes to manage pods and retrieve logs and metrics. The --kubelet-certificate-authority argument specifies the CA certificate used to verify the kubelet's serving certificate. This ensures that the API server can authenticate the kubelet and establish a secure connection.",
|
|
110
|
+
"AdditionalInformation": "Without proper certificate validation, the API server cannot verify that it is communicating with a legitimate kubelet. This creates a risk of man-in-the-middle attacks where an attacker could intercept communications between the API server and kubelets, potentially exposing sensitive pod data, logs, and cluster information. Configuring certificate authority validation ensures secure, authenticated communication between control plane and worker nodes.",
|
|
111
|
+
"LevelOfRisk": 4,
|
|
112
|
+
"Weight": 100
|
|
113
|
+
}
|
|
114
|
+
]
|
|
115
|
+
},
|
|
116
|
+
{
|
|
117
|
+
"Id": "1.1.7",
|
|
118
|
+
"Description": "Ensure kubelet client certificate and key are configured for the API server",
|
|
119
|
+
"Checks": [
|
|
120
|
+
"apiserver_kubelet_tls_auth"
|
|
121
|
+
],
|
|
122
|
+
"Attributes": [
|
|
123
|
+
{
|
|
124
|
+
"Title": "API Server kubelet client certificates configured",
|
|
125
|
+
"Section": "1. IAM",
|
|
126
|
+
"SubSection": "1.1 Authentication",
|
|
127
|
+
"AttributeDescription": "The API server requires client certificates to authenticate when connecting to kubelets on worker nodes. The --kubelet-client-certificate and --kubelet-client-key arguments specify the certificate and private key that the API server uses to authenticate itself to kubelets. This establishes mutual TLS authentication between the control plane and worker nodes.",
|
|
128
|
+
"AdditionalInformation": "Mutual TLS authentication between the API server and kubelets ensures that both parties can verify each other's identity. Without proper client certificate authentication, kubelets cannot verify that requests are coming from a legitimate API server, potentially allowing unauthorized components to communicate with kubelets and manipulate pod lifecycle, extract logs, or execute commands in containers. Configuring client certificates provides strong authentication and prevents impersonation attacks.",
|
|
129
|
+
"LevelOfRisk": 4,
|
|
130
|
+
"Weight": 100
|
|
131
|
+
}
|
|
132
|
+
]
|
|
133
|
+
},
|
|
134
|
+
{
|
|
135
|
+
"Id": "1.1.8",
|
|
136
|
+
"Description": "Ensure that the etcd certfile and keyfile are set appropriately for the API server",
|
|
137
|
+
"Checks": [
|
|
138
|
+
"apiserver_etcd_tls_config"
|
|
139
|
+
],
|
|
140
|
+
"Attributes": [
|
|
141
|
+
{
|
|
142
|
+
"Title": "API Server etcd TLS configuration set",
|
|
143
|
+
"Section": "1. IAM",
|
|
144
|
+
"SubSection": "1.1 Authentication",
|
|
145
|
+
"AttributeDescription": "The API server stores all cluster state in etcd, the distributed key-value store. The --etcd-certfile and --etcd-keyfile arguments specify the client certificate and private key that the API server uses to authenticate to etcd. This ensures secure, authenticated communication between the API server and etcd, protecting sensitive cluster data including secrets, configuration, and state.",
|
|
146
|
+
"AdditionalInformation": "Communication between the API server and etcd contains all cluster data, including Kubernetes secrets, pod specifications, and cluster configuration. Without proper TLS authentication, this communication could be intercepted or manipulated, leading to data breaches, unauthorized access to secrets, or cluster compromise. Using client certificates for etcd authentication ensures that only authorized API servers can access and modify cluster state stored in etcd.",
|
|
147
|
+
"LevelOfRisk": 4,
|
|
148
|
+
"Weight": 100
|
|
149
|
+
}
|
|
150
|
+
]
|
|
151
|
+
},
|
|
152
|
+
{
|
|
153
|
+
"Id": "1.1.9",
|
|
154
|
+
"Description": "Ensure that the etcd cafile is set appropriately for the API server",
|
|
155
|
+
"Checks": [
|
|
156
|
+
"apiserver_etcd_cafile_set"
|
|
157
|
+
],
|
|
158
|
+
"Attributes": [
|
|
159
|
+
{
|
|
160
|
+
"Title": "API Server etcd CA file configured",
|
|
161
|
+
"Section": "1. IAM",
|
|
162
|
+
"SubSection": "1.1 Authentication",
|
|
163
|
+
"AttributeDescription": "The --etcd-cafile argument specifies the Certificate Authority bundle that the API server uses to verify the identity of etcd servers. This ensures that the API server can validate etcd's server certificate and establish a trusted connection, preventing man-in-the-middle attacks when accessing cluster state data.",
|
|
164
|
+
"AdditionalInformation": "Without proper CA validation, the API server cannot verify that it is connecting to a legitimate etcd server. An attacker could potentially impersonate etcd and intercept or manipulate all cluster state data, including secrets, configuration, and resource definitions. Configuring the etcd CA file ensures that the API server only communicates with verified etcd instances, protecting the integrity and confidentiality of all cluster data.",
|
|
165
|
+
"LevelOfRisk": 4,
|
|
166
|
+
"Weight": 100
|
|
167
|
+
}
|
|
168
|
+
]
|
|
169
|
+
},
|
|
170
|
+
{
|
|
171
|
+
"Id": "1.1.10",
|
|
172
|
+
"Description": "Ensure that client certificate authentication is enabled for etcd",
|
|
173
|
+
"Checks": [
|
|
174
|
+
"etcd_client_cert_auth"
|
|
175
|
+
],
|
|
176
|
+
"Attributes": [
|
|
177
|
+
{
|
|
178
|
+
"Title": "Etcd client certificate authentication enabled",
|
|
179
|
+
"Section": "1. IAM",
|
|
180
|
+
"SubSection": "1.1 Authentication",
|
|
181
|
+
"AttributeDescription": "Etcd is the key-value store that holds all Kubernetes cluster data, including secrets, configurations, and state. The --client-cert-auth argument enables client certificate authentication, requiring all clients (including the API server) to present valid certificates to access etcd. This provides strong mutual TLS authentication and ensures that only authorized components can access cluster data.",
|
|
182
|
+
"AdditionalInformation": "Etcd contains the entire state of the Kubernetes cluster, including all secrets and sensitive configuration data. Without client certificate authentication, etcd could be accessed by unauthorized clients, leading to complete cluster compromise, data breaches, and unauthorized modifications to cluster state. Enabling client certificate authentication ensures that only verified components can interact with etcd, providing a critical layer of security for cluster data protection.",
|
|
183
|
+
"LevelOfRisk": 5,
|
|
184
|
+
"Weight": 1000
|
|
185
|
+
}
|
|
186
|
+
]
|
|
187
|
+
},
|
|
188
|
+
{
|
|
189
|
+
"Id": "1.1.11",
|
|
190
|
+
"Description": "Ensure that peer client certificate authentication is enabled for etcd",
|
|
191
|
+
"Checks": [
|
|
192
|
+
"etcd_peer_client_cert_auth"
|
|
193
|
+
],
|
|
194
|
+
"Attributes": [
|
|
195
|
+
{
|
|
196
|
+
"Title": "Etcd peer client certificate authentication enabled",
|
|
197
|
+
"Section": "1. IAM",
|
|
198
|
+
"SubSection": "1.1 Authentication",
|
|
199
|
+
"AttributeDescription": "In a multi-node etcd cluster, etcd instances communicate with each other to replicate data and maintain cluster consensus. The --peer-client-cert-auth argument enables certificate-based authentication for peer communication, ensuring that only verified etcd nodes can join the cluster and participate in data replication.",
|
|
200
|
+
"AdditionalInformation": "Without peer certificate authentication, unauthorized etcd nodes could join the cluster and access or manipulate all cluster data. An attacker who gains access to the etcd peer network could introduce a malicious etcd instance, extract all secrets and configuration, or corrupt cluster state. Enabling peer certificate authentication ensures that only authorized etcd instances can participate in the cluster, protecting data integrity and preventing unauthorized access.",
|
|
201
|
+
"LevelOfRisk": 4,
|
|
202
|
+
"Weight": 100
|
|
203
|
+
}
|
|
204
|
+
]
|
|
205
|
+
},
|
|
206
|
+
{
|
|
207
|
+
"Id": "1.1.12",
|
|
208
|
+
"Description": "Ensure that a unique Certificate Authority is used for etcd",
|
|
209
|
+
"Checks": [
|
|
210
|
+
"etcd_unique_ca"
|
|
211
|
+
],
|
|
212
|
+
"Attributes": [
|
|
213
|
+
{
|
|
214
|
+
"Title": "Unique Certificate Authority used for etcd",
|
|
215
|
+
"Section": "1. IAM",
|
|
216
|
+
"SubSection": "1.1 Authentication",
|
|
217
|
+
"AttributeDescription": "Etcd should use a dedicated Certificate Authority (CA) separate from the Kubernetes cluster CA. This ensures that certificates issued for etcd communication cannot be used to access other cluster components, and vice versa. Using a unique CA provides defense in depth by limiting the scope of certificate compromise.",
|
|
218
|
+
"AdditionalInformation": "If etcd shares the same CA as other cluster components, a certificate compromise in one area could allow attackers to impersonate other components and access etcd. By using a dedicated CA for etcd, the impact of a certificate compromise is limited, preventing lateral movement within the cluster. This security practice follows the principle of least privilege and provides isolation between different security domains within the cluster.",
|
|
219
|
+
"LevelOfRisk": 3,
|
|
220
|
+
"Weight": 10
|
|
221
|
+
}
|
|
222
|
+
]
|
|
223
|
+
},
|
|
224
|
+
{
|
|
225
|
+
"Id": "1.1.13",
|
|
226
|
+
"Description": "Ensure anonymous authentication is disabled for the kubelet",
|
|
227
|
+
"Checks": [
|
|
228
|
+
"kubelet_disable_anonymous_auth"
|
|
229
|
+
],
|
|
230
|
+
"Attributes": [
|
|
231
|
+
{
|
|
232
|
+
"Title": "Kubelet anonymous authentication disabled",
|
|
233
|
+
"Section": "1. IAM",
|
|
234
|
+
"SubSection": "1.1 Authentication",
|
|
235
|
+
"AttributeDescription": "The kubelet is the primary node agent that runs on each worker node and manages containers. Anonymous authentication allows unauthenticated users to make requests to the kubelet API, potentially exposing pod information, metrics, and node status. The --anonymous-auth argument should be set to false to ensure all requests are properly authenticated.",
|
|
236
|
+
"AdditionalInformation": "Enabling anonymous authentication on kubelet allows unauthenticated users to query the kubelet API for information about pods, containers, and node status. Attackers can exploit this to gather intelligence about running workloads, discover vulnerabilities, and plan attacks. Additionally, depending on the authorization mode, anonymous users might be able to perform privileged operations. Disabling anonymous authentication ensures that all kubelet API access requires proper credentials, significantly reducing the attack surface.",
|
|
237
|
+
"LevelOfRisk": 5,
|
|
238
|
+
"Weight": 1000
|
|
239
|
+
}
|
|
240
|
+
]
|
|
241
|
+
},
|
|
+    {
+      "Id": "1.1.14",
+      "Description": "Ensure that the --client-ca-file argument is set for the kubelet",
+      "Checks": [
+        "kubelet_client_ca_file_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "Kubelet client CA file configured",
+          "Section": "1. IAM",
+          "SubSection": "1.1 Authentication",
+          "AttributeDescription": "The kubelet uses client certificates to authenticate requests from the API server and other authorized components. The --client-ca-file argument specifies the Certificate Authority bundle that the kubelet uses to validate client certificates. This ensures that the kubelet can verify the identity of clients and only accept requests from authorized sources.",
+          "AdditionalInformation": "Without proper client certificate validation, the kubelet cannot verify the identity of clients making requests. This could allow unauthorized components or attackers to communicate with the kubelet, potentially executing commands in containers, extracting sensitive data, or manipulating pod lifecycle. Configuring the client CA file ensures that only clients with valid certificates signed by the trusted CA can interact with the kubelet, providing strong authentication and access control.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.1.15",
+      "Description": "Ensure that the --root-ca-file argument is set for the Controller Manager",
+      "Checks": [
+        "controllermanager_root_ca_file_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "Controller Manager root CA file configured",
+          "Section": "1. IAM",
+          "SubSection": "1.1 Authentication",
+          "AttributeDescription": "The Controller Manager is responsible for running core control loops that manage cluster state. The --root-ca-file argument specifies the root Certificate Authority certificate that is used to verify service account tokens and to include in service accounts' token secrets. This ensures that service accounts can securely authenticate with the API server.",
+          "AdditionalInformation": "Service accounts are used by pods to authenticate with the API server and access cluster resources. The root CA certificate is essential for service account token validation and distribution. Without proper CA configuration, service accounts may not function correctly, or token validation may be compromised. Configuring the root CA file ensures that service account authentication is secure and that pods can reliably access the resources they need with proper authentication.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "1.2.1",
+      "Description": "Ensure that the --authorization-mode argument includes RBAC for the API server",
+      "Checks": [
+        "apiserver_auth_mode_include_rbac"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server authorization mode includes RBAC",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "Role-Based Access Control (RBAC) is the recommended authorization mode for Kubernetes clusters. RBAC uses roles and role bindings to define which users, groups, and service accounts can perform specific actions on cluster resources. The --authorization-mode argument must include RBAC to enable this fine-grained access control mechanism.",
+          "AdditionalInformation": "RBAC is essential for implementing the principle of least privilege in Kubernetes clusters. Without RBAC, it is difficult to control who can access what resources and perform which operations. Enabling RBAC allows administrators to create granular permissions, limit user access to only necessary resources, and prevent unauthorized actions. This is a fundamental security control that should be enabled on all production clusters to prevent privilege escalation and unauthorized resource access.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
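Requirements like 1.2.1 reduce to inspecting the kube-apiserver command line. A hedged sketch using the official kubernetes Python client on a kubeadm-style cluster, where the API server runs as a static pod labeled component=kube-apiserver; Prowler's own Kubernetes provider may gather this differently:

from kubernetes import client, config

config.load_kube_config()  # or load_incluster_config() when run in-cluster
pods = client.CoreV1Api().list_namespaced_pod(
    "kube-system", label_selector="component=kube-apiserver"
)
modes = set()
for pod in pods.items:
    for arg in pod.spec.containers[0].command or []:
        if arg.startswith("--authorization-mode="):
            modes = set(arg.split("=", 1)[1].split(","))
print("RBAC enabled:", "RBAC" in modes)  # requirement 1.2.1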
+    {
+      "Id": "1.2.2",
+      "Description": "Ensure that the --authorization-mode argument includes Node for the API server",
+      "Checks": [
+        "apiserver_auth_mode_include_node"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server authorization mode includes Node",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "The Node authorization mode is a special-purpose authorizer that grants permissions to kubelets based on the pods scheduled to run on them. This authorizer restricts kubelets to only read objects associated with their own node, preventing kubelets from accessing resources or secrets from pods scheduled on other nodes. The --authorization-mode argument should include Node to enable this restriction.",
+          "AdditionalInformation": "Without Node authorization, kubelets have broader permissions and could potentially access secrets, pods, and other resources from all nodes in the cluster. This violates the principle of least privilege and increases the blast radius if a single node is compromised. The Node authorizer ensures that each kubelet can only access the specific resources needed for pods running on its node, limiting the impact of a node compromise and preventing lateral movement within the cluster.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "1.2.3",
+      "Description": "Ensure that the AlwaysAdmit admission control plugin is not set for the API server",
+      "Checks": [
+        "apiserver_no_always_admit_plugin"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server AlwaysAdmit admission plugin not enabled",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "Admission control plugins enforce policies on objects during create, update, and delete operations. The AlwaysAdmit plugin approves all admission requests without any validation or enforcement. This plugin should never be enabled as it bypasses all admission control policies, including security policies, resource quotas, and configuration validation.",
+          "AdditionalInformation": "The AlwaysAdmit admission plugin completely disables admission control, allowing any request to be accepted regardless of cluster policies. This can lead to insecure configurations, resource exhaustion, and policy violations. Attackers could create privileged containers, bypass security contexts, ignore resource limits, or violate organizational policies. Ensuring that AlwaysAdmit is not enabled is critical for maintaining cluster security and policy enforcement.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
+    {
+      "Id": "1.2.4",
+      "Description": "Ensure that the NodeRestriction admission control plugin is set for the API server",
+      "Checks": [
+        "apiserver_node_restriction_plugin"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server NodeRestriction admission plugin enabled",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "The NodeRestriction admission control plugin limits the Node and Pod objects a kubelet can modify. This plugin ensures that kubelets can only modify their own Node object and Pod objects bound to their node, preventing a compromised kubelet from affecting other nodes or workloads in the cluster.",
+          "AdditionalInformation": "Without the NodeRestriction plugin, a compromised kubelet could modify any node's status or labels, potentially affecting scheduling decisions across the entire cluster. It could also tamper with pods on other nodes, leading to service disruption or data exposure. The NodeRestriction plugin enforces isolation between nodes and limits the blast radius of a node compromise, making it a critical security control for multi-tenant and production clusters.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
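The admission-plugin requirements (1.2.3 and 1.2.4 above) can be tested the same way, by parsing --enable-admission-plugins out of the API server arguments. A self-contained sketch of just the parsing step, assuming kubeadm-style --flag=value arguments:

def admission_plugins(args):
    """Return the set of plugins named in --enable-admission-plugins."""
    for arg in args:
        if arg.startswith("--enable-admission-plugins="):
            return set(arg.split("=", 1)[1].split(","))
    return set()

args = ["kube-apiserver", "--enable-admission-plugins=NodeRestriction,ServiceAccount"]
plugins = admission_plugins(args)
assert "AlwaysAdmit" not in plugins  # 1.2.3: plugin must not be set
assert "NodeRestriction" in plugins  # 1.2.4: plugin must be set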
+    {
+      "Id": "1.2.5",
+      "Description": "Ensure that the --service-account-lookup argument is set to true for the API server",
+      "Checks": [
+        "apiserver_service_account_lookup_true"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server service account lookup enabled",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "The --service-account-lookup argument controls whether the API server validates that service account tokens reference existing service accounts. When set to true, the API server checks that the service account specified in the token actually exists in the cluster and has not been deleted. This prevents the use of tokens for deleted service accounts.",
+          "AdditionalInformation": "Without service account lookup, tokens for deleted service accounts would continue to be valid until they expire. This creates a security gap where compromised or old tokens could be used to access the cluster even after the associated service account has been removed. Enabling service account lookup ensures that tokens are invalidated immediately when service accounts are deleted, providing better control over access revocation and reducing the window of exposure after credential compromise.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.2.6",
+      "Description": "Ensure that the ServiceAccount admission control plugin is set for the API server",
+      "Checks": [
+        "apiserver_service_account_plugin"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server ServiceAccount admission plugin enabled",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "The ServiceAccount admission control plugin automatically manages service accounts for pods. When enabled, it ensures that pods without a specified service account are assigned the default service account, and that the necessary service account token, CA certificate, and namespace information are mounted into pods. This plugin is essential for proper service account functionality.",
+          "AdditionalInformation": "Service accounts provide identity for processes running in pods and are the primary mechanism for pod authentication to the API server. The ServiceAccount admission plugin ensures that all pods have proper credentials to authenticate and that service account tokens are correctly provisioned. Without this plugin, pods may not have proper authentication credentials, leading to authentication failures or the use of insecure workarounds. This plugin should always be enabled to ensure secure and functional service account implementation.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "1.2.7",
+      "Description": "Ensure that the authorization mode is not set to AlwaysAllow for the kubelet",
+      "Checks": [
+        "kubelet_authorization_mode"
+      ],
+      "Attributes": [
+        {
+          "Title": "Kubelet authorization mode not AlwaysAllow",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "The kubelet supports various authorization modes to control access to its API endpoints. The AlwaysAllow mode bypasses all authorization checks, granting access to all requests without validation. The --authorization-mode argument should be set to Webhook or another secure mode instead of AlwaysAllow to ensure proper access control.",
+          "AdditionalInformation": "Using the AlwaysAllow authorization mode on the kubelet allows any authenticated user to perform any operation through the kubelet API, including executing commands in containers, accessing logs, and managing pod lifecycle. This effectively removes all access control from the kubelet, creating a critical security vulnerability. Proper authorization configuration ensures that only authorized users and components can access kubelet APIs, preventing unauthorized access to containers and sensitive workload data.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
+    {
+      "Id": "1.2.8",
+      "Description": "Ensure that the cluster-admin role is only used where required",
+      "Checks": [
+        "rbac_cluster_admin_usage"
+      ],
+      "Attributes": [
+        {
+          "Title": "Cluster-admin role usage minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "The cluster-admin role is a built-in ClusterRole that provides unrestricted access to all resources in the cluster. This role should be granted sparingly and only to users or service accounts that absolutely require full cluster access. Most users and applications should use more restrictive roles that grant only the minimum necessary permissions.",
+          "AdditionalInformation": "Overuse of the cluster-admin role violates the principle of least privilege and increases the risk of accidental or malicious cluster damage. If an account with cluster-admin access is compromised, attackers gain full control over the cluster, including the ability to view all secrets, modify any resource, create privileged containers, and exfiltrate data. Limiting cluster-admin usage to only essential administrative tasks and using more granular RBAC roles for other users reduces the blast radius of compromised credentials and prevents privilege abuse.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
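Requirement 1.2.8 is an audit rather than a flag check: every subject bound to cluster-admin should be reviewed. A minimal sketch with the official kubernetes client (not the rbac_cluster_admin_usage check itself):

from kubernetes import client, config

config.load_kube_config()
rbac = client.RbacAuthorizationV1Api()
for binding in rbac.list_cluster_role_binding().items:
    if binding.role_ref.name == "cluster-admin":
        for subject in binding.subjects or []:
            print(f"{binding.metadata.name}: {subject.kind}/{subject.name}")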
+    {
+      "Id": "1.2.9",
+      "Description": "Ensure that wildcard use in Roles and ClusterRoles is minimized",
+      "Checks": [
+        "rbac_minimize_wildcard_use_roles"
+      ],
+      "Attributes": [
+        {
+          "Title": "RBAC wildcard usage minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "RBAC roles can use wildcards (*) to grant permissions to all resources, API groups, or verbs. While wildcards can simplify role definitions, they often grant excessive permissions beyond what is actually needed. Roles and ClusterRoles should use specific resource names, API groups, and verbs instead of wildcards to implement the principle of least privilege.",
+          "AdditionalInformation": "Using wildcards in RBAC roles can inadvertently grant excessive permissions, allowing users or service accounts to access resources they don't need. This increases the risk of privilege escalation, where users could exploit their broad permissions to access sensitive data or modify critical resources. Wildcards also make it difficult to audit permissions and understand what access has been granted. Minimizing wildcard usage and explicitly defining required permissions improves security posture and makes access control more transparent and auditable.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
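A companion sketch for 1.2.9, flagging ClusterRoles whose rules use "*" in verbs, resources, or API groups. Note that some built-in roles (cluster-admin itself, for one) legitimately use wildcards, so the output needs human review:

from kubernetes import client, config

config.load_kube_config()
rbac = client.RbacAuthorizationV1Api()
for role in rbac.list_cluster_role().items:
    for rule in role.rules or []:
        if "*" in (rule.verbs or []) + (rule.resources or []) + (rule.api_groups or []):
            print(f"wildcard rule in ClusterRole {role.metadata.name}")
            break
# namespaced Roles can be covered the same way via list_role_for_all_namespaces()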
+    {
+      "Id": "1.2.10",
+      "Description": "Ensure that the --use-service-account-credentials argument is set to true for the Controller Manager",
+      "Checks": [
+        "controllermanager_service_account_credentials"
+      ],
+      "Attributes": [
+        {
+          "Title": "Controller Manager uses service account credentials",
+          "Section": "1. IAM",
+          "SubSection": "1.2 Authorization",
+          "AttributeDescription": "The Controller Manager runs multiple controllers that manage different aspects of the cluster. The --use-service-account-credentials argument ensures that each controller uses its own service account credentials when communicating with the API server, rather than sharing a single set of credentials. This provides better auditability and allows for fine-grained access control per controller.",
+          "AdditionalInformation": "When service account credentials are not used, all controllers operate under the same identity, making it impossible to distinguish which controller performed which action. This reduces audit trail quality and makes it difficult to implement least-privilege access for individual controllers. Using separate service account credentials for each controller allows administrators to grant each controller only the permissions it needs, implement better logging and monitoring, and reduce the impact if a single controller is compromised.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "1.3.1",
+      "Description": "Ensure that the SecurityContextDeny admission control plugin is set for the API server if PodSecurityPolicy is not used",
+      "Checks": [
+        "apiserver_security_context_deny_plugin"
+      ],
+      "Attributes": [
+        {
+          "Title": "SecurityContextDeny admission plugin configured",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "The SecurityContextDeny admission control plugin denies any pod that attempts to set security context options that could escalate privileges. This includes privileged containers, host namespace access, and other dangerous security context settings. While PodSecurityPolicy or Pod Security Standards are preferred, SecurityContextDeny provides basic protection against privilege escalation.",
+          "AdditionalInformation": "Without security context controls, users can create pods with privileged access, host namespace sharing, and dangerous capabilities that allow container escape and node compromise. The SecurityContextDeny plugin provides a basic safety net by rejecting dangerous security contexts. However, it is a coarse-grained control and should be supplemented with PodSecurityPolicy, Pod Security Standards, or admission webhooks for production environments to provide more granular control over pod security configurations.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.2",
+      "Description": "Ensure admission of privileged containers is minimized",
+      "Checks": [
+        "core_minimize_privileged_containers"
+      ],
+      "Attributes": [
+        {
+          "Title": "Privileged containers minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Privileged containers run with all Linux capabilities enabled and have access to the host's devices, effectively having root access to the node. The use of privileged containers should be strictly controlled and limited to specific use cases where host access is absolutely necessary, such as certain monitoring or networking components. Most application workloads should never run as privileged.",
+          "AdditionalInformation": "Privileged containers can be used to escape the container and compromise the underlying node. They have unrestricted access to the node's resources, can load kernel modules, access all devices, and perform any operation that root can perform on the host. If a privileged container is compromised, the attacker gains complete control over the node and potentially the entire cluster. Minimizing privileged container usage and implementing strict admission controls prevents container escapes and limits the blast radius of container compromises.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
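The workload checks in section 1.3 inspect pod specs rather than control-plane flags. For 1.3.2, a minimal sketch that lists privileged containers across all namespaces (again using the official client, not the core_minimize_privileged_containers implementation):

from kubernetes import client, config

config.load_kube_config()
for pod in client.CoreV1Api().list_pod_for_all_namespaces().items:
    for c in pod.spec.containers:
        if c.security_context and c.security_context.privileged:
            print(f"privileged: {pod.metadata.namespace}/{pod.metadata.name} ({c.name})")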
+    {
+      "Id": "1.3.3",
+      "Description": "Ensure admission of containers running as root is minimized",
+      "Checks": [
+        "core_minimize_root_containers_admission"
+      ],
+      "Attributes": [
+        {
+          "Title": "Root containers minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Containers running as the root user (UID 0) have elevated privileges within the container. While container isolation provides some protection, running as root increases the risk of container escape and privilege escalation if vulnerabilities are exploited. Containers should run as non-root users whenever possible to follow the principle of least privilege.",
+          "AdditionalInformation": "Running containers as root increases the attack surface and makes privilege escalation easier if container vulnerabilities are discovered. Many container escape vulnerabilities require root access within the container to exploit. Additionally, if a container is misconfigured or compromised, running as root gives attackers more capabilities to manipulate files, processes, and potentially escape to the host. Enforcing non-root container execution significantly reduces the risk of privilege escalation and container escape attacks.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
+    {
+      "Id": "1.3.4",
+      "Description": "Ensure admission of containers with the NET_RAW capability is minimized",
+      "Checks": [
+        "core_minimize_net_raw_capability_admission"
+      ],
+      "Attributes": [
+        {
+          "Title": "NET_RAW capability minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "The NET_RAW capability allows containers to create raw sockets and perform low-level network operations, including packet sniffing and spoofing. This capability is dangerous as it enables man-in-the-middle attacks, network reconnaissance, and bypassing network security controls. Containers with NET_RAW should be minimized and used only when absolutely necessary.",
+          "AdditionalInformation": "Containers with the NET_RAW capability can intercept, manipulate, and forge network traffic within the cluster. Attackers can use this capability to perform ARP spoofing, DNS poisoning, or sniff sensitive data from other pods on the same node. This significantly increases the risk of lateral movement and data exfiltration within the cluster. By default, Docker and Kubernetes grant NET_RAW to containers, but this should be explicitly dropped for workloads that don't require it to reduce attack surface and prevent network-based attacks.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.5",
+      "Description": "Ensure admission of containers with added capabilities is minimized",
+      "Checks": [
+        "core_minimize_containers_added_capabilities"
+      ],
+      "Attributes": [
+        {
+          "Title": "Containers with added capabilities minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Linux capabilities provide fine-grained control over privileged operations. While containers run with a default set of capabilities, additional capabilities can be granted through security context. Adding capabilities increases the attack surface and can enable privilege escalation. Containers should run with the minimum required capabilities, and additional capabilities should only be granted when absolutely necessary.",
+          "AdditionalInformation": "Many dangerous operations are gated by specific Linux capabilities, such as CAP_SYS_ADMIN, CAP_NET_ADMIN, or CAP_DAC_OVERRIDE. Granting additional capabilities can enable attackers to bypass security controls, access sensitive resources, or escalate privileges. For example, CAP_SYS_ADMIN provides near-root level access and can be used to mount filesystems, load kernel modules, and perform other dangerous operations. Minimizing added capabilities ensures containers operate with least privilege and reduces the risk of container escape and privilege escalation.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.6",
+      "Description": "Ensure admission of containers with capabilities assigned is minimized",
+      "Checks": [
+        "core_minimize_containers_capabilities_assigned"
+      ],
+      "Attributes": [
+        {
+          "Title": "Containers with capabilities assigned minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Containers with any explicitly assigned capabilities should be carefully reviewed and minimized. Even removing some default capabilities and adding others can create security risks if not properly managed. Best practice is to drop all capabilities and only add back the specific ones required for the container to function.",
+          "AdditionalInformation": "Managing container capabilities is complex, and misconfigurations can inadvertently grant excessive privileges. Attackers can exploit containers with inappropriate capabilities to perform privilege escalation, bypass security controls, or access host resources. A defense-in-depth approach involves dropping all capabilities by default and explicitly adding only the minimum required capabilities with clear justification and documentation. This ensures that capability assignments are intentional and reviewed rather than inherited by default.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.7",
+      "Description": "Ensure admission of containers with allowPrivilegeEscalation is minimized",
+      "Checks": [
+        "core_minimize_allowPrivilegeEscalation_containers"
+      ],
+      "Attributes": [
+        {
+          "Title": "AllowPrivilegeEscalation minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "The allowPrivilegeEscalation security context setting controls whether a process can gain more privileges than its parent process. When set to true, programs running in the container can use setuid or setgid binaries to escalate privileges. This setting should be set to false unless there is a specific requirement for privilege escalation within the container.",
+          "AdditionalInformation": "Allowing privilege escalation within containers enables attackers to exploit setuid/setgid binaries or other privilege escalation vulnerabilities to gain root access within the container. This significantly increases the risk of container compromise and can be a stepping stone to container escape and node compromise. Setting allowPrivilegeEscalation to false prevents processes from gaining additional privileges through setuid/setgid mechanisms, providing defense in depth against privilege escalation attacks even if vulnerable binaries exist in container images.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
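For 1.3.7, the same scan pattern applies to allowPrivilegeEscalation. The sketch below treats an unset field as a finding, since the field defaults to true for most workloads:

from kubernetes import client, config

config.load_kube_config()
for pod in client.CoreV1Api().list_pod_for_all_namespaces().items:
    for c in pod.spec.containers:
        sc = c.security_context
        if sc is None or sc.allow_privilege_escalation is not False:
            print(f"may escalate: {pod.metadata.namespace}/{pod.metadata.name} ({c.name})")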
+    {
+      "Id": "1.3.8",
+      "Description": "Ensure admission of containers sharing the host IPC namespace is minimized",
+      "Checks": [
+        "core_minimize_hostIPC_containers"
+      ],
+      "Attributes": [
+        {
+          "Title": "Host IPC namespace sharing minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Containers can be configured to share the host's IPC (Inter-Process Communication) namespace using the hostIPC setting. This allows containers to communicate with processes on the host using IPC mechanisms like shared memory and semaphores. Sharing the host IPC namespace should be minimized as it breaks container isolation and can expose sensitive information.",
+          "AdditionalInformation": "Containers sharing the host IPC namespace can access IPC resources used by other processes on the host, including other containers. This can lead to information disclosure, as containers could read shared memory segments containing sensitive data from other applications. Additionally, malicious containers could manipulate IPC resources to disrupt or compromise other processes on the host. Maintaining IPC namespace isolation ensures that containers cannot interfere with each other or the host through IPC mechanisms.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.9",
+      "Description": "Ensure admission of containers sharing the host PID namespace is minimized",
+      "Checks": [
+        "core_minimize_hostPID_containers"
+      ],
+      "Attributes": [
+        {
+          "Title": "Host PID namespace sharing minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Containers can be configured to share the host's PID (Process ID) namespace using the hostPID setting. This allows containers to see and interact with all processes running on the host, including processes in other containers. Sharing the host PID namespace should be minimized as it breaks process isolation and enables privilege escalation attacks.",
+          "AdditionalInformation": "Containers with access to the host PID namespace can view all processes on the node, including sensitive system processes and processes from other containers. This information can be used for reconnaissance and to identify attack targets. More critically, if the container also has elevated capabilities or is running as root, it could send signals to or manipulate host processes, potentially killing critical system processes or injecting code. Maintaining PID namespace isolation prevents containers from viewing or interacting with processes outside their namespace.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
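Host namespace sharing (1.3.8 and 1.3.9) is declared at the pod level, so one pass over pod specs covers both; a minimal sketch:

from kubernetes import client, config

config.load_kube_config()
for pod in client.CoreV1Api().list_pod_for_all_namespaces().items:
    shared = [name for name, on in (("hostIPC", pod.spec.host_ipc),
                                    ("hostPID", pod.spec.host_pid)) if on]
    if shared:
        print(f"{pod.metadata.namespace}/{pod.metadata.name}: {', '.join(shared)}")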
+    {
+      "Id": "1.3.10",
+      "Description": "Ensure admission of Windows HostProcess containers is minimized",
+      "Checks": [
+        "core_minimize_admission_windows_hostprocess_containers"
+      ],
+      "Attributes": [
+        {
+          "Title": "Windows HostProcess containers minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Windows HostProcess containers run directly on the Windows host with elevated privileges, similar to privileged containers in Linux. These containers have access to host resources and can perform administrative operations on the Windows node. HostProcess containers should only be used for specific system-level operations and should be strictly controlled.",
+          "AdditionalInformation": "Windows HostProcess containers have extensive access to the host system and can perform privileged operations that could compromise node security. If a HostProcess container is compromised, attackers gain administrative access to the Windows node, allowing them to access sensitive data, manipulate the host, or pivot to other systems. Like privileged Linux containers, HostProcess containers should only be used when absolutely necessary and with strict admission controls to prevent misuse and reduce the attack surface.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.11",
+      "Description": "Ensure that the seccomp profile is set to docker/default or runtime/default",
+      "Checks": [
+        "core_seccomp_profile_docker_default"
+      ],
+      "Attributes": [
+        {
+          "Title": "Seccomp profile set to docker/default",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Seccomp (Secure Computing Mode) is a Linux kernel feature that restricts the system calls a process can make. Setting the seccomp profile to docker/default or runtime/default applies a restrictive profile that blocks many dangerous system calls while allowing common operations needed by most applications. This provides defense in depth against container escape and privilege escalation.",
+          "AdditionalInformation": "Without seccomp restrictions, containers can make any system call, including dangerous ones that could be exploited for container escape or privilege escalation. Many container escape vulnerabilities rely on specific system calls to interact with the kernel in unexpected ways. Applying the docker/default seccomp profile blocks the most dangerous system calls while maintaining compatibility with most applications. This significantly reduces the attack surface and makes it harder for attackers to exploit kernel vulnerabilities from within containers.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
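For 1.3.11, docker/default and runtime/default name the annotation-era seccomp profiles; in current pod specs the equivalent is securityContext.seccompProfile.type: RuntimeDefault. A sketch checking only the pod-level field (container-level overrides would need the same test per container):

from kubernetes import client, config

config.load_kube_config()
for pod in client.CoreV1Api().list_pod_for_all_namespaces().items:
    sc = pod.spec.security_context
    profile = sc.seccomp_profile if sc else None
    if profile is None or profile.type != "RuntimeDefault":
        print(f"no default seccomp profile: {pod.metadata.namespace}/{pod.metadata.name}")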
+    {
+      "Id": "1.3.12",
+      "Description": "Ensure access to secrets is minimized through RBAC",
+      "Checks": [
+        "rbac_minimize_secret_access"
+      ],
+      "Attributes": [
+        {
+          "Title": "Access to secrets minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Kubernetes secrets store sensitive data such as passwords, OAuth tokens, and SSH keys. Access to secrets should be strictly controlled using RBAC to ensure that only authorized users and service accounts can read or modify secrets. Broad secret access increases the risk of credential theft and privilege escalation.",
+          "AdditionalInformation": "Secrets often contain credentials that can be used to access external systems, databases, or escalate privileges within the cluster. If secret access is not properly restricted, compromised accounts could read all secrets in the cluster, leading to widespread credential theft and unauthorized access to external resources. Minimizing secret access through granular RBAC policies ensures that users and service accounts can only access the specific secrets they need, implementing least privilege and reducing the blast radius of compromised accounts.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
+    {
+      "Id": "1.3.13",
+      "Description": "Ensure access to create pods is minimized through RBAC",
+      "Checks": [
+        "rbac_minimize_pod_creation_access"
+      ],
+      "Attributes": [
+        {
+          "Title": "Pod creation access minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "The ability to create pods is a powerful permission in Kubernetes, as it allows users to run arbitrary code in the cluster. Access to create pods should be restricted to only those users and service accounts that require it. Unrestricted pod creation can lead to privilege escalation, resource exhaustion, and deployment of malicious workloads.",
+          "AdditionalInformation": "Users with pod creation permissions can potentially escalate privileges by creating pods with privileged security contexts, host namespace access, or by mounting sensitive host paths or secrets. They could also deploy malicious containers to perform reconnaissance, exfiltrate data, or attack other workloads. Restricting pod creation to authorized users and service accounts, and implementing admission controls to validate pod security configurations, prevents unauthorized workload deployment and privilege escalation through pod manipulation.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.14",
+      "Description": "Ensure access to create persistent volumes is minimized through RBAC",
+      "Checks": [
+        "rbac_minimize_pv_creation_access"
+      ],
+      "Attributes": [
+        {
+          "Title": "Persistent volume creation access minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Creating persistent volumes allows users to provision storage that can be mounted into pods. In some configurations, users with PV creation permissions could create volumes that access sensitive host paths or storage systems, potentially leading to data exposure or privilege escalation. Access to create PVs should be restricted to administrators.",
+          "AdditionalInformation": "Persistent volumes can be configured to bind to host paths or access privileged storage systems. Users with PV creation permissions could potentially create volumes that expose sensitive data from the host filesystem, access other tenants' data in multi-tenant storage systems, or bypass storage quotas and policies. Restricting PV creation to cluster administrators ensures that storage provisioning follows organizational policies and security requirements, preventing unauthorized data access through storage manipulation.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "1.3.15",
+      "Description": "Ensure access to proxy sub-resources of nodes is minimized through RBAC",
+      "Checks": [
+        "rbac_minimize_node_proxy_subresource_access"
+      ],
+      "Attributes": [
+        {
+          "Title": "Node proxy sub-resource access minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "The node proxy sub-resource allows users to establish proxy connections to kubelet APIs on nodes. This provides access to pod logs, execution capabilities, and other node-level operations. Access to the node proxy sub-resource should be strictly limited as it can be used to bypass normal API server authorization and directly interact with kubelets.",
+          "AdditionalInformation": "Users with access to the node proxy sub-resource can communicate directly with kubelets, potentially bypassing API server audit logging and authorization policies. This could allow them to access pods on specific nodes, execute commands in containers, or retrieve sensitive information without proper tracking. In some configurations, node proxy access could be used for privilege escalation by interacting with privileged pods or system components. Restricting node proxy access ensures that all node and pod interactions go through proper API server authorization and audit logging.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.16",
+      "Description": "Ensure access to approve certificate signing requests is minimized through RBAC",
+      "Checks": [
+        "rbac_minimize_csr_approval_access"
+      ],
+      "Attributes": [
+        {
+          "Title": "CSR approval access minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Certificate Signing Requests (CSRs) are used to request signed certificates from the cluster's certificate authority. The ability to approve CSRs is a sensitive permission that should be restricted to administrators. Unauthorized CSR approval could allow attackers to obtain valid certificates for arbitrary identities, enabling authentication bypass and privilege escalation.",
+          "AdditionalInformation": "Users who can approve CSRs could issue certificates for any identity, including cluster administrators or system components. This effectively allows them to impersonate any user or service account in the cluster, bypassing all RBAC controls. An attacker with CSR approval permissions could issue certificates for privileged accounts and use them to gain full control over the cluster. Restricting CSR approval to a small group of trusted administrators prevents unauthorized certificate issuance and protects the integrity of the cluster's PKI infrastructure.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.17",
+      "Description": "Ensure access to create service account tokens is minimized through RBAC",
+      "Checks": [
+        "rbac_minimize_service_account_token_creation"
+      ],
+      "Attributes": [
+        {
+          "Title": "Service account token creation access minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "The ability to create service account tokens allows users to generate authentication tokens for service accounts. This is a sensitive permission because tokens provide authentication credentials that can be used to access the API server. Access to create service account tokens should be restricted to prevent unauthorized token generation and privilege escalation.",
+          "AdditionalInformation": "Users with permission to create service account tokens can generate authentication credentials for any service account they have access to. This could allow them to escalate privileges by creating tokens for service accounts with higher permissions than their own. The tokens can be extracted and used outside the cluster, potentially exposing cluster access to external attackers. Restricting token creation ensures that service account credentials are only generated through approved mechanisms and prevents unauthorized privilege escalation through token manipulation.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "1.3.18",
+      "Description": "Ensure access to webhook configuration objects is minimized through RBAC",
+      "Checks": [
+        "rbac_minimize_webhook_config_access"
+      ],
+      "Attributes": [
+        {
+          "Title": "Webhook configuration access minimized",
+          "Section": "1. IAM",
+          "SubSection": "1.3 Privilege Escalation Prevention",
+          "AttributeDescription": "Admission webhooks and authentication webhooks allow external services to validate or modify requests to the API server. Access to create or modify webhook configurations should be strictly limited to administrators, as malicious webhook configurations could intercept credentials, bypass security policies, or manipulate cluster operations.",
+          "AdditionalInformation": "Users who can modify webhook configurations could redirect webhook calls to attacker-controlled servers, allowing them to intercept authentication tokens, modify admission decisions, or capture sensitive data from API requests. They could also disable security-enforcing webhooks or create webhooks that automatically approve dangerous configurations. Controlling access to webhook configurations is critical for maintaining the integrity of admission control and authentication systems, preventing attackers from bypassing security policies or stealing credentials.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "2.1.1",
+      "Description": "Ensure that the DenyServiceExternalIPs admission control plugin is set for the API server",
+      "Checks": [
+        "apiserver_deny_service_external_ips"
+      ],
+      "Attributes": [
+        {
+          "Title": "DenyServiceExternalIPs admission plugin enabled",
+          "Section": "2. Attack Surface",
+          "SubSection": "2.1 Network",
+          "AttributeDescription": "Kubernetes services can be configured with external IPs that are not managed by the cluster. The DenyServiceExternalIPs admission control plugin prevents users from creating services with arbitrary external IPs, which could be used to intercept traffic or expose services unexpectedly. This plugin should be enabled to control service external IP assignments.",
+          "AdditionalInformation": "Allowing arbitrary external IPs on services can lead to security issues and traffic interception. Users could configure services to claim external IPs that route traffic through their pods, potentially intercepting traffic intended for other services or external systems. This could enable man-in-the-middle attacks or unauthorized access to network traffic. Enabling DenyServiceExternalIPs prevents users from assigning external IPs to services, ensuring that all external service exposure is controlled through proper mechanisms like LoadBalancer services or Ingress resources.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "2.1.2",
+      "Description": "Ensure admission of containers using hostPorts is minimized",
+      "Checks": [
+        "core_minimize_admission_hostport_containers"
+      ],
+      "Attributes": [
+        {
+          "Title": "Host port containers minimized",
+          "Section": "2. Attack Surface",
+          "SubSection": "2.1 Network",
+          "AttributeDescription": "Containers can bind to ports on the host network interface using the hostPort setting. This exposes the container directly on the node's IP address, bypassing service abstractions and network policies. Host ports should be minimized as they increase the attack surface, can cause port conflicts, and make pods immobile if ports are already in use on a node.",
+          "AdditionalInformation": "Using host ports exposes containers directly to the network without the protection of service abstractions or network policies. This increases the attack surface as the container becomes directly accessible on the node's IP address. Host ports can also interfere with node services or other pods, and they make pod scheduling more complex as only one pod can bind to a specific host port on each node. Minimizing host port usage and using services and ingress controllers for external access provides better security, flexibility, and manageability.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "2.1.3",
+      "Description": "Ensure admission of containers sharing the host network namespace is minimized",
+      "Checks": [
+        "core_minimize_hostNetwork_containers"
+      ],
+      "Attributes": [
+        {
+          "Title": "Host network namespace sharing minimized",
+          "Section": "2. Attack Surface",
+          "SubSection": "2.1 Network",
+          "AttributeDescription": "Containers can be configured to use the host's network namespace with the hostNetwork setting. This gives containers direct access to the host's network interfaces and eliminates network isolation. Containers with hostNetwork can listen on any port on the host and can intercept or manipulate network traffic. This setting should be minimized and used only when absolutely necessary.",
+          "AdditionalInformation": "Containers using the host network namespace bypass network isolation and network policies, gaining the same network access as the host. They can listen on privileged ports, access services bound to localhost, and potentially intercept traffic from other pods. This is particularly dangerous in multi-tenant environments as it allows containers to bypass network segmentation. Using hostNetwork should be restricted to specific system components that require it, such as certain CNI plugins or monitoring agents, and should never be allowed for general application workloads.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "2.1.4",
+      "Description": "Ensure that the read-only port is disabled for the kubelet",
+      "Checks": [
+        "kubelet_disable_read_only_port"
+      ],
+      "Attributes": [
+        {
+          "Title": "Kubelet read-only port disabled",
+          "Section": "2. Attack Surface",
+          "SubSection": "2.1 Network",
+          "AttributeDescription": "The kubelet can expose a read-only HTTP endpoint on port 10255 that provides access to pod and node information without authentication. The --read-only-port argument should be set to 0 to disable this endpoint, as it exposes cluster information without requiring authentication and can be used for reconnaissance by attackers.",
+          "AdditionalInformation": "The read-only port provides unauthenticated access to kubelet APIs, allowing anyone who can reach the port to retrieve information about all pods on the node, including pod specifications, environment variables, and configuration details. This information can be used by attackers to identify vulnerable applications, discover secrets exposed through environment variables, or plan further attacks. Disabling the read-only port ensures that all kubelet API access requires proper authentication, preventing information disclosure and unauthorized reconnaissance.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
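The two kubelet requirements (anonymous authentication, earlier, and the read-only port in 2.1.4) are node-local settings. A sketch that reads the kubelet's configuration file directly; the path is the kubeadm default, and the field defaults in the comments are assumptions worth verifying per distribution:

import yaml  # PyYAML

with open("/var/lib/kubelet/config.yaml") as f:  # kubeadm default path
    cfg = yaml.safe_load(f)

anonymous = cfg.get("authentication", {}).get("anonymous", {}).get("enabled", True)
read_only_port = cfg.get("readOnlyPort", 0)  # KubeletConfiguration defaults to 0

print("anonymous auth disabled:", anonymous is False)
print("read-only port disabled:", read_only_port == 0)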
+    {
+      "Id": "2.2.1",
+      "Description": "Ensure secrets are not exposed as environment variables",
+      "Checks": [
+        "core_no_secrets_envs"
+      ],
+      "Attributes": [
+        {
+          "Title": "Secrets not exposed as environment variables",
+          "Section": "2. Attack Surface",
+          "SubSection": "2.2 Storage",
+          "AttributeDescription": "Kubernetes secrets can be exposed to containers either as mounted volumes or as environment variables. Exposing secrets as environment variables is less secure because environment variables are visible in pod specifications, can be logged by applications, and may be exposed through system information endpoints. Secrets should be mounted as files instead.",
+          "AdditionalInformation": "Environment variables containing secrets are more likely to be accidentally exposed through various channels including application logs, error messages, process listings, and debugging endpoints. They are also visible to any process running in the container and can be accessed by attackers who gain limited shell access. Mounting secrets as files provides better isolation, as the files can have restricted permissions and are not automatically visible to all processes. Using volume mounts for secrets follows security best practices and reduces the risk of accidental credential exposure.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
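For 2.2.1, secret-backed environment variables show up in the pod spec as valueFrom.secretKeyRef entries (envFrom with a secretRef would need the same treatment). A minimal sketch:

from kubernetes import client, config

config.load_kube_config()
for pod in client.CoreV1Api().list_pod_for_all_namespaces().items:
    for c in pod.spec.containers:
        for env in c.env or []:
            if env.value_from and env.value_from.secret_key_ref:
                ref = env.value_from.secret_key_ref
                print(f"{pod.metadata.namespace}/{pod.metadata.name}/{c.name}: "
                      f"secret {ref.name} exposed as ${env.name}")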
872
|
+
{
|
|
873
|
+
"Id": "2.2.2",
|
|
874
|
+
"Description": "Ensure access to secrets is minimized through RBAC",
|
|
875
|
+
"Checks": [
|
|
876
|
+
"rbac_minimize_secret_access"
|
|
877
|
+
],
|
|
878
|
+
"Attributes": [
|
|
879
|
+
{
|
|
880
|
+
"Title": "Secret access minimized through RBAC",
|
|
881
|
+
"Section": "2. Attack Surface",
|
|
882
|
+
"SubSection": "2.2 Storage",
|
|
883
|
+
"AttributeDescription": "Access to read or modify secrets should be strictly controlled through RBAC policies. Only users and service accounts that specifically need to access secrets for their operations should be granted these permissions. Broad secret access increases the risk of credential theft and unauthorized access to sensitive data.",
|
|
884
|
+
"AdditionalInformation": "Secrets often contain credentials for external systems, databases, and APIs. Unrestricted secret access allows compromised accounts to retrieve all cluster secrets, potentially leading to widespread credential theft and unauthorized access to external resources. Implementing granular RBAC policies that grant secret access only to specific namespaces and resources ensures that each user and service account can only access the secrets they need, implementing the principle of least privilege and minimizing the impact of compromised accounts.",
|
|
885
|
+
"LevelOfRisk": 5,
|
|
886
|
+
"Weight": 1000
|
|
887
|
+
}
|
|
888
|
+
]
|
|
889
|
+
},
|
|
890
|
+
{
|
|
891
|
+
"Id": "2.2.3",
|
|
892
|
+
"Description": "Ensure access to create persistent volumes is minimized through RBAC",
|
|
893
|
+
"Checks": [
|
|
894
|
+
"rbac_minimize_pv_creation_access"
|
|
895
|
+
],
|
|
896
|
+
"Attributes": [
|
|
897
|
+
{
|
|
898
|
+
"Title": "Persistent volume creation minimized",
|
|
899
|
+
"Section": "2. Attack Surface",
|
|
900
|
+
"SubSection": "2.2 Storage",
|
|
901
|
+
"AttributeDescription": "Creating persistent volumes with host path or other privileged storage configurations can expose sensitive data or allow privilege escalation. Access to create PVs should be restricted to cluster administrators who understand the security implications of different storage configurations.",
|
|
902
|
+
"AdditionalInformation": "Persistent volumes can be configured to access host directories, cloud storage, or network storage systems. Without proper controls, users could create PVs that expose sensitive data from the host filesystem, access other tenants' data in shared storage, or bypass storage quotas. Restricting PV creation to administrators ensures that storage is provisioned according to security policies and that sensitive data paths are not accidentally exposed to unauthorized workloads.",
|
|
903
|
+
"LevelOfRisk": 3,
|
|
904
|
+
"Weight": 10
|
|
905
|
+
}
|
|
906
|
+
]
|
|
907
|
+
},
|
|
908
|
+
{
|
|
909
|
+
"Id": "2.3.1",
|
|
910
|
+
"Description": "Ensure that the AlwaysPullImages admission control plugin is set for the API server",
|
|
911
|
+
"Checks": [
|
|
912
|
+
"apiserver_always_pull_images_plugin"
|
|
913
|
+
],
|
|
914
|
+
"Attributes": [
|
|
915
|
+
{
|
|
916
|
+
"Title": "AlwaysPullImages admission plugin enabled",
|
|
917
|
+
"Section": "2. Attack Surface",
|
|
918
|
+
"SubSection": "2.3 Application",
|
|
919
|
+
"AttributeDescription": "The AlwaysPullImages admission control plugin modifies pod specifications to always pull container images, regardless of the imagePullPolicy specified. This ensures that image registry authentication is checked for every pod creation and prevents users from running images cached on nodes that they wouldn't normally have access to pull from the registry.",
|
|
920
|
+
"AdditionalInformation": "Without AlwaysPullImages, users could potentially run containers from cached images that they no longer have permission to pull from the registry. This could allow unauthorized access to proprietary or sensitive images after access has been revoked. Additionally, always pulling images ensures that pods run the latest image version rather than potentially vulnerable cached versions. Enabling this plugin provides defense in depth by ensuring that image registry permissions are enforced at pod runtime, not just at initial image pull.",
|
|
921
|
+
"LevelOfRisk": 3,
|
|
922
|
+
"Weight": 10
|
|
923
|
+
}
|
|
924
|
+
]
|
|
925
|
+
},
+    {
+      "Id": "2.3.2",
+      "Description": "Ensure that the NamespaceLifecycle admission control plugin is set for the API server",
+      "Checks": [
+        "apiserver_namespace_lifecycle_plugin"
+      ],
+      "Attributes": [
+        {
+          "Title": "NamespaceLifecycle admission plugin enabled",
+          "Section": "2. Attack Surface",
+          "SubSection": "2.3 Application",
+          "AttributeDescription": "The NamespaceLifecycle admission control plugin enforces that objects cannot be created in non-existent or terminating namespaces. It prevents race conditions where objects might be created in namespaces that are being deleted, and ensures that the default and kube-system namespaces cannot be deleted.",
+          "AdditionalInformation": "Without the NamespaceLifecycle plugin, users could create resources in namespaces that are being deleted, leading to orphaned resources or inconsistent cluster state. The plugin also protects critical namespaces from accidental deletion. This is a fundamental admission control plugin that should always be enabled to maintain cluster consistency and prevent namespace-related errors and security issues.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "4.1.1",
+      "Description": "Ensure that the API server TLS certificate and key are configured",
+      "Checks": [
+        "apiserver_tls_config"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server TLS certificate and key configured",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The API server must use TLS to secure communications with clients, kubelets, and other components. The --tls-cert-file and --tls-private-key-file arguments specify the certificate and private key used for TLS encryption. Properly configuring TLS ensures that all API server communications are encrypted and authenticated.",
+          "AdditionalInformation": "Without TLS encryption, all communications with the API server are transmitted in plaintext, allowing attackers to intercept credentials, authentication tokens, secrets, and other sensitive data. TLS encryption protects the confidentiality and integrity of all API communications, preventing man-in-the-middle attacks and credential theft. Properly configured TLS is a fundamental security requirement for Kubernetes clusters and should never be disabled in production environments.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
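Requirement 4.1.1 passes only when both TLS flags carry non-empty values. A hedged sketch with a hypothetical helper and placeholder paths:

```python
# Illustrative only (hypothetical helper and placeholder paths): requirement
# 4.1.1 holds when both TLS flags are present on the kube-apiserver command
# line with non-empty values.
REQUIRED_TLS_FLAGS = ("--tls-cert-file", "--tls-private-key-file")

def tls_configured(cmdline: list[str]) -> bool:
    values = {}
    for arg in cmdline:
        for flag in REQUIRED_TLS_FLAGS:
            if arg.startswith(flag + "="):
                values[flag] = arg.split("=", 1)[1]
    return all(values.get(flag) for flag in REQUIRED_TLS_FLAGS)

cmd = [
    "kube-apiserver",
    "--tls-cert-file=/etc/kubernetes/pki/apiserver.crt",
    "--tls-private-key-file=/etc/kubernetes/pki/apiserver.key",
]
assert tls_configured(cmd)
```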
+    {
+      "Id": "4.1.2",
+      "Description": "Ensure that the API server only uses strong cryptographic ciphers",
+      "Checks": [
+        "apiserver_strong_ciphers_only"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server strong ciphers enforced",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The API server should be configured to use only strong cryptographic ciphers for TLS connections. Weak ciphers can be exploited by attackers to decrypt communications or perform downgrade attacks. Configuring a restricted set of strong ciphers ensures that all TLS connections use modern, secure encryption algorithms.",
+          "AdditionalInformation": "Using weak or outdated cryptographic ciphers exposes TLS connections to various attacks including BEAST, CRIME, and padding oracle attacks. Even if TLS is enabled, weak ciphers can be exploited to decrypt traffic or downgrade security. Restricting the API server to strong ciphers ensures that all connections use modern, secure encryption that is resistant to known attacks. This is particularly important for the API server as it handles highly sensitive authentication tokens and cluster data.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
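A "strong ciphers only" check amounts to comparing the configured --tls-cipher-suites value against an allowlist. The allowlist below is an example of my own choosing, not the authoritative set, which comes from the benchmark and the check's metadata:

```python
# Sketch assuming an example allowlist; the authoritative strong-cipher set
# comes from the benchmark and the check's metadata, not from this snippet.
STRONG_CIPHERS = {
    "TLS_AES_128_GCM_SHA256",
    "TLS_AES_256_GCM_SHA384",
    "TLS_CHACHA20_POLY1305_SHA256",
    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
    "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
}

def only_strong_ciphers(cmdline: list[str]) -> bool:
    for arg in cmdline:
        if arg.startswith("--tls-cipher-suites="):
            configured = set(arg.split("=", 1)[1].split(","))
            return configured <= STRONG_CIPHERS  # subset of the allowlist
    return False  # flag missing: the server falls back to the default list

cmd = [
    "kube-apiserver",
    "--tls-cipher-suites=TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256",
]
assert only_strong_ciphers(cmd)
```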
+    {
+      "Id": "4.1.3",
+      "Description": "Ensure that etcd uses TLS encryption for client connections",
+      "Checks": [
+        "etcd_tls_encryption"
+      ],
+      "Attributes": [
+        {
+          "Title": "Etcd TLS encryption for clients configured",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "Etcd must use TLS to encrypt client connections from the API server and other etcd clients. The --cert-file and --key-file arguments specify the certificate and private key for TLS. Encrypting etcd client connections protects all cluster data in transit, including secrets and sensitive configuration.",
+          "AdditionalInformation": "Etcd contains all Kubernetes cluster data, including secrets, configurations, and state. Without TLS encryption, this data is transmitted in plaintext and could be intercepted by attackers with network access. Unencrypted etcd traffic exposes all cluster secrets and configuration to network sniffing attacks. Enabling TLS encryption for etcd client connections is absolutely critical for protecting cluster data confidentiality and preventing credential theft through network interception.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
+    {
+      "Id": "4.1.4",
+      "Description": "Ensure that etcd peer connections use TLS encryption",
+      "Checks": [
+        "etcd_peer_tls_config"
+      ],
+      "Attributes": [
+        {
+          "Title": "Etcd peer TLS encryption configured",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "In a multi-node etcd cluster, etcd instances communicate with each other to replicate data and maintain consensus. The --peer-cert-file and --peer-key-file arguments configure TLS encryption for peer communication. This ensures that etcd replication traffic is encrypted and authenticated.",
+          "AdditionalInformation": "Etcd peer communications contain all cluster data being replicated between etcd instances. Without TLS encryption, this data is transmitted in plaintext across the network, potentially exposing all secrets and configuration to network attackers. Additionally, without peer authentication, malicious etcd instances could join the cluster. Configuring TLS for peer connections ensures that etcd replication is both encrypted and authenticated, protecting data confidentiality and cluster integrity.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "4.1.5",
+      "Description": "Ensure that auto-TLS is not enabled for etcd",
+      "Checks": [
+        "etcd_no_auto_tls"
+      ],
+      "Attributes": [
+        {
+          "Title": "Etcd auto-TLS disabled",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "Etcd supports auto-TLS mode which automatically generates self-signed certificates. While this enables TLS encryption, it does not provide authentication because clients cannot verify the certificate. The --auto-tls argument should be set to false, and properly signed certificates should be used instead.",
+          "AdditionalInformation": "Auto-TLS enables encryption but does not provide authentication, making it vulnerable to man-in-the-middle attacks. Attackers can intercept connections and present their own certificates, as clients have no way to verify certificate authenticity. While auto-TLS is better than no encryption, it should never be used in production environments. Properly issued and validated certificates provide both encryption and authentication, ensuring that clients are connecting to legitimate etcd servers.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
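Requirements 4.1.3 and 4.1.5 pair naturally: etcd must serve clients with a real certificate/key pair and must not fall back to self-signed auto-TLS. A hypothetical helper with placeholder paths:

```python
# Hypothetical helper combining requirements 4.1.3 and 4.1.5: etcd must serve
# client connections with a real certificate/key pair and must not rely on
# self-signed auto-TLS. All paths are placeholders.
def etcd_client_tls_ok(cmdline: list[str]) -> bool:
    flags = dict(
        arg.split("=", 1)
        for arg in cmdline
        if arg.startswith("--") and "=" in arg
    )
    has_certs = bool(flags.get("--cert-file")) and bool(flags.get("--key-file"))
    auto_tls = flags.get("--auto-tls", "false").lower() == "true"
    return has_certs and not auto_tls

cmd = [
    "etcd",
    "--cert-file=/etc/etcd/server.crt",
    "--key-file=/etc/etcd/server.key",
    "--auto-tls=false",
]
assert etcd_client_tls_ok(cmd)
```

The peer-side requirements (4.1.4 above and peer auto-TLS, next) follow the same pattern with the --peer-cert-file, --peer-key-file, and --peer-auto-tls variants.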
+    {
+      "Id": "4.1.6",
+      "Description": "Ensure that peer auto-TLS is not enabled for etcd",
+      "Checks": [
+        "etcd_no_peer_auto_tls"
+      ],
+      "Attributes": [
+        {
+          "Title": "Etcd peer auto-TLS disabled",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The --peer-auto-tls argument enables automatic generation of self-signed certificates for etcd peer communication. Like client auto-TLS, this provides encryption without authentication. Peer auto-TLS should be disabled and proper certificates should be used to ensure both encryption and authentication of etcd cluster members.",
+          "AdditionalInformation": "Without proper peer authentication, malicious etcd instances could join the cluster and access or manipulate all cluster data. Auto-TLS for peer connections provides encryption but no authentication, allowing attackers to impersonate etcd cluster members. Using properly issued peer certificates ensures that only authorized etcd instances can join the cluster and participate in data replication, protecting cluster data integrity and preventing unauthorized access.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "4.1.7",
+      "Description": "Ensure that the kubelet uses TLS certificates",
+      "Checks": [
+        "kubelet_tls_cert_and_key"
+      ],
+      "Attributes": [
+        {
+          "Title": "Kubelet TLS certificates configured",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The kubelet should use TLS certificates for secure communication with the API server and other components. Properly configured TLS certificates ensure that kubelet communications are encrypted and that the kubelet's identity can be authenticated. This protects pod data, logs, and metrics in transit.",
+          "AdditionalInformation": "Communications between the API server and kubelet include sensitive data such as pod specifications, secrets, logs, and exec session data. Without TLS encryption, this data is transmitted in plaintext and could be intercepted by attackers. TLS certificates also enable mutual authentication, ensuring that both the API server and kubelet can verify each other's identity. Properly configured kubelet TLS prevents credential theft, data interception, and man-in-the-middle attacks on kubelet communications.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "4.1.8",
+      "Description": "Ensure that the kubelet only uses strong cryptographic ciphers",
+      "Checks": [
+        "kubelet_strong_ciphers_only"
+      ],
+      "Attributes": [
+        {
+          "Title": "Kubelet strong ciphers enforced",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The kubelet should be configured to use only strong cryptographic ciphers for TLS connections. Weak or outdated ciphers can be exploited to compromise the confidentiality of kubelet communications. Restricting the kubelet to strong ciphers ensures secure encryption of all data transmitted to and from the kubelet.",
+          "AdditionalInformation": "Even with TLS enabled, using weak cryptographic ciphers exposes kubelet communications to various cryptographic attacks. Attackers could potentially decrypt traffic, steal credentials, or access sensitive pod data if weak ciphers are permitted. The kubelet handles highly sensitive data including pod specifications with secrets, container logs, and exec sessions. Enforcing strong ciphers ensures that this data remains confidential even if attackers can intercept network traffic.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "4.1.9",
+      "Description": "Ensure that the kubelet client certificate rotation is enabled",
+      "Checks": [
+        "kubelet_rotate_certificates"
+      ],
+      "Attributes": [
+        {
+          "Title": "Kubelet client certificate rotation enabled",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The kubelet can automatically rotate its client certificates before they expire, requesting new certificates from the API server. Enabling certificate rotation through the RotateKubeletClientCertificate feature ensures that kubelets maintain valid certificates without manual intervention, preventing service disruptions and improving security through regular credential rotation.",
+          "AdditionalInformation": "Certificate expiration can cause kubelet disconnection from the API server, resulting in pod scheduling failures and cluster instability. Manual certificate renewal is error-prone and operationally burdensome. Automatic certificate rotation ensures continuous availability while implementing security best practices of regular credential rotation. This reduces the risk of compromised certificates being used long-term and ensures that kubelets maintain authenticated connections to the API server without manual intervention.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
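Unlike the API server flags above, the kubelet requirements (4.1.7 and 4.1.9) are usually expressed in the KubeletConfiguration file rather than on the command line. A sketch under that assumption, with the configuration already parsed into a dict and hypothetical sample values:

```python
# Sketch under stated assumptions: the kubelet's effective configuration has
# already been loaded into a dict (for example, parsed from
# /var/lib/kubelet/config.yaml or the node's /configz endpoint). Field names
# follow the KubeletConfiguration API; the sample values are hypothetical.
kubelet_config = {
    "kind": "KubeletConfiguration",
    "tlsCertFile": "/var/lib/kubelet/pki/kubelet.crt",
    "tlsPrivateKeyFile": "/var/lib/kubelet/pki/kubelet.key",
    "rotateCertificates": True,
}

def kubelet_tls_and_rotation_ok(cfg: dict) -> bool:
    # 4.1.7: serving certificate and key are configured
    # 4.1.9: client certificate rotation is enabled
    return (
        bool(cfg.get("tlsCertFile"))
        and bool(cfg.get("tlsPrivateKeyFile"))
        and cfg.get("rotateCertificates") is True
    )

assert kubelet_tls_and_rotation_ok(kubelet_config)
```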
+    {
+      "Id": "4.1.10",
+      "Description": "Ensure that the RotateKubeletServerCertificate feature is enabled for the Controller Manager",
+      "Checks": [
+        "controllermanager_rotate_kubelet_server_cert"
+      ],
+      "Attributes": [
+        {
+          "Title": "Controller Manager kubelet server certificate rotation enabled",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The RotateKubeletServerCertificate feature enables automatic rotation of kubelet serving certificates. When enabled in the Controller Manager, kubelets can automatically request new serving certificates before their current certificates expire, ensuring continuous TLS protection for kubelet serving endpoints.",
+          "AdditionalInformation": "Kubelet serving certificates are used to secure the kubelet's HTTPS endpoint. Without automatic rotation, expired certificates would break communication between the API server and kubelets, causing pod scheduling and management failures. Enabling server certificate rotation ensures that kubelet TLS certificates remain valid without manual renewal, improving both security through regular rotation and reliability by preventing certificate expiration issues.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "4.1.11",
+      "Description": "Ensure that the --root-ca-file argument is set for the Controller Manager",
+      "Checks": [
+        "controllermanager_root_ca_file_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "Controller Manager root CA file configured",
+          "Section": "4. Encryption",
+          "SubSection": "4.1 In-Transit",
+          "AttributeDescription": "The Controller Manager needs the root CA certificate to verify service account tokens and to include in service account token secrets for pods to verify the API server. The --root-ca-file argument ensures that the Controller Manager has access to the trusted CA certificate for these operations.",
+          "AdditionalInformation": "Service accounts in pods use the CA certificate to verify the API server's TLS certificate when making authenticated requests. Without the correct root CA file, pods may not be able to verify the API server's identity, potentially accepting fraudulent certificates in man-in-the-middle attacks. The root CA file is essential for maintaining the chain of trust throughout the cluster and ensuring that service account authentication works correctly.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "4.2.1",
+      "Description": "Ensure that the --encryption-provider-config argument is set for the API server",
+      "Checks": [
+        "apiserver_encryption_provider_config_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server encryption provider configured",
+          "Section": "4. Encryption",
+          "SubSection": "4.2 At-Rest",
+          "AttributeDescription": "The --encryption-provider-config argument specifies a configuration file that defines how secrets and other sensitive data are encrypted before being stored in etcd. Configuring encryption at rest ensures that even if etcd storage is compromised, sensitive data remains protected through encryption.",
+          "AdditionalInformation": "By default, Kubernetes stores secrets and other data in etcd in base64-encoded format, which is not encrypted. If an attacker gains access to etcd data files or backups, they can easily decode all secrets and sensitive information. Configuring encryption at rest using a KMS provider or other encryption mechanism ensures that data is encrypted before being written to etcd, protecting it from unauthorized access even if storage is compromised. This is a critical security control for protecting sensitive data like passwords, API tokens, and certificates.",
+          "LevelOfRisk": 5,
+          "Weight": 1000
+        }
+      ]
+    },
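The three requirements above (4.1.10, 4.1.11, 4.2.1) are all flag-presence assertions on two different control-plane components. An illustrative sketch; flag_value is a hypothetical helper and the file paths are placeholders:

```python
# Illustrative sketch for 4.1.10, 4.1.11 and 4.2.1 (flag_value is a
# hypothetical helper; the file paths are placeholders).
def flag_value(cmdline: list[str], flag: str) -> str | None:
    for arg in cmdline:
        if arg.startswith(flag + "="):
            return arg.split("=", 1)[1]
    return None

controller_manager_cmd = [
    "kube-controller-manager",
    "--feature-gates=RotateKubeletServerCertificate=true",
    "--root-ca-file=/etc/kubernetes/pki/ca.crt",
]
apiserver_cmd = [
    "kube-apiserver",
    "--encryption-provider-config=/etc/kubernetes/encryption-config.yaml",
]

gates = (flag_value(controller_manager_cmd, "--feature-gates") or "").split(",")
assert "RotateKubeletServerCertificate=true" in gates          # 4.1.10
assert flag_value(controller_manager_cmd, "--root-ca-file")    # 4.1.11
assert flag_value(apiserver_cmd, "--encryption-provider-config")  # 4.2.1
```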
+    {
+      "Id": "4.2.2",
+      "Description": "Ensure that the --service-account-private-key-file argument is set for the Controller Manager",
+      "Checks": [
+        "controllermanager_service_account_private_key_file"
+      ],
+      "Attributes": [
+        {
+          "Title": "Controller Manager service account private key configured",
+          "Section": "4. Encryption",
+          "SubSection": "4.2 At-Rest",
+          "AttributeDescription": "The Controller Manager uses a private key to sign service account tokens. The --service-account-private-key-file argument specifies the key used for signing tokens. This key should be properly secured and should match the public key used by the API server to verify tokens, ensuring the integrity of service account authentication.",
+          "AdditionalInformation": "Service account tokens are cryptographically signed to ensure their authenticity and prevent tampering. The private key used for signing must be properly secured, as anyone with access to this key could forge service account tokens for any service account in the cluster. Using a dedicated private key file for service account token signing and ensuring it is properly secured prevents token forgery and maintains the integrity of service account authentication throughout the cluster.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "3.1.1",
+      "Description": "Ensure that the API server audit log path is configured",
+      "Checks": [
+        "apiserver_audit_log_path_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server audit log path configured",
+          "Section": "3. Logging and Monitoring",
+          "SubSection": "3.1 Logging",
+          "AttributeDescription": "The Kubernetes API server can be configured to generate audit logs that record all requests made to the API. The --audit-log-path argument specifies the file path where audit logs should be written. Enabling audit logging is essential for security monitoring, compliance requirements, and incident investigation, as it provides a complete record of all API server activity including authentication attempts, authorization decisions, and resource modifications.",
+          "AdditionalInformation": "Without audit logging enabled, there is no record of API server activity, making it impossible to detect suspicious behavior, investigate security incidents, or meet compliance requirements. Audit logs provide critical visibility into cluster operations, including who accessed what resources, what actions were performed, and when they occurred. This is fundamental for security monitoring, forensic analysis, and compliance with security standards like CIS Kubernetes Benchmark, PCI DSS, and SOC 2. In managed Kubernetes environments, audit logs are typically sent to the cloud provider's logging service (CloudWatch, Cloud Logging, or Azure Monitor), but the audit-log-path setting ensures logs are captured.",
+          "LevelOfRisk": 4,
+          "Weight": 100
+        }
+      ]
+    },
+    {
+      "Id": "3.2.1",
+      "Description": "Ensure that the API server audit log retention is set to 30 days or more",
+      "Checks": [
+        "apiserver_audit_log_maxage_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server audit log retention configured",
+          "Section": "3. Logging and Monitoring",
+          "SubSection": "3.2 Retention",
+          "AttributeDescription": "The --audit-log-maxage argument specifies the maximum number of days to retain audit log files before they are deleted. A minimum retention period of 30 days is recommended to ensure that audit logs are available for security investigations, incident response, and compliance requirements. Longer retention periods may be required based on organizational policies or regulatory frameworks.",
+          "AdditionalInformation": "Audit logs are critical for incident investigation and forensic analysis. Retaining logs for at least 30 days ensures that security teams have sufficient historical data to investigate incidents, track suspicious activities over time, and meet compliance requirements. Many regulatory frameworks including PCI DSS, HIPAA, and SOC 2 require minimum log retention periods. Without adequate retention, critical evidence may be lost before security incidents are detected or investigated. In self-managed Kubernetes, proper retention ensures logs remain available for analysis.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "3.2.2",
+      "Description": "Ensure that the API server maintains sufficient audit log backups",
+      "Checks": [
+        "apiserver_audit_log_maxbackup_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server audit log backups configured",
+          "Section": "3. Logging and Monitoring",
+          "SubSection": "3.2 Retention",
+          "AttributeDescription": "The --audit-log-maxbackup argument specifies the maximum number of audit log files to retain before old files are deleted. Setting an appropriate value (recommended 10 or more) ensures that sufficient audit history is preserved while managing disk space usage. Multiple log backups provide protection against log corruption and ensure continuous audit trail availability.",
+          "AdditionalInformation": "Maintaining multiple audit log backups provides redundancy and protection against log corruption or accidental deletion. If a single log file becomes corrupted, having multiple backups ensures that the audit trail remains intact. This is particularly important in self-managed Kubernetes environments where log files are stored locally. Configuring an appropriate number of backups balances the need for historical data with storage constraints, ensuring that audit logs remain available for investigation while preventing excessive disk usage that could impact cluster operations.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    },
+    {
+      "Id": "3.2.3",
+      "Description": "Ensure that the API server audit log file size is appropriately configured",
+      "Checks": [
+        "apiserver_audit_log_maxsize_set"
+      ],
+      "Attributes": [
+        {
+          "Title": "API Server audit log size limit configured",
+          "Section": "3. Logging and Monitoring",
+          "SubSection": "3.2 Retention",
+          "AttributeDescription": "The --audit-log-maxsize argument specifies the maximum size in megabytes of an audit log file before it is rotated. Setting an appropriate value (recommended 100 MB or more) ensures that log files don't grow excessively large, which could make them difficult to process, transfer, and analyze. Proper log rotation based on size triggers automatic creation of new log files at reasonable intervals.",
+          "AdditionalInformation": "Rotating audit logs based on size ensures that individual log files remain manageable for processing, transfer, and analysis. Very large log files can be difficult to handle with standard tools and may cause issues with log management systems or SIEM solutions. Setting an appropriate maximum size triggers log rotation at reasonable intervals, creating manageable log files while ensuring continuous audit coverage without gaps during rotation. This is particularly important when audit logs need to be transferred to external systems for analysis or long-term storage.",
+          "LevelOfRisk": 2,
+          "Weight": 8
+        }
+      ]
+    },
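Taken together, 3.1.1 and 3.2.1 through 3.2.3 describe one audit-logging posture: a log path plus retention thresholds. A sketch using exactly the minimums the descriptions above recommend (30 days, 10 backups, 100 MB); a real check would read the live kube-apiserver command, whereas this uses sample flags:

```python
# Sketch tying together 3.1.1 and 3.2.1-3.2.3, using the thresholds the
# descriptions above recommend (30 days, 10 backups, 100 MB).
RECOMMENDED_MINIMUMS = {
    "--audit-log-maxage": 30,
    "--audit-log-maxbackup": 10,
    "--audit-log-maxsize": 100,
}

def audit_logging_ok(cmdline: list[str]) -> bool:
    flags = dict(
        arg.split("=", 1)
        for arg in cmdline
        if arg.startswith("--audit-log") and "=" in arg
    )
    if not flags.get("--audit-log-path"):
        return False  # 3.1.1: no audit trail at all
    return all(
        int(flags.get(flag, 0)) >= minimum
        for flag, minimum in RECOMMENDED_MINIMUMS.items()
    )

cmd = [
    "kube-apiserver",
    "--audit-log-path=/var/log/kubernetes/audit.log",
    "--audit-log-maxage=30",
    "--audit-log-maxbackup=10",
    "--audit-log-maxsize=100",
]
assert audit_logging_ok(cmd)
```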
+    {
+      "Id": "3.1.2",
+      "Description": "Ensure that the kubelet event recording rate is appropriately configured",
+      "Checks": [
+        "kubelet_event_record_qps"
+      ],
+      "Attributes": [
+        {
+          "Title": "Kubelet event recording QPS configured",
+          "Section": "3. Logging and Monitoring",
+          "SubSection": "3.1 Logging",
+          "AttributeDescription": "The eventRecordQPS setting controls the rate at which the kubelet records events. Events provide important information about pod lifecycle, container failures, resource issues, and node problems. Setting an appropriate queries-per-second (QPS) value ensures that important events are captured without overwhelming the event system or causing performance issues.",
+          "AdditionalInformation": "Kubernetes events are an important source of operational and security information about cluster activity. Setting the eventRecordQPS too low could result in missing important events like container failures, image pull errors, scheduling issues, or security-related warnings. However, setting it too high could lead to event flooding that impacts cluster performance. An appropriate value ensures that significant events including security-relevant activities are recorded for monitoring, troubleshooting, and incident investigation, while preventing excessive event generation that could impact cluster stability.",
+          "LevelOfRisk": 3,
+          "Weight": 10
+        }
+      ]
+    }
+  ]
+}
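A final sketch for 3.1.2: validating eventRecordQPS from an already-parsed kubelet configuration. What counts as "appropriate" is a policy decision left to the benchmark; this example merely rejects the unset case and 0, which is one common reading of the requirement:

```python
# Final sketch for 3.1.2: validate eventRecordQPS from an already-parsed
# kubelet configuration. This example rejects the unset case and 0 (which
# disables event rate limiting entirely); the exact "appropriate" range is
# a policy decision, not something this snippet defines.
def event_record_qps_ok(cfg: dict) -> bool:
    qps = cfg.get("eventRecordQPS")
    if qps is None:
        return False  # require an explicit, reviewed setting
    return int(qps) > 0

assert event_record_qps_ok({"eventRecordQPS": 5})
assert not event_record_qps_ok({"eventRecordQPS": 0})
```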