aws_recon 0.2.21 → 0.2.26

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: f284896f9397e44d7662775c85b1f4363bcbc284420a248bc936b80ddef6d5e8
- data.tar.gz: 572b909d6d5bce44d908f70204e3d511cbfae1d5ed8574a99ffe0d7559f836b3
+ metadata.gz: e94ce0b14c283d62b106df80e56dc8340c4acc37970381cf95294d337d55ceda
+ data.tar.gz: 8e4fb54d9907493f94dfe4a3d73afcb396d8c101c309e8acacac6b527076be81
  SHA512:
- metadata.gz: dc68e292ca1d346e8313117d6f93ebe0587fdf61b735b445ec6b90bd6d4c5c4d2a0165f2a47d6bcf57cb046b7a8e1dee769120f0ff569fe613f3930afdad9dac
- data.tar.gz: 3f67fa10f1286a1eb9f5d5a1d56bc2ccb6dc5b6a9e7a9324b991e1f468f2fd41000081d1aa9f20a723ae17471b3e3a41463bc870e8e94dcd6e21bfb02fdcb0e6
+ metadata.gz: '0283ab12e7885d2462f24038648b5a326978a88465a92869fb8dcd617dde52789383d48a343c017050fc1c321ce72c40466a116b8d2bf3adc861fea2c4307e35'
+ data.tar.gz: e325e70dee2f236d77c0bb72e257a80b14900d26ed3347aa316e9b0e89fec7dbfa82e3fca010ae95959692114ccd58a0b7a8addc93fc573eac193ca5ea0c4ccc
@@ -8,20 +8,37 @@ class CodePipeline < Mapper
  #
  # list_pipelines
  #
- @client.list_pipelines.each_with_index do |response, page|
- log(response.context.operation_name, page)
+ begin
+ @client.list_pipelines.each_with_index do |response, page|
+ log(response.context.operation_name, page)

- # get_pipeline
- response.pipelines.each do |pipeline|
- resp = @client.get_pipeline(name: pipeline.name)
- struct = OpenStruct.new(resp.pipeline.to_h)
- struct.type = 'pipeline'
- struct.arn = resp.metadata.pipeline_arn
+ # get_pipeline
+ response.pipelines.each do |pipeline|
+ resp = @client.get_pipeline(name: pipeline.name)
+ struct = OpenStruct.new(resp.pipeline.to_h)
+ struct.type = 'pipeline'
+ struct.arn = resp.metadata.pipeline_arn

- resources.push(struct.to_h)
+ resources.push(struct.to_h)
+ end
+ end
+ rescue Aws::CodePipeline::Errors::ServiceError => e
+ log_error(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
  end
  end

  resources
  end
+
+ private
+
+ # not an error
+ def suppressed_errors
+ %w[
+ AccessDeniedException
+ ]
+ end
  end
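This is the error-handling pattern the release applies across the collectors touched below (IAM, Organizations, SecurityHub, S3, KMS, ECR, ServiceQuotas, Shield, Support): known permission-type error codes are logged and swallowed unless the new `--quit-on-exception` option asks for them to be re-raised. A minimal, self-contained sketch of that control flow — the `FakeCollector` class and plain `StandardError` are hypothetical stand-ins for the real collector classes and `Aws::*::Errors::ServiceError`:

```
require 'ostruct'

# Hypothetical stand-in for the rescue logic added to the collectors.
class FakeCollector
  SUPPRESSED_ERRORS = %w[AccessDeniedException].freeze # codes treated as "not an error"

  def initialize(options)
    @options = options # expects a #quit_on_exception reader, like the Parser struct
  end

  def collect
    yield # the AWS API calls happen here in the real collectors
  rescue StandardError => e
    code = e.message # the real collectors use e.code from Aws::*::Errors::ServiceError
    warn "error: #{code}"

    # Swallow known permission errors unless the user asked to quit on exceptions.
    raise e unless SUPPRESSED_ERRORS.include?(code) && !@options.quit_on_exception
  end
end

options = OpenStruct.new(quit_on_exception: false)
FakeCollector.new(options).collect { raise 'AccessDeniedException' } # logged, not re-raised
```

With `quit_on_exception: true` the same call re-raises, which is what the new `-q` flag enables.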
@@ -14,7 +14,7 @@ class DatabaseMigrationService < Mapper
  response.replication_instances.each do |instance|
  struct = OpenStruct.new(instance.to_h)
  struct.type = 'replication_instance'
- struct.arb = "arn:aws:#{@service}:#{@region}::replication_instance/#{instance.replication_instance_identifier}"
+ struct.arn = "arn:aws:#{@service}:#{@region}::replication_instance/#{instance.replication_instance_identifier}"

  resources.push(struct.to_h)
  end
@@ -29,6 +29,7 @@ class DynamoDB < Mapper
  struct = OpenStruct.new(@client.describe_table({ table_name: table_name }).table.to_h)
  struct.type = 'table'
  struct.arn = struct.table_arn
+ struct.continuous_backups_description = @client.describe_continuous_backups({ table_name: table_name }).continuous_backups_description.to_h

  resources.push(struct.to_h)
  end
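The added `describe_continuous_backups` call folds point-in-time recovery status into each table entry. Purely as an illustration of what can then be read out of the collected hash — the nested keys mirror the DynamoDB `DescribeContinuousBackups` response shape, not necessarily aws_recon's exact output layout:

```
# Hypothetical collected table resource (ARN and values are made up).
table = {
  type: 'table',
  arn: 'arn:aws:dynamodb:us-east-1:111111111111:table/example',
  continuous_backups_description: {
    continuous_backups_status: 'ENABLED',
    point_in_time_recovery_description: { point_in_time_recovery_status: 'DISABLED' }
  }
}

pitr = table.dig(:continuous_backups_description,
                 :point_in_time_recovery_description,
                 :point_in_time_recovery_status)
puts "PITR enabled? #{pitr == 'ENABLED'}"
```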
@@ -19,7 +19,11 @@ class ECR < Mapper
  .get_repository_policy({ repository_name: repo.repository_name }).policy_text.parse_policy

  rescue Aws::ECR::Errors::ServiceError => e
- raise e unless suppressed_errors.include?(e.code)
+ log_error(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end
  ensure
  resources.push(struct.to_h)
  end
@@ -8,24 +8,24 @@ class ECS < Mapper
  resources = []

  #
- # describe_clusters
+ # list_clusters
  #
- @client.describe_clusters.each_with_index do |response, page|
+ @client.list_clusters.each_with_index do |response, page|
  log(response.context.operation_name, page)

- response.clusters.each do |cluster|
- struct = OpenStruct.new(cluster.to_h)
+ response.cluster_arns.each do |cluster|
+ struct = OpenStruct.new(@client.describe_clusters({ clusters: [cluster] }).clusters.first.to_h)
  struct.type = 'cluster'
- struct.arn = cluster.cluster_arn
+ struct.arn = cluster
  struct.tasks = []

  # list_tasks
- @client.list_tasks({ cluster: cluster.cluster_arn }).each_with_index do |response, page|
+ @client.list_tasks({ cluster: cluster }).each_with_index do |response, page|
  log(response.context.operation_name, 'list_tasks', page)

  # describe_tasks
  response.task_arns.each do |task_arn|
- @client.describe_tasks({ cluster: cluster.cluster_arn, tasks: [task_arn] }).tasks.each do |task|
+ @client.describe_tasks({ cluster: cluster, tasks: [task_arn] }).tasks.each do |task|
  struct.tasks.push(task)
  end
  end
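The switch from a bare `describe_clusters` call to `list_clusters` plus a per-ARN `describe_clusters` matters because the ECS `DescribeClusters` API falls back to describing only the default cluster when no cluster list is given, so non-default clusters could previously be missed. A rough standalone sketch of the list-then-describe shape (assumes the `aws-sdk-ecs` gem, configured credentials, and a placeholder region — the gem wires up `@client` itself):

```
require 'aws-sdk-ecs'

ecs = Aws::ECS::Client.new(region: 'us-east-1') # placeholder region

# list_clusters pages automatically; each page carries cluster ARNs only,
# so each ARN is described individually to get the full cluster detail.
ecs.list_clusters.each do |page|
  page.cluster_arns.each do |arn|
    cluster = ecs.describe_clusters(clusters: [arn]).clusters.first
    puts "#{cluster.cluster_name}: #{cluster.status}"
  end
end
```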
@@ -0,0 +1,39 @@
+ class EMR < Mapper
+ #
+ # Returns an array of resources.
+ #
+ def collect
+ resources = []
+
+ #
+ # get_block_public_access_configuration
+ #
+ @client.get_block_public_access_configuration.each do |response|
+ log(response.context.operation_name)
+
+ struct = OpenStruct.new(response.block_public_access_configuration.to_h)
+ struct.type = 'configuration'
+
+ resources.push(struct.to_h)
+ end
+
+ #
+ # list_clusters
+ #
+ @client.list_clusters.each_with_index do |response, page|
+ log(response.context.operation_name, page)
+
+ response.clusters.each do |cluster|
+ log(response.context.operation_name, cluster.id)
+
+ struct = OpenStruct.new(@client.describe_cluster({ cluster_id: cluster.id }).cluster.to_h)
+ struct.type = 'cluster'
+ struct.arn = cluster.cluster_arn
+
+ resources.push(struct.to_h)
+ end
+ end
+
+ resources
+ end
+ end
@@ -89,14 +89,22 @@ class IAM < Mapper
  #
  # get_account_password_policy
  #
- @client.get_account_password_policy.each do |response|
- log(response.context.operation_name)
+ begin
+ @client.get_account_password_policy.each do |response|
+ log(response.context.operation_name)

- struct = OpenStruct.new(response.password_policy.to_h)
- struct.type = 'password_policy'
- struct.arn = "arn:aws:iam::#{@account}:account_password_policy/global"
+ struct = OpenStruct.new(response.password_policy.to_h)
+ struct.type = 'password_policy'
+ struct.arn = "arn:aws:iam::#{@account}:account_password_policy/global"

- resources.push(struct.to_h)
+ resources.push(struct.to_h)
+ end
+ rescue Aws::IAM::Errors::ServiceError => e
+ log_error(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end
  end

  #
@@ -178,7 +186,10 @@ class IAM < Mapper
  end
  rescue Aws::IAM::Errors::ServiceError => e
  log_error(e.code)
- raise e unless suppressed_errors.include?(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end
  end

  resources
@@ -190,6 +201,7 @@ class IAM < Mapper
  def suppressed_errors
  %w[
  ReportNotPresent
+ NoSuchEntity
  ]
  end
  end
@@ -29,7 +29,10 @@ class KMS < Mapper
  .key_rotation_enabled
  rescue Aws::KMS::Errors::ServiceError => e
  log_error(e.code)
- raise e unless suppressed_errors.include?(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end
  end

  # list_grants
@@ -34,18 +34,35 @@ class Organizations < Mapper
  #
  # list_policies
  #
- @client.list_policies({ filter: 'SERVICE_CONTROL_POLICY' }).each_with_index do |response, page|
- log(response.context.operation_name, page)
+ begin
+ @client.list_policies({ filter: 'SERVICE_CONTROL_POLICY' }).each_with_index do |response, page|
+ log(response.context.operation_name, page)

- response.policies.each do |policy|
- struct = OpenStruct.new(policy.to_h)
- struct.type = 'service_control_policy'
- struct.content = @client.describe_policy({ policy_id: policy.id }).policy.content.parse_policy
+ response.policies.each do |policy|
+ struct = OpenStruct.new(policy.to_h)
+ struct.type = 'service_control_policy'
+ struct.content = @client.describe_policy({ policy_id: policy.id }).policy.content.parse_policy

- resources.push(struct.to_h)
+ resources.push(struct.to_h)
+ end
+ end
+ rescue Aws::Organizations::Errors::ServiceError => e
+ log_error(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
  end
  end

  resources
  end
+
+ private
+
+ # not an error
+ def suppressed_errors
+ %w[
+ AccessDeniedException
+ ]
+ end
  end
@@ -38,6 +38,8 @@ class RDS < Mapper
  struct.arn = instance.db_instance_arn
  struct.parent_id = instance.db_cluster_identifier

+ # TODO: describe_db_snapshots here (with public flag)
+
  resources.push(struct.to_h)
  end
  end
@@ -15,6 +15,7 @@ class Redshift < Mapper
  struct = OpenStruct.new(cluster.to_h)
  struct.type = 'cluster'
  struct.arn = cluster.cluster_identifier
+ struct.logging_status = @client.describe_logging_status({ cluster_identifier: cluster.cluster_identifier }).to_h

  resources.push(struct.to_h)
  end
@@ -61,7 +61,11 @@ class S3 < Mapper
  end

  rescue Aws::S3::Errors::ServiceError => e
- raise e unless suppressed_errors.include?(e.code)
+ log_error(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end
  end

  resources.push(struct.to_h)
@@ -8,16 +8,33 @@ class SecurityHub < Mapper
  #
  # describe_hub
  #
- @client.describe_hub.each do |response|
- log(response.context.operation_name)
+ begin
+ @client.describe_hub.each do |response|
+ log(response.context.operation_name)

- struct = OpenStruct.new(response.to_h)
- struct.type = 'hub'
- struct.arn = response.hub_arn
+ struct = OpenStruct.new(response.to_h)
+ struct.type = 'hub'
+ struct.arn = response.hub_arn

- resources.push(struct.to_h)
+ resources.push(struct.to_h)
+ end
+ rescue Aws::SecurityHub::Errors::ServiceError => e
+ log_error(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end
  end

  resources
  end
+
+ private
+
+ # not an error
+ def suppressed_errors
+ %w[
+ InvalidAccessException
+ ]
+ end
  end
@@ -27,7 +27,10 @@ class ServiceQuotas < Mapper
  end
  rescue Aws::ServiceQuotas::Errors::ServiceError => e
  log_error(e.code, service)
- raise e unless suppressed_errors.include?(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end
  end

  resources
@@ -51,7 +51,10 @@ class Shield < Mapper
  resources
  rescue Aws::Shield::Errors::ServiceError => e
  log_error(e.code)
- raise e unless suppressed_errors.include?(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end

  [] # no access or service isn't enabled
  end
@@ -18,7 +18,7 @@ class SQS < Mapper
  struct = OpenStruct.new(@client.get_queue_attributes({ queue_url: queue, attribute_names: ['All'] }).attributes.to_h)
  struct.type = 'queue'
  struct.arn = struct.QueueArn
- struct.policy = struct.delete_field('Policy').parse_policy
+ struct.policy = struct.Policy ? struct.delete_field('Policy').parse_policy : nil

  resources.push(struct.to_h)
  end
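The new ternary guards queues that have no access policy: `GetQueueAttributes` only returns attributes that are actually set, so a queue without a policy has no `Policy` key and the old unconditional `delete_field('Policy').parse_policy` path would fail. A tiny standalone illustration of the nil-safe handling, with `JSON.parse` standing in for the gem's `parse_policy` helper and a made-up queue ARN:

```
require 'json'
require 'ostruct'

# Attributes as returned for a queue with no policy attached; 'Policy' is absent.
attrs = OpenStruct.new(
  'QueueArn' => 'arn:aws:sqs:us-east-1:111111111111:example-queue' # hypothetical ARN
)

# Mirrors the guarded line: only parse the policy when the field is present.
policy = attrs.Policy ? JSON.parse(attrs.delete_field('Policy')) : nil
puts policy.inspect # => nil
```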
@@ -27,7 +27,10 @@ class Support < Mapper
  resources
  rescue Aws::Support::Errors::ServiceError => e
  log_error(e.code)
- raise e unless suppressed_errors.include?(e.code)
+
+ unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
+ raise e
+ end

  [] # no Support subscription
  end
@@ -37,6 +40,7 @@ class Support < Mapper
  # not an error
  def suppressed_errors
  %w[
+ AccessDeniedException
  SubscriptionRequiredException
  ]
  end
@@ -20,6 +20,7 @@ class Parser
  :skip_credential_report,
  :stream_output,
  :verbose,
+ :quit_on_exception,
  :debug
  )

@@ -47,6 +48,7 @@ class Parser
  false,
  false,
  false,
+ false,
  false
  )

@@ -135,6 +137,11 @@ class Parser
  args.verbose = true unless args.stream_output
  end

+ # re-raise exceptions
+ opts.on('-q', '--quit-on-exception', 'Stop collection if an API error is encountered (default: false)') do
+ args.quit_on_exception = true
+ end
+
  # debug
  opts.on('-d', '--debug', 'Output debug with wire trace info') do
  unless args.stream_output
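For context on the Parser changes, this is roughly how a boolean switch such as the new `--quit-on-exception` flag flows from `OptionParser` into the options struct the collectors consult — a simplified, self-contained sketch rather than the gem's actual Parser class:

```
require 'optparse'

Options = Struct.new(:verbose, :quit_on_exception, :debug)

def parse(argv)
  args = Options.new(false, false, false)

  OptionParser.new do |opts|
    opts.banner = 'Usage: aws_recon [options]'

    opts.on('-v', '--verbose', 'Output client progress and current operation') do
      args.verbose = true
    end

    # re-raise exceptions instead of suppressing known permission errors
    opts.on('-q', '--quit-on-exception',
            'Stop collection if an API error is encountered (default: false)') do
      args.quit_on_exception = true
    end

    opts.on('-d', '--debug', 'Output debug with wire trace info') do
      args.debug = true
    end
  end.parse!(argv)

  args
end

puts parse(['-q']).to_h # => {:verbose=>false, :quit_on_exception=>true, :debug=>false}
```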
@@ -12,8 +12,13 @@
  alias: config
  - name: CodeBuild
  alias: codebuild
+ excluded_regions:
+ - af-south-1
  - name: CodePipeline
  alias: codepipeline
+ excluded_regions:
+ - af-south-1
+ - me-south-1
  - name: AutoScaling
  alias: autoscaling
  - name: CloudTrail
@@ -39,6 +44,13 @@
  - ap-southeast-1
  - name: ElastiCache
  alias: elasticache
+ - name: EMR
+ alias: emr
+ excluded_regions:
+ - ap-east-1
+ - af-south-1
+ - eu-south-1
+ - me-south-1
  - name: IAM
  global: true
  alias: iam
@@ -87,12 +99,17 @@
  - eu-north-1
  - eu-west-3
  - us-west-1
+ - ap-east-1
+ - af-south-1
+ - eu-south-1
  - name: CloudWatch
  alias: cloudwatch
  - name: CloudWatchLogs
  alias: cloudwatchlogs
  - name: Kafka
  alias: kafka
+ excluded_regions:
+ - af-south-1
  - name: SecretsManager
  alias: sm
  - name: SecurityHub
@@ -118,6 +135,10 @@
  - eu-north-1
  - us-west-1
  - sa-east-1
+ - ap-east-1
+ - af-south-1
+ - eu-south-1
+ - me-south-1
  - name: WorkSpaces
  alias: workspaces
  excluded_regions:
@@ -126,12 +147,21 @@
  - eu-west-3
  - us-east-2
  - us-west-1
+ - ap-east-1
+ - af-south-1
+ - eu-south-1
+ - me-south-1
  - name: SageMaker
  alias: sagemaker
  - name: ServiceQuotas
  alias: servicequotas
  - name: Transfer
  alias: transfer
+ excluded_regions:
+ - ap-east-1
+ - af-south-1
+ - eu-south-1
+ - me-south-1
  - name: DirectConnect
  alias: dc
  - name: DirectoryService
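The `excluded_regions` entries added above mark regions where a service endpoint isn't available, so collection can be skipped there. How the gem consumes this config isn't part of the diff; purely as an illustration of the idea, with a trimmed, hypothetical snippet of `config/services.yaml`:

```
require 'yaml'

# Trimmed-down, hypothetical stand-in for config/services.yaml.
services = YAML.safe_load(<<~YAML)
  - name: CodePipeline
    alias: codepipeline
    excluded_regions:
      - af-south-1
      - me-south-1
  - name: ElastiCache
    alias: elasticache
YAML

region = 'af-south-1'
runnable = services.reject { |s| (s['excluded_regions'] || []).include?(region) }
puts runnable.map { |s| s['name'] }.inspect # => ["ElastiCache"]
```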
@@ -1,3 +1,3 @@
  module AwsRecon
- VERSION = "0.2.21"
+ VERSION = "0.2.26"
  end
data/readme.md CHANGED
@@ -7,9 +7,9 @@ A multi-threaded AWS inventory collection tool.

  The [creators](https://darkbit.io) of this tool have a recurring need to be able to efficiently collect a large amount of AWS resource attributes and metadata to help clients understand their cloud security posture.

- There are a handful of tools (e.g. [AWS Config](https://aws.amazon.com/config), [CloudMapper](https://github.com/duo-labs/cloudmapper), [CloudSploit](https://github.com/cloudsploit/scans), [Prowler](https://github.com/toniblyx/prowler)) that do some form of resource collection to support other functions. But we found we needed broader coverage and more details at a per-service level. We also needed a consistent and structured format that allowed for integration with our other systems and tooling.
+ Existing tools (e.g. [AWS Config](https://aws.amazon.com/config)) that do some form of resource collection lack the coverage and specificity we needed. We also needed a tool that produced consistent output that was easily consumed by other tools/systems.

- Enter AWS Recon, multi-threaded AWS inventory collection tool written in plain Ruby. Though most AWS tooling tends to be dominated by Python, the [Ruby SDK](https://aws.amazon.com/sdk-for-ruby/) is quite mature and capable. The maintainers of the Ruby SDK have done a fantastic job making it easy to handle automatic retries, paging of large responses, and threading huge numbers of requests.
+ Enter AWS Recon, multi-threaded AWS inventory collection tool written in plain Ruby. Though Python tends to dominate the AWS tooling landscape, the [Ruby SDK](https://aws.amazon.com/sdk-for-ruby/) has a few convenient advantages over the [other](https://aws.amazon.com/sdk-for-node-js/) [AWS](https://aws.amazon.com/sdk-for-python/) [SDKs](https://aws.amazon.com/sdk-for-go/) we tested. Specifically, easy handling of automatic retries, paging of large responses, and - with some help - threading huge numbers of requests.
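On the paging mentioned in the rewritten paragraph above: list/describe responses in the Ruby SDK are pageable, which is exactly the pattern the collectors lean on. A small illustration outside of aws_recon, assuming the `aws-sdk-ec2` gem and configured credentials (`describe_subnets` is just an arbitrary paginated call):

```
require 'aws-sdk-ec2'

ec2 = Aws::EC2::Client.new(region: 'us-east-1') # placeholder region

# Pageable responses: iteration transparently follows NextToken across pages.
ec2.describe_subnets.each_with_index do |page, n|
  puts "page #{n}: #{page.subnets.size} subnets"
end
```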

  ## Project Goals

@@ -23,24 +23,44 @@ Enter AWS Recon, multi-threaded AWS inventory collection tool written in plain R

  ### Requirements

- Ruby 2.5.x or 2.6.x (developed and tested with 2.6.5)
+ AWS Recon needs AWS account role or credentials with `ReadOnlyAccess`. Full `AdministratorAccess` is over-privileged, but will work as well. The `SecurityAudit` policy is **not** sufficient as it omits access to many services.
+
+ #### Running via Docker
+
+ Use Docker version 19.x or above to run the pre-built image without having to install anything.
+
+ #### Running locally via Ruby
+
+ If you already have Ruby installed (2.5.x or 2.6.x), you may want to install the Ruby gem.

  ### Installation

- AWS Recon can be run locally by installing the Ruby gem, or via a Docker container.
+ AWS Recon can be run locally via a Docker container or by installing the Ruby gem.
+
+ To run via a Docker container, pass the necessary AWS credentials into the Docker `run` command. For example:
+
+ ```
+ $ docker run -t --rm \
+ -e AWS_REGION \
+ -e AWS_ACCESS_KEY_ID \
+ -e AWS_SECRET_ACCESS_KEY \
+ -e AWS_SESSION_TOKEN \
+ -v $(pwd)/output.json:/recon/output.json \
+ darkbitio/aws_recon:latest \
+ aws_recon -v -s EC2 -r global,us-east-1,us-east-2
+ ```

  To run locally, first install the gem:

  ```
  $ gem install aws_recon
- Fetching aws_recon-0.2.8.gem
- Fetching aws-sdk-resources-3.76.0.gem
+ Fetching aws_recon-0.2.26.gem
  Fetching aws-sdk-3.0.1.gem
- Fetching parallel-1.19.2.gem
+ Fetching parallel-1.20.1.gem
  ...
  Successfully installed aws-sdk-3.0.1
- Successfully installed parallel-1.19.2
- Successfully installed aws_recon-0.2.8
+ Successfully installed parallel-1.20.1
+ Successfully installed aws_recon-0.2.26
  ```

  Or add it to your Gemfile using `bundle`:
@@ -51,27 +71,13 @@ Fetching gem metadata from https://rubygems.org/
  Resolving dependencies...
  ...
  Using aws-sdk 3.0.1
- Using parallel 1.19.2
- Using aws_recon 0.2.8
- ```
-
- To run via a Docker a container, pass the necessary AWS credentials into the Docker `run` command. For example:
-
- ```
- $ docker run -t --rm \
- -e AWS_REGION \
- -e AWS_ACCESS_KEY_ID \
- -e AWS_SECRET_ACCESS_KEY \
- -e AWS_SESSION_TOKEN \
- -v $(pwd)/output.json:/recon/output.json \
- darkbitio/aws_recon:latest \
- aws_recon -v -s EC2 -r global,us-east-1,us-east-2
+ Using parallel-1.20.1
+ Using aws_recon 0.2.26
  ```

-
  ## Usage

- AWS Recon will leverage any AWS credentials currently available to the environment it runs in. If you are collecting from multiple accounts, you may want to leverage something like [aws-vault](https://github.com/99designs/aws-vault) to manage different credentials.
+ AWS Recon will leverage any AWS credentials (see [requirements](#requirements)) currently available to the environment it runs in. If you are collecting from multiple accounts, you may want to leverage something like [aws-vault](https://github.com/99designs/aws-vault) to manage different credentials.

  ```
  $ aws-vault exec profile -- aws_recon
@@ -95,7 +101,7 @@ $ aws-vault exec <vault_profile> -- docker run -t --rm \
  aws_recon -j -s EC2 -r global,us-east-1,us-east-2
  ```

- To run from a Docker container using `aws-vault` managed credentials and output to a file, you will need to satisfy a couple of requirements. First, Docker needs access to bind mount the path you specify (or a parent path above). Second, you need to create an empty file to save the output into (e.g. `output.json`). This is because we are only mounting that one file into the Docker container at run time. For example:
+ To run from a Docker container using `aws-vault` managed credentials and output to a file, you will need to satisfy a couple of requirements. First, Docker needs access to bind mount the path you specify (or a parent path above). Second, you need to create an empty file to save the output into (e.g. `output.json`). This is because only that one file is mounted into the Docker container at run time. For example:

  Create an empty file.

@@ -167,14 +173,33 @@ $ AWS_PROFILE=<profile> aws_recon -s S3,EC2 -r global,us-east-1,us-east-2 -f cus

  #### Errors

- An exception will be raised on `AccessDeniedException` errors. This typically means your user/role doesn't have the necessary permissions to get/list/describe for that service. These exceptions are raised so troubleshooting access issues is easier.
+ API exceptions related to permissions are silently ignored in most cases. These errors are usually due to one of these cases:
+
+ - using a role without sufficient permissions
+ - querying an account with SCPs in place that prevent usage of certain services
+ - trying to query a service that isn't enabled/available in your region/account
+
+ In `verbose` mode, you will see exception logs in the output:
+
+ ```
+ t2.us-east-1.EC2.describe_subnets.0
+ t4.us-east-1.SSM.describe_instance_information.0
+ t6.us-east-1.SecurityHub.InvalidAccessException <-----
+ t2.us-east-1.EC2.describe_addresses.0
+ t4.us-east-1.SSM.describe_parameters.0
+ t1.us-east-1.GuardDuty.list_detectors.0
+ ```
+
+ Use the `-q` command line option to re-raise these exceptions so troubleshooting access issues is easier.

  ```
  Traceback (most recent call last):
- arn:aws:sts::1234567890:assumed-role/role/9876543210 is not authorized to perform: codepipeline:GetPipeline on resource: arn:aws:codepipeline:us-west-2:876543210123:pipeline (Aws::CodePipeline::Errors::AccessDeniedException)
+ arn:aws:sts::1234567890:assumed-role/role/my-audit-role is not authorized to perform:
+ codepipeline:GetPipeline on resource: arn:aws:codepipeline:us-west-2:1234567890:pipeline
+ (Aws::CodePipeline::Errors::AccessDeniedException)
  ```

- The exact API operation that triggered the exception is indicated on the last line of the stack trace. If you can't resolve the necessary access, you should exclude those services with `-x` or `--not-services` so the collection can continue.
+ The exact API operation that triggered the exception is indicated on the last line of the stack trace. If you can't resolve the necessary access, you should exclude those services with `-x` or `--not-services`, or leave off the `-q` option so the collection can continue.

  ### Threads

@@ -184,14 +209,20 @@ For global services like IAM, Shield, and Support, requests are not multi-thread

  For regional services, a thread (up to the thread limit) is spawned for each service in a region. By default, up to 8 threads will be used. If your account has resources spread across many regions, you may see a speed improvement by increasing threads with `-t X`, where `X` is the number of threads.
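The fan-out described above is built on the `parallel` gem (a declared dependency, visible in the install output earlier). In spirit, the per-region/per-service scheduling looks something like the sketch below — the job list and any `collect_service` helper are hypothetical, since the real scheduling code isn't part of this diff:

```
require 'parallel' # the parallel gem, a dependency of aws_recon

regions  = %w[us-east-1 us-east-2 us-west-2]
services = %w[EC2 S3 SQS]
jobs     = regions.product(services)

# Up to 8 worker threads by default, matching the tool's -t default.
results = Parallel.map(jobs, in_threads: 8) do |(region, service)|
  # collect_service(service, region) would live here in real code (hypothetical name)
  "#{service} @ #{region}"
end

puts results.size # => 9
```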

+ ### Performance
+
+ AWS Recon will make a minimum of ~2,000 API calls in a new/empty account, just to query the supported services in all 20 standard (non-GovCloud, non-China) regions. It is very likely to encounter API rate-limiting (throttling) on large accounts if you enable more threads than the default (8).
+
+ Recon will automatically backoff and respect the retry limits in the API response. If you observe long pauses during collection, this is likely what is happening. Retry collection with the `-d` or `--debug` option to observe the wire trace and see if you're being throttled. Consider using fewer threads or requesting higher rate limits from AWS if you are regularly getting rate-limited.
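The backoff behaviour described above is standard aws-sdk-ruby retry handling. If you want to experiment with it outside of aws_recon, the SDK exposes the retry knobs globally or per client — a hedged example assuming a reasonably recent aws-sdk-core 3.x; this is not something the gem configures for you:

```
require 'aws-sdk-core'

# Global defaults for every client created afterwards.
Aws.config.update(retry_mode: 'adaptive', max_attempts: 10)

# Or per client; 'standard' and 'adaptive' modes use exponential backoff with
# jitter on throttling errors ('adaptive' adds client-side rate limiting).
# sts = Aws::STS::Client.new(retry_mode: 'standard', max_attempts: 5)
```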
+
  ### Options

- Most users will want to limit collection to relevant services and regions. Running without any options will attempt to collect all resources from all 16 regular regions.
+ Most users will want to limit collection to relevant services and regions. Running without any exclusions will attempt to collect all resources from all regions enabled for the account.

  ```
  $ aws_recon -h

- AWS Recon - AWS Inventory Collector (0.2.8)
+ AWS Recon - AWS Inventory Collector (0.2.26)

  Usage: aws_recon [options]
  -r, --regions [REGIONS] Regions to scan, separated by comma (default: all)
@@ -204,8 +235,10 @@ Usage: aws_recon [options]
  -t, --threads [THREADS] Specify max threads (default: 8, max: 128)
  -u, --user-data Collect EC2 instance user data (default: false)
  -z, --skip-slow Skip slow operations (default: false)
+ -g, --skip-credential-report Skip generating IAM credential report (default: false)
  -j, --stream-output Stream JSON lines to stdout (default: false)
  -v, --verbose Output client progress and current operation
+ -q, --quit-on-exception Stop collection if an API error is encountered (default: false)
  -d, --debug Output debug with wire trace info
  -h, --help Print this help information

@@ -215,6 +248,20 @@ Usage: aws_recon [options]

  Output is always some form of JSON - either JSON lines or plain JSON. The output is either written to a file (the default), or written to stdout (with `-j`).

+ ## Support for Manually Enabled Regions
+
+ If you have enabled **manually enabled regions**:
+
+ - me-south-1 - Middle East (Bahrain)
+ - af-south-1 - Africa (Cape Town)
+ - ap-east-1 - Asia Pacific (Hong Kong)
+ - eu-south-1 - Europe (Milan)
+
+ and you are using STS to assume a role into an account, you will need to [enable v2 STS tokens](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) in the account you are assuming the role **from** to be able to run AWS Recon against those regions.
+
+ > Version 1 tokens are valid only in AWS Regions that are available by default. These tokens do not work in manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in all Regions. However, version 2 tokens are longer and might affect systems where you temporarily store tokens.
+
+ If you are using a static access key/secret, you can collect from these regions regardless of STS token version.
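If you prefer to flip that STS setting programmatically rather than in the console, the IAM API exposes it directly. A hedged one-liner using the Ruby SDK, assuming the `aws-sdk-iam` gem and credentials in the account you assume the role **from**:

```
require 'aws-sdk-iam'

# Ask STS to issue v2 (all-Regions) session tokens for this account.
Aws::IAM::Client.new.set_security_token_service_preferences(
  global_endpoint_token_version: 'v2Token'
)
```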

  ## Supported Services & Resources

@@ -250,8 +297,9 @@ AWS Recon aims to collect all resources and metadata that are relevant in determ
  - [x] ECR
  - [x] ECS
  - [x] EFS
- - [x] ELB
  - [x] EKS
+ - [x] ELB
+ - [x] EMR
  - [x] Elasticsearch
  - [x] ElastiCache
  - [x] Firehose
@@ -309,11 +357,8 @@ To install this gem onto your local machine, run `bundle exec rake install`. To

  ### TODO

- - [ ] Optionally suppress AWS API errors instead of re-raising them
- - [x] Package as a gem
  - [ ] Test coverage with AWS SDK stubbed resources

-
  ## Kudos

  AWS Recon was inspired by the excellent work of the people and teams behind these tools:
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: aws_recon
  version: !ruby/object:Gem::Version
- version: 0.2.21
+ version: 0.2.26
  platform: ruby
  authors:
  - Josh Larsen
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-11-27 00:00:00.000000000 Z
+ date: 2020-12-15 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk
@@ -209,6 +209,7 @@ files:
  - lib/aws_recon/collectors/elasticloadbalancing.rb
  - lib/aws_recon/collectors/elasticloadbalancingv2.rb
  - lib/aws_recon/collectors/elasticsearch.rb
+ - lib/aws_recon/collectors/emr.rb
  - lib/aws_recon/collectors/firehose.rb
  - lib/aws_recon/collectors/guardduty.rb
  - lib/aws_recon/collectors/iam.rb