aws_recon 0.3.4 → 0.4.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 24978de4c7cf85dc8ed7f4a177caca6f1746ed6de3a877d921c3169817fa036d
- data.tar.gz: f60ebd2d5e072656dfac5e2514edffb95b4bc059f66ed9d540608abd6cad9322
+ metadata.gz: 80f767ac960548775c701684868aa2a58a2318e3c442a3c751feab370a40cee6
+ data.tar.gz: 413c1365dc007b9bc6917c7ee1ccd708305b70080d45caae662077d9859834e8
  SHA512:
- metadata.gz: 07fb3311cae4325997aa6b66bf9dd150a8d5a4eccff8a444dceb5a22b6cbdddd597025082afc7c02e96e889e988e23f1939d1ef2bff5772c5b51975a4bc32fdf
- data.tar.gz: d09fb6a88b9e0f8f6565a49c66c302e9725817295eeb2556f292423fda0c66a6fa5975a74444f6c6597ce6703cfe84b4aaf02c62a10e3854f20f73b70b740ee0
+ metadata.gz: '094fbf6a631b68c70eaae7982755b800a407f5efcf6a1ea3a0e64ec6221669a51ae78d2c386b61b4a36c54a90396c5312e759bb49783f28affe4034ef15709c2'
+ data.tar.gz: a40c2417c1cc26c6d4ddedfe860c7c10bec38dbd6c1a55f5d1e0da23a7fc1fef2f083778b8ae8048ff4cdd84ed5bdbf74e6beeee68e96618727561aa6603c8f5
data/.gitignore CHANGED
@@ -12,3 +12,5 @@ Gemfile.lock
  /pkg/
  /spec/reports/
  /tmp/
+ .terraform*
+ terraform.tfstate*
data/.rubocop.yml CHANGED
@@ -12,7 +12,6 @@ Layout/LineLength:
  Max: 100
  Style/FrozenStringLiteralComment:
  EnforcedStyle: always_true
- Safe: true
  SafeAutoCorrect: true
  Style/ClassAndModuleChildren:
  Enabled: false
data/aws_recon.gemspec CHANGED
@@ -9,7 +9,7 @@ Gem::Specification.new do |spec|
  spec.version = AwsRecon::VERSION
  spec.authors = ['Josh Larsen', 'Darkbit']
  spec.required_ruby_version = '>= 2.5.0'
- spec.summary = 'A multi-threaded AWS inventory collection cli tool.'
+ spec.summary = 'A multi-threaded AWS security-focused inventory collection tool.'
  spec.description = 'AWS Recon is a command line tool to collect resources from an Amazon Web Services (AWS) account. The tool outputs JSON suitable for processing with other tools.'
  spec.homepage = 'https://github.com/darkbitio/aws-recon'
  spec.license = 'MIT'
@@ -34,9 +34,9 @@ module AwsRecon
  # formatter
  @formatter = Formatter.new

- unless @options.stream_output
- puts "\nStarting collection with #{@options.threads} threads...\n"
- end
+ return unless @options.stream_output
+
+ puts "\nStarting collection with #{@options.threads} threads...\n"
  end

  #
@@ -72,7 +72,7 @@ module AwsRecon
  #
  # global services
  #
- @aws_services.map { |x| OpenStruct.new(x) }.filter { |s| s.global }.each do |service|
+ @aws_services.map { |x| OpenStruct.new(x) }.filter(&:global).each do |service|
  # user included this service in the args
  next unless @services.include?(service.name)

@@ -102,16 +102,47 @@ module AwsRecon
  rescue Interrupt # ctrl-c
  elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - @starting

- puts "\nStopped early after \x1b[32m#{elapsed.to_i}\x1b[0m seconds.\n"
+ puts "\nStopped early after #{elapsed.to_i} seconds.\n"
  ensure
- # write output file
- if @options.output_file
- elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - @starting
+ elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - @starting

- puts "\nFinished in \x1b[32m#{elapsed.to_i}\x1b[0m seconds. Saving resources to \x1b[32m#{@options.output_file}\x1b[0m.\n\n"
+ puts "\nFinished in #{elapsed.to_i} seconds.\n\n"
+
+ # write output file
+ if @options.output_file && !@options.s3
+ puts "Saving resources to #{@options.output_file}.\n\n"

  File.write(@options.output_file, @resources.to_json)
  end
+
+ # write output file to S3 bucket
+ if @options.s3
+ t = Time.now.utc
+
+ s3_full_object_path = "AWSRecon/#{t.year}/#{t.month}/#{t.day}/#{@account_id}_aws_recon_#{t.to_i}.json.gz"
+
+ begin
+ # get bucket name (and region if not us-east-1)
+ s3_bucket, s3_region = @options.s3.split(':')
+
+ # build IO object and gzip it
+ io = StringIO.new
+ gzip_data = Zlib::GzipWriter.new(io)
+ gzip_data.write(@resources.to_json)
+ gzip_data.close
+
+ # send it to S3
+ s3_client = Aws::S3::Client.new(region: s3_region || 'us-east-1')
+ s3_resource = Aws::S3::Resource.new(client: s3_client)
+ obj = s3_resource.bucket(s3_bucket).object(s3_full_object_path)
+ obj.put(body: io.string)
+
+ puts "Saving resources to S3 s3://#{s3_bucket}/#{s3_full_object_path}\n\n"
+ rescue Aws::S3::Errors::ServiceError => e
+ puts "Error! - could not save output to S3 bucket\n\n"
+ puts "#{e.message} - #{e.code}\n"
+ end
+ end
  end
  end
  end
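The new S3 output path above gzips the JSON in memory before uploading. A minimal standalone sketch of the same gzip-then-upload pattern, assuming placeholder bucket, region, key, and payload:

```ruby
require 'json'
require 'stringio'
require 'zlib'
require 'aws-sdk-s3'

# Gzip an in-memory JSON document into a StringIO, then upload the
# compressed bytes with the aws-sdk-s3 resource interface.
resources = { example: 'data' } # placeholder payload
bucket, region = 'my-recon-bucket:us-west-2'.split(':')

io = StringIO.new
gz = Zlib::GzipWriter.new(io)
gz.write(resources.to_json)
gz.close # finalizes the gzip stream into io

client = Aws::S3::Client.new(region: region || 'us-east-1')
Aws::S3::Resource.new(client: client)
                 .bucket(bucket)
                 .object('AWSRecon/example.json.gz') # hypothetical key
                 .put(body: io.string)
```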
@@ -1,2 +1,4 @@
+ # frozen_string_literal: true
+
  # require all collectors
- Dir[File.join(__dir__, 'collectors', '*.rb')].each { |file| require file }
+ Dir[File.join(__dir__, 'collectors', '*.rb')].sort.each { |file| require file }
@@ -19,7 +19,7 @@ class Route53 < Mapper
  response.hosted_zones.each do |zone|
  struct = OpenStruct.new(zone.to_h)
  struct.type = 'zone'
- struct.arn = "aws:route53:#{@region}:#{@account}:zone/#{zone.name}"
+ struct.arn = "arn:aws:route53:#{@region}:#{@account}:zone/#{zone.name}"
  struct.logging_config = @client
  .list_query_logging_configs({ hosted_zone_id: zone.id })
  .query_logging_configs.first.to_h
@@ -48,6 +48,7 @@ class S3 < Mapper
  { func: 'get_bucket_policy', key: 'policy', field: 'policy' },
  { func: 'get_bucket_policy_status', key: 'public', field: 'policy_status' },
  { func: 'get_public_access_block', key: 'public_access_block', field: 'public_access_block_configuration' },
+ { func: 'get_object_lock_configuration', key: 'object_lock_configuration', field: 'object_lock_configuration' },
  { func: 'get_bucket_tagging', key: 'tagging', field: nil },
  { func: 'get_bucket_logging', key: 'logging', field: 'logging_enabled' },
  { func: 'get_bucket_versioning', key: 'versioning', field: nil },
@@ -66,12 +67,15 @@ class S3 < Mapper
  end

  rescue Aws::S3::Errors::ServiceError => e
- log_error(e.code)
+ log_error(bucket.name, op.func, e.code)

  raise e unless suppressed_errors.include?(e.code) && !@options.quit_on_exception
  end

  resources.push(struct.to_h)
+
+ rescue Aws::S3::Errors::NoSuchBucket
+ # skip missing bucket
  end
  end

@@ -90,6 +94,7 @@ class S3 < Mapper
  NoSuchWebsiteConfiguration
  ReplicationConfigurationNotFoundError
  NoSuchPublicAccessBlockConfiguration
+ ObjectLockConfigurationNotFoundError
  ]
  end
  end
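The suppressed-error list above lets per-bucket lookups treat "no such configuration" responses as empty results instead of failures. A rough sketch of that pattern with the aws-sdk-s3 gem; the method name and constant here are illustrative, not the gem's internals:

```ruby
require 'aws-sdk-s3'

# Known error codes that simply mean "this bucket has no such configuration".
SUPPRESSED = %w[
  NoSuchTagSet
  NoSuchPublicAccessBlockConfiguration
  ObjectLockConfigurationNotFoundError
].freeze

# Call a per-bucket S3 API, swallow the suppressed "not found" errors,
# and re-raise anything else (throttling, access denied, etc.).
def object_lock_or_nil(client, bucket)
  client.get_object_lock_configuration(bucket: bucket).object_lock_configuration.to_h
rescue Aws::S3::Errors::ServiceError => e
  raise unless SUPPRESSED.include?(e.code)

  nil # suppressed: the bucket has no object lock configuration
end
```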
@@ -68,12 +68,12 @@ class Mapper
  def log(*msg)
  return unless @options.verbose

- puts _msg(msg).map { |x| "\x1b[32m#{x}\x1b[0m" }.join("\x1b[35m.\x1b[0m")
+ puts _msg(msg).map(&:to_s).join('.')
  end

  def log_error(*msg)
  return unless @options.verbose

- puts _msg(msg).map { |x| "\x1b[35m#{x}\x1b[0m" }.join("\x1b[32m.\x1b[0m")
+ puts _msg(msg).map(&:to_s).join('.')
  end
  end
@@ -6,6 +6,7 @@
  class Parser
  DEFAULT_CONFIG_FILE = nil
  DEFAULT_OUTPUT_FILE = File.expand_path(File.join(Dir.pwd, 'output.json')).freeze
+ DEFAULT_S3_PATH = nil
  SERVICES_CONFIG_FILE = File.join(File.dirname(__FILE__), 'services.yaml').freeze
  DEFAULT_FORMAT = 'aws'
  DEFAULT_THREADS = 8
@@ -15,6 +16,7 @@ class Parser
  :regions,
  :services,
  :config_file,
+ :s3,
  :output_file,
  :output_format,
  :threads,
@@ -43,6 +45,7 @@ class Parser
  aws_regions,
  aws_services.map { |service| service[:name] },
  DEFAULT_CONFIG_FILE,
+ DEFAULT_S3_PATH,
  DEFAULT_OUTPUT_FILE,
  DEFAULT_FORMAT,
  DEFAULT_THREADS,
@@ -93,6 +96,11 @@ class Parser
  args.config_file = config
  end

+ # write output file to S3 bucket
+ opts.on('-b', '--s3-bucket [BUCKET:REGION]', 'Write output file to S3 bucket (default: \'\')') do |bucket_with_region|
+ args.s3 = bucket_with_region
+ end
+
  # output file
  opts.on('-o', '--output [OUTPUT]', 'Specify output file (default: output.json)') do |output|
  args.output_file = File.expand_path(File.join(Dir.pwd, output))
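The new flag takes an optional `BUCKET:REGION` argument. A small self-contained sketch of parsing such a value with Ruby's OptionParser, assuming the region defaults to `us-east-1` when omitted (matching the upload code above):

```ruby
require 'optparse'

# Parse an optional BUCKET:REGION argument, then split it into its parts.
options = { s3: nil }
OptionParser.new do |opts|
  opts.on('-b', '--s3-bucket [BUCKET:REGION]', 'Write output file to S3 bucket') do |arg|
    options[:s3] = arg
  end
end.parse!(['--s3-bucket', 'my-recon-bucket:us-west-2']) # example argv

bucket, region = options[:s3].split(':')
region ||= 'us-east-1' # default when only a bucket name is given
puts "#{bucket} in #{region}" # => my-recon-bucket in us-west-2
```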
data/lib/aws_recon/version.rb CHANGED
@@ -1,3 +1,3 @@
  module AwsRecon
- VERSION = "0.3.4"
+ VERSION = "0.4.3"
  end
data/readme.md CHANGED
@@ -3,19 +3,19 @@

  # AWS Recon

- A multi-threaded AWS inventory collection tool.
+ A multi-threaded AWS security-focused inventory collection tool written in Ruby.

- The [creators](https://darkbit.io) of this tool have a recurring need to be able to efficiently collect a large amount of AWS resource attributes and metadata to help clients understand their cloud security posture.
+ This tool was created to facilitate efficient collection of a large amount of AWS resource attributes and metadata. It aims to collect nearly everything that is relevant to the security configuration and posture of an AWS environment.

- Existing tools (e.g. [AWS Config](https://aws.amazon.com/config)) that do some form of resource collection lack the coverage and specificity we needed. We also needed a tool that produced consistent output that was easily consumed by other tools/systems.
+ Existing tools (e.g. [AWS Config](https://aws.amazon.com/config)) that do some form of resource collection lack the coverage and specificity to accurately measure security posture (e.g. detailed resource attribute data, fully parsed policy documents, and nested resource relationships).

- Enter AWS Recon, multi-threaded AWS inventory collection tool written in plain Ruby. Though Python tends to dominate the AWS tooling landscape, the [Ruby SDK](https://aws.amazon.com/sdk-for-ruby/) has a few convenient advantages over the [other](https://aws.amazon.com/sdk-for-node-js/) [AWS](https://aws.amazon.com/sdk-for-python/) [SDKs](https://aws.amazon.com/sdk-for-go/) we tested. Specifically, easy handling of automatic retries, paging of large responses, and - with some help - threading huge numbers of requests.
+ AWS Recon handles collection from large accounts by taking advantage of automatic retries (either due to network reliability or API throttling), automatic paging of large responses (> 100 resources per API call), and multi-threading parallel requests to speed up collection.

  ## Project Goals

  - More complete resource coverage than available tools (especially for ECS & EKS)
  - More granular resource detail, including nested related resources in the output
- - Flexible output (console, JSON lines, plain JSON, file, standard out)
+ - Flexible output (console, JSON lines, plain JSON, file, S3 bucket, and standard out)
  - Efficient (multi-threaded, rate limited, automatic retries, and automatic result paging)
  - Easy to maintain and extend

@@ -31,7 +31,7 @@ Use Docker version 19.x or above to run the pre-built image without having to in

  #### Running locally via Ruby

- If you already have Ruby installed (2.5.x or 2.6.x), you may want to install the Ruby gem.
+ If you already have Ruby installed (2.6.x or 2.7.x), you may want to install the Ruby gem.

  ### Installation

@@ -54,13 +54,13 @@ To run locally, first install the gem:

  ```
  $ gem install aws_recon
- Fetching aws_recon-0.3.0.gem
+ Fetching aws_recon-0.4.0.gem
  Fetching aws-sdk-3.0.1.gem
  Fetching parallel-1.20.1.gem
  ...
  Successfully installed aws-sdk-3.0.1
  Successfully installed parallel-1.20.1
- Successfully installed aws_recon-0.3.0
+ Successfully installed aws_recon-0.4.0
  ```

  Or add it to your Gemfile using `bundle`:
@@ -72,7 +72,7 @@ Resolving dependencies...
  ...
  Using aws-sdk 3.0.1
  Using parallel-1.20.1
- Using aws_recon 0.3.0
+ Using aws_recon 0.4.0
  ```

  ## Usage
@@ -158,13 +158,37 @@ Finished in 46 seconds. Saving resources to output.json.
  #### Example command line options

  ```
+ # collect S3 and EC2 global resources, as well as us-east-1 and us-east-2
+
  $ AWS_PROFILE=<profile> aws_recon -s S3,EC2 -r global,us-east-1,us-east-2
  ```

  ```
+ # collect S3 and EC2 global resources, as well as us-east-1 and us-east-2
+
  $ AWS_PROFILE=<profile> aws_recon --services S3,EC2 --regions global,us-east-1,us-east-2
  ```

+ ```
+ # save output to S3 bucket
+
+ $ AWS_PROFILE=<profile> aws_recon \
+ --services S3,EC2 \
+ --regions global,us-east-1,us-east-2 \
+ --verbose \
+ --s3-bucket my-recon-bucket
+ ```
+
+ ```
+ # save output to S3 bucket with a home region other than us-east-1
+
+ $ AWS_PROFILE=<profile> aws_recon \
+ --services S3,EC2 \
+ --regions global,us-east-1,us-east-2 \
+ --verbose \
+ --s3-bucket my-recon-bucket:us-west-2
+ ```
+
  Example [OpenCSPM](https://github.com/OpenCSPM/opencspm) formatted (NDJSON) output.

  ```
@@ -225,7 +249,7 @@ Most users will want to limit collection to relevant services and regions. Runni
  ```
  $ aws_recon -h

- AWS Recon - AWS Inventory Collector (0.3.0)
+ AWS Recon - AWS Inventory Collector (0.4.0)

  Usage: aws_recon [options]
  -r, --regions [REGIONS] Regions to scan, separated by comma (default: all)
@@ -233,6 +257,7 @@ Usage: aws_recon [options]
  -s, --services [SERVICES] Services to scan, separated by comma (default: all)
  -x, --not-services [SERVICES] Services to skip, separated by comma (default: none)
  -c, --config [CONFIG] Specify config file for services & regions (e.g. config.yaml)
+ -b, --s3-bucket [BUCKET:REGION] Write output file to S3 bucket (default: '')
  -o, --output [OUTPUT] Specify output file (default: output.json)
  -f, --format [FORMAT] Specify output format (default: aws)
  -t, --threads [THREADS] Specify max threads (default: 8, max: 128)
@@ -251,6 +276,8 @@ Usage: aws_recon [options]

  Output is always some form of JSON - either JSON lines or plain JSON. The output is either written to a file (the default), or written to stdout (with `-j`).

+ When writing to an S3 bucket, the JSON output is automatically compressed with `gzip`.
+
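Consumers therefore need to gunzip the object before parsing it. A minimal Ruby sketch; the helper and file name are hypothetical:

```ruby
require 'json'
require 'zlib'

# Read a downloaded AWS Recon output object back into a Ruby hash:
# gunzip the file, then parse the JSON inside.
def read_recon_output(path)
  Zlib::GzipReader.open(path) { |gz| JSON.parse(gz.read) }
end

# resources = read_recon_output('output.json.gz')
# puts resources.keys
```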
  ## Support for Manually Enabled Regions

  If you have enabled **manually enabled regions**:
@@ -351,7 +378,7 @@ $ cd aws-recon
  Create a sticky gemset if using RVM:

  ```
- $ rvm use 2.6.5@aws_recon_dev --create --ruby-version
+ $ rvm use 2.7.2@aws_recon_dev --create --ruby-version
  ```

  Run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
data/terraform/cloudwatch.tf ADDED
@@ -0,0 +1,26 @@
+ # https://www.terraform.io/docs/providers/aws/r/cloudwatch_event_rule.html
+ resource "aws_cloudwatch_event_rule" "default" {
+ name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ description = "AWS Recon scheduled task"
+ schedule_expression = var.schedule_expression
+ }
+
+ # https://www.terraform.io/docs/providers/aws/r/cloudwatch_event_target.html
+ resource "aws_cloudwatch_event_target" "default" {
+ target_id = aws_ecs_task_definition.aws_recon_task.id
+ arn = aws_ecs_cluster.aws_recon.arn
+ rule = aws_cloudwatch_event_rule.default.name
+ role_arn = aws_iam_role.cw_events.arn
+
+ ecs_target {
+ launch_type = "FARGATE"
+ task_definition_arn = aws_ecs_task_definition.aws_recon_task.arn
+ platform_version = "LATEST"
+
+ network_configuration {
+ assign_public_ip = true
+ security_groups = [aws_security_group.sg.id]
+ subnets = [aws_subnet.subnet.id]
+ }
+ }
+ }
data/terraform/ecs.tf ADDED
@@ -0,0 +1,49 @@
+ resource "aws_ecs_cluster" "aws_recon" {
+ name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ capacity_providers = [local.ecs_task_provider]
+ }
+
+ resource "aws_ecs_task_definition" "aws_recon_task" {
+ family = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ task_role_arn = aws_iam_role.aws_recon_role.arn
+ execution_role_arn = aws_iam_role.ecs_task_execution.arn
+ requires_compatibilities = [local.ecs_task_provider]
+ network_mode = "awsvpc"
+ cpu = 1024
+ memory = 2048
+
+ container_definitions = jsonencode([
+ {
+ name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ image = "${var.aws_recon_container_name}:${var.aws_recon_container_version}"
+ assign_public_ip = true
+ entryPoint = [
+ "aws_recon",
+ "--verbose",
+ "--format",
+ "custom",
+ "--s3-bucket",
+ "${aws_s3_bucket.aws_recon.bucket}:${data.aws_region.current.name}",
+ "--regions",
+ join(",", var.aws_regions)
+ ]
+ logConfiguration = {
+ logDriver = "awslogs"
+ options = {
+ awslogs-group = aws_cloudwatch_log_group.aws_recon.name,
+ awslogs-region = data.aws_region.current.name,
+ awslogs-stream-prefix = "ecs"
+ }
+ }
+ }
+ ])
+ }
+
+ resource "aws_cloudwatch_log_group" "aws_recon" {
+ name = "/ecs/${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ retention_in_days = var.retention_period
+ }
+
+ locals {
+ ecs_task_provider = "FARGATE"
+ }
data/terraform/iam.tf ADDED
@@ -0,0 +1,125 @@
+ #
+ # IAM policies and roles for ECS and CloudWatch execution
+ #
+ resource "aws_iam_role" "aws_recon_role" {
+ name = local.aws_recon_task_role_name
+ assume_role_policy = data.aws_iam_policy_document.aws_recon_task_execution_assume_role_policy.json
+ }
+
+ data "aws_iam_policy_document" "aws_recon_task_execution_assume_role_policy" {
+ statement {
+ actions = ["sts:AssumeRole"]
+
+ principals {
+ type = "Service"
+ identifiers = [
+ "ecs.amazonaws.com",
+ "ecs-tasks.amazonaws.com"
+ ]
+ }
+ }
+ }
+
+ resource "aws_iam_role_policy_attachment" "aws_recon_task_execution" {
+ role = aws_iam_role.aws_recon_role.name
+ policy_arn = data.aws_iam_policy.aws_recon_task_execution.arn
+ }
+
+ resource "aws_iam_role_policy" "aws_recon" {
+ name = local.bucket_write_policy_name
+ role = aws_iam_role.aws_recon_role.id
+
+ policy = jsonencode({
+ Version = "2012-10-17"
+ Id = "${var.aws_recon_base_name}-bucket-write"
+ Statement = [
+ {
+ Sid = "AWSReconS3PutObject"
+ Effect = "Allow"
+ Action = "s3:PutObject"
+ Resource = [
+ "${aws_s3_bucket.aws_recon.arn}/*"
+ ]
+ }
+ ]
+ })
+ }
+
+ data "aws_iam_policy" "aws_recon_task_execution" {
+ arn = "arn:aws:iam::aws:policy/ReadOnlyAccess"
+ }
+
+ resource "aws_iam_role" "ecs_task_execution" {
+ name = local.ecs_task_execution_role_name
+ assume_role_policy = data.aws_iam_policy_document.ecs_task_execution_assume_role_policy.json
+
+ tags = {
+ Name = local.ecs_task_execution_role_name
+ }
+ }
+
+ data "aws_iam_policy_document" "ecs_task_execution_assume_role_policy" {
+ statement {
+ actions = ["sts:AssumeRole"]
+
+ principals {
+ type = "Service"
+ identifiers = ["ecs-tasks.amazonaws.com"]
+ }
+ }
+ }
+
+ # ECS task execution
+ resource "aws_iam_policy" "ecs_task_execution" {
+ name = local.ecs_task_execution_policy_name
+ policy = data.aws_iam_policy.ecs_task_execution.policy
+ }
+
+ resource "aws_iam_role_policy_attachment" "ecs_task_execution" {
+ role = aws_iam_role.ecs_task_execution.name
+ policy_arn = aws_iam_policy.ecs_task_execution.arn
+ }
+
+ data "aws_iam_policy" "ecs_task_execution" {
+ arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
+ }
+
+ # CloudWatch Events
+ resource "aws_iam_role" "cw_events" {
+ name = local.cw_events_role_name
+ assume_role_policy = data.aws_iam_policy_document.cw_events_assume_role_policy.json
+ }
+
+ data "aws_iam_policy_document" "cw_events_assume_role_policy" {
+ statement {
+ actions = ["sts:AssumeRole"]
+
+ principals {
+ type = "Service"
+ identifiers = ["events.amazonaws.com"]
+ }
+ }
+ }
+
+ resource "aws_iam_policy" "cw_events" {
+ name = local.cw_events_policy_name
+ policy = data.aws_iam_policy.cw_events.policy
+ }
+
+ resource "aws_iam_role_policy_attachment" "cw_events" {
+ role = aws_iam_role.cw_events.name
+ policy_arn = aws_iam_policy.cw_events.arn
+ }
+
+ data "aws_iam_policy" "cw_events" {
+ arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole"
+ }
+
+ locals {
+ bucket_write_policy_name = "${var.aws_recon_base_name}-bucket-write-policy"
+ ecs_task_execution_role_name = "${var.aws_recon_base_name}-ecs-task-execution-role"
+ ecs_task_execution_policy_name = "${var.aws_recon_base_name}-ecs-task-execution-policy"
+ cw_events_policy_name = "${var.aws_recon_base_name}-cw-events-policy"
+ cw_events_role_name = "${var.aws_recon_base_name}-cw-events-role"
+ aws_recon_task_role_name = "${var.aws_recon_base_name}-exec-role"
+ }
data/terraform/main.tf ADDED
@@ -0,0 +1,13 @@
+ terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 3.0"
+ }
+ }
+ }
+
+ # Configure the AWS Provider
+ provider "aws" {
+ region = "us-east-2"
+ }
data/terraform/output.tf ADDED
@@ -0,0 +1,15 @@
+ output "aws_recon_ecs_cluster" {
+ value = aws_ecs_cluster.aws_recon.name
+ }
+
+ output "aws_recon_ecs_scheduled_task" {
+ value = aws_cloudwatch_event_rule.default.name
+ }
+
+ output "aws_recon_s3_bucket" {
+ value = aws_s3_bucket.aws_recon.bucket
+ }
+
+ output "aws_recon_task_manual_run_command" {
+ value = "\nOne-off task run command:\n\naws ecs run-task --task-definition ${aws_ecs_task_definition.aws_recon_task.family} --cluster ${aws_ecs_cluster.aws_recon.name} --launch-type FARGATE --network-configuration \"awsvpcConfiguration={subnets=[${aws_subnet.subnet.id}],securityGroups=[${aws_security_group.sg.id}],assignPublicIp=ENABLED}\"\n"
+ }
data/terraform/readme.md ADDED
@@ -0,0 +1,20 @@
+ ## Terraform Setup
+
+ This is an example module that can be used in its current form or modified for your specific environment. It builds the minimum components necessary to collect inventory on a schedule by running AWS Recon as a Fargate scheduled task.
+
+ ### Requirements
+
+ Before running this Terraform module, adjust your region accordingly in `main.tf`.
+
+ ### What is created?
+
+ This Terraform example will deploy the following resources:
+
+ - an S3 bucket to store compressed JSON output files
+ - an IAM role for ECS task execution
+ - a Security Group for the ECS cluster/task
+ - a VPC and IGW for the ECS cluster/task
+ - an ECS/Fargate cluster
+ - an ECS task definition to run AWS Recon collection
+ - a CloudWatch event rule to trigger the ECS task
+ - a CloudWatch log group for ECS task logs
data/terraform/s3.tf ADDED
@@ -0,0 +1,20 @@
+ resource "aws_s3_bucket" "aws_recon" {
+ bucket = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}-${data.aws_iam_account_alias.current.id}"
+ acl = "private"
+ force_destroy = true
+
+ lifecycle_rule {
+ id = "expire-after-${var.retention_period}-days"
+ enabled = true
+
+ expiration {
+ days = var.retention_period
+ }
+ }
+ }
+
+ resource "random_id" "aws_recon" {
+ byte_length = 6
+ }
+
+ data "aws_iam_account_alias" "current" {}
data/terraform/vars.tf ADDED
@@ -0,0 +1,57 @@
+ variable "aws_recon_base_name" {
+ type = string
+ default = "aws-recon"
+ }
+
+ variable "aws_recon_container_name" {
+ type = string
+ default = "darkbitio/aws_recon"
+ }
+
+ variable "aws_recon_container_version" {
+ type = string
+ default = "latest"
+ }
+
+ variable "aws_regions" {
+ type = list(any)
+ default = [
+ "global",
+ # "af-south-1",
+ # "ap-east-1",
+ # "ap-northeast-1",
+ # "ap-northeast-2",
+ # "ap-northeast-3",
+ # "ap-south-1",
+ # "ap-southeast-1",
+ # "ap-southeast-2",
+ # "ca-central-1",
+ # "eu-central-1",
+ # "eu-north-1",
+ # "eu-south-1",
+ # "eu-west-1",
+ # "eu-west-2",
+ # "eu-west-3",
+ # "me-south-1",
+ # "sa-east-1",
+ "us-east-1",
+ "us-east-2",
+ "us-west-1",
+ "us-west-2",
+ ]
+ }
+
+ variable "retention_period" {
+ type = number
+ default = 30
+ }
+
+ variable "schedule_expression" {
+ type = string
+ default = "cron(4 * * * ? *)"
+ }
+
+ variable "base_subnet_cidr" {
+ type = string
+ default = "10.76.0.0/16"
+ }
data/terraform/vpc.tf ADDED
@@ -0,0 +1,74 @@
+
+ # Create a VPC
+ resource "aws_vpc" "vpc" {
+ cidr_block = local.cidr_block
+ tags = {
+ Name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ }
+ }
+
+ # Create subnet
+ resource "aws_subnet" "subnet" {
+ vpc_id = aws_vpc.vpc.id
+ cidr_block = local.subnet_cidr_block
+ availability_zone = data.aws_availability_zones.available.names[0]
+ map_public_ip_on_launch = true
+
+ tags = {
+ Name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}-public"
+ }
+ }
+
+ resource "aws_security_group" "sg" {
+ name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ description = "Allow AWS Recon collection egress"
+ vpc_id = aws_vpc.vpc.id
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = {
+ Name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ }
+ }
+
+ resource "aws_internet_gateway" "igw" {
+ vpc_id = aws_vpc.vpc.id
+
+ tags = {
+ Name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ }
+ }
+
+ resource "aws_route_table" "rt" {
+ vpc_id = aws_vpc.vpc.id
+
+ route {
+ cidr_block = "0.0.0.0/0"
+ gateway_id = aws_internet_gateway.igw.id
+ }
+
+ tags = {
+ Name = "${var.aws_recon_base_name}-${random_id.aws_recon.hex}"
+ }
+ }
+
+ resource "aws_route_table_association" "rt_association" {
+ subnet_id = aws_subnet.subnet.id
+ route_table_id = aws_route_table.rt.id
+ }
+
+ locals {
+ cidr_block = var.base_subnet_cidr
+ subnet_cidr_block = cidrsubnet(local.cidr_block, 8, 0)
+ }
+
+ data "aws_region" "current" {}
+
+ data "aws_availability_zones" "available" {
+ state = "available"
+ }
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: aws_recon
  version: !ruby/object:Gem::Version
- version: 0.3.4
+ version: 0.4.3
  platform: ruby
  authors:
  - Josh Larsen
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-03-02 00:00:00.000000000 Z
+ date: 2021-04-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk
@@ -167,7 +167,6 @@ files:
  - ".github/workflows/smoke-test.yml"
  - ".gitignore"
  - ".rubocop.yml"
- - ".travis.yml"
  - Dockerfile
  - Gemfile
  - LICENSE.txt
@@ -245,6 +244,15 @@ files:
  - lib/aws_recon/services.yaml
  - lib/aws_recon/version.rb
  - readme.md
+ - terraform/cloudwatch.tf
+ - terraform/ecs.tf
+ - terraform/iam.tf
+ - terraform/main.tf
+ - terraform/output.tf
+ - terraform/readme.md
+ - terraform/s3.tf
+ - terraform/vars.tf
+ - terraform/vpc.tf
  homepage: https://github.com/darkbitio/aws-recon
  licenses:
  - MIT
@@ -267,5 +275,5 @@ requirements: []
  rubygems_version: 3.0.8
  signing_key:
  specification_version: 4
- summary: A multi-threaded AWS inventory collection cli tool.
+ summary: A multi-threaded AWS security-focused inventory collection tool.
  test_files: []
data/.travis.yml DELETED
@@ -1,7 +0,0 @@
- ---
- sudo: false
- language: ruby
- cache: bundler
- rvm:
- - 2.6.5
- before_install: gem install bundler -v 1.17.3