nexpose_servicenow 0.7.3 → 0.8.0

@@ -1,109 +0,0 @@
- #TODO: Check if using site_id is OK. If the object is changed, the code on SN will likely need changed.
-
- module NexposeServiceNow
-   class Chunker
-     def initialize(report_details, row_limit)
-       @row_limit = row_limit
-       @size_limit = 4_500_000
-       @report_details = report_details
-       @header = get_header
-
-       setup_logging
-     end
-
-     def setup_logging
-       @log = NexposeServiceNow::NxLogger.instance
-       @log.log_message("Chunk File Limit:\t#{@size_limit}MB")
-       @log.log_message("Chunk Row Limit:\t#{@row_limit}")
-     end
-
-     # Grab the header from the first file
-     def get_header
-       file = File.open(@report_details.first[:report_name], 'r')
-       header = file.readline
-       file.close
-
-       header
-     end
-
-     def preprocess
-       all_chunks = []
-       @report_details.each do |report|
-         @log.log_message("Dividing file #{report[:report_name]} into chunks.")
-         chunks = process_file(report[:report_name], report[:id])
-         all_chunks.concat chunks
-       end
-
-       @log.log_message("Files divided into #{all_chunks.count} chunks")
-
-       puts all_chunks.to_json
-     end
-
-     def process_file(file_path, site_id=nil)
-       relative_size_limit = @size_limit - @header.bytesize
-       chunk = { site_id: site_id,
-                 start: @header.bytesize,
-                 length: 0,
-                 row_count: 0 }
-
-       chunks = []
-       csv_file = CSV.open(file_path, 'r', headers: true)
-       while(true)
-         position = csv_file.pos
-         line = csv_file.shift
-         row_length = line.to_s.bytesize
-
-         if line.nil?
-           chunks << chunk
-           break
-         elsif chunk[:length]+row_length < relative_size_limit &&
-               chunk[:row_count] + 1 < @row_limit
-           chunk[:length] += row_length
-           chunk[:row_count] += 1
-         else
-           chunks << chunk
-
-           #Initialise chunk with this row information
-           chunk = { site_id: site_id,
-                     start: position,
-                     length: row_length,
-                     row_count: 1 }
-         end
-       end
-       csv_file.close
-
-       #Should we include the row count?
-       chunks.each do |c|
-         c.delete :row_count
-
-         #Should we do this...?
-         c.delete :site_id if c[:site_id].nil? || c[:site_id] == -1
-       end
-
-       chunks
-     end
-
-     def get_file(site_id=nil)
-       # -1 indicates a single query report
-       return @report_details.first[:report_name] if site_id.to_i <= 0
-
-       report = @report_details.find { |r| r[:id].to_s == site_id.to_s }
-       report[:report_name]
-     end
-
-     def read_chunk(start, length, site_id=nil)
-       file_path = get_file(site_id)
-       msg = "Returning chunk. Start: #{start}, " \
-             "Length: #{length}, File: #{file_path}"
-       @log.log_message(msg)
-
-       #If the header isn't in the chunk, prepend it
-       header = start == 0 ? '' : @header
-
-       file = File.open(file_path, 'rb')
-       file.seek(start)
-       puts header + file.read(length)
-       file.close
-     end
-   end
- end
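
For context, the removed Chunker split generated report CSVs into byte- and row-bounded pieces for transfer to ServiceNow. A minimal driver sketch, assuming the gem is installed, the gem's NxLogger has been initialised elsewhere, and the file paths and limits shown are purely illustrative:

    require 'csv'
    require 'json'
    require 'nexpose_servicenow'

    # Hypothetical report files produced by an earlier export step.
    report_details = [
      { id: 1, report_name: '/tmp/Nexpose-ServiceNow-site_assets-1.csv' },
      { id: 2, report_name: '/tmp/Nexpose-ServiceNow-site_assets-2.csv' }
    ]

    chunker = NexposeServiceNow::Chunker.new(report_details, 10_000)
    chunker.preprocess              # prints the chunk boundaries as JSON
    chunker.read_chunk(0, 4_096, 1) # prints the first 4KB of site 1's file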
@@ -1,177 +0,0 @@
- require 'fileutils'
- require 'tempfile'
- require 'csv-diff'
- require 'csv'
-
-
- class CSVDiff
-   class CSVSource
-     def find_field_indexes(key_fields, field_names)
-       key_fields.map do |field|
-         if field.is_a?(Integer)
-           field
-         else
-           field_names.index{ |field_name| field.to_s.downcase == field_name.downcase } or
-             raise ArgumentError, "Could not locate field '#{field}' in source field names: #{
-               field_names.join(', ')}"
-         end
-       end
-     end
-   end
- end
-
- module NexposeServiceNow
-   class CsvCompare
-     def self.get_columns(csv_file)
-       columns = ''
-       File::open(csv_file,'r') do |f|
-         columns = f.readline.rstrip
-       end
-
-       columns.split(',')
-     end
-
-     def self.get_row(value, columns, status)
-       columns.map { |c| value.fields[c] }.push(status)
-     end
-
-     def self.get_delete(value, columns)
-       self.get_row(value, columns, 'old')
-     end
-
-     def self.get_add(value, columns)
-       self.get_row(value, columns, 'new')
-     end
-
-     def self.update_to_old(value, columns)
-       self.update_to_row(value, columns, 0, 'old')
-     end
-
-     def self.update_to_new(value, columns)
-       self.update_to_row(value, columns, 1, 'new')
-     end
-
-     def self.update_to_row(value, columns, index, status)
-       row = []
-       columns.each do |c|
-         val = value.fields[c]
-         row << (val.kind_of?(Array) ? val[index] : val)
-       end
-       row.push(status)
-     end
-
-     def self.append_to_filename(current_filename, string_to_append)
-       extension = File.extname current_filename
-       name = File.basename current_filename, extension
-       path = File.dirname current_filename
-
-       "#{path}/#{name}-#{string_to_append}#{extension}"
-     end
-
-     def self.update_report_with_diff(report_file, key_fields=[0])
-       old_filename = self.append_to_filename(report_file, 'old')
-       new_filename = self.append_to_filename(report_file, 'new')
-
-       # Report is 'new' file for purpose of diff
-       FileUtils.mv(report_file, new_filename)
-
-       # If the old file doesn't exist, we can just add the status column
-       if File.exists?(old_filename)
-         self.create_csv_diff(old_filename,
-                              new_filename,
-                              report_file,
-                              key_fields)
-       else
-         self.overwrite_existing_report(new_filename, report_file)
-       end
-
-       # 'new' file becomes the basis of comparison next time
-       FileUtils.mv(new_filename, old_filename, :force => true)
-     end
-
-     # Instead of diffing, append 'status' column to an existing file
-     def self.overwrite_existing_report(source_file, target_file)
-       temp = Tempfile.new("#{File.basename source_file}tmp")
-
-       #TODO: Don't do the column_written check every time
-
-       CSV.open(temp, 'w') do |temp_csv|
-         column_written = false
-         new_column_value = ['status']
-         CSV.foreach(source_file) do |orig|
-           temp_csv << (orig + new_column_value)
-
-           unless column_written
-             new_column_value = ['new']
-             column_written = true
-           end
-         end
-       end
-
-       temp.close
-       FileUtils.mv(temp, target_file, :force => true)
-     end
-
-     def self.create_csv_diff(old_file, new_file, target_file, key_fields=[0])
-       old_temp = Tempfile.new("#{File.basename old_file}tmp")
-       new_temp = Tempfile.new("#{File.basename new_file}tmp")
-
-       File.open(old_file) do |f|
-         IO.copy_stream(f, old_temp)
-       end
-       File.open(new_file) do |f|
-         IO.copy_stream(f, new_temp)
-       end
-
-       begin
-         diff = CSVDiff.new(
-           File.expand_path(old_temp),
-           File.expand_path(new_temp),
-           ignore_moves: true,
-           key_fields: key_fields
-         )
-       rescue Exception => e
-         file_name = File.basename target_file
-         raise "Unable to diff file: #{file_name}. \nError received: #{e}"
-       ensure
-         old_temp.close!
-         new_temp.close!
-       end
-
-       columns = self.get_columns(new_file)
-
-       CSV.open(target_file, 'wb') do |csv|
-         csv << (columns+['status'])
-
-         diff.deletes.each_value { |v| csv << get_delete(v, columns) }
-         diff.adds.each_value { |v| csv << get_add(v, columns) }
-
-         if key_fields.count == 1
-           # If only a single key field, we don't need the old values
-           # Just grab the row and let ServiceNow coalesce and update the row
-           update_rows = diff.updates.each_value.map { |u| u.row }
-           update_rows = update_rows.map(&:to_i).sort
-           update_row_num = update_rows.shift
-           current_row_num = 0
-
-           CSV.foreach(new_file, headers: true) do |new_csv|
-             current_row_num = current_row_num + 1
-             next unless current_row_num == update_row_num
-
-             csv << new_csv.push('new')
-             update_row_num = update_rows.shift
-             break if update_row_num == nil
-           end
-         elsif key_fields.count == 2
-           # Multiple key fields result in row "updates"
-           diff.updates.each_value do |v|
-             csv << self.update_to_old(v, columns)
-             csv << self.update_to_new(v, columns)
-           end
-         else
-           raise "Received #{key_fields.count} key fields. Only 1/2 supported."
-         end
-       end
-     end
-   end
- end
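
The removed CsvCompare wrapped the csv-diff gem to tag each report row with a trailing status column. A usage sketch, assuming csv-diff is installed; the path is illustrative:

    require 'nexpose_servicenow'

    # On the first run no assets-old.csv exists, so the report is rewritten
    # with every data row tagged 'new'. Subsequent runs diff against
    # assets-old.csv, write 'old'/'new' rows into assets.csv, and then
    # replace assets-old.csv with the current data for the next comparison.
    NexposeServiceNow::CsvCompare.update_report_with_diff('/tmp/assets.csv', [0])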
@@ -1,84 +0,0 @@
- require_relative '../queries/queries_base'
-
- module NexposeServiceNow
-   class ConnectionHelper
-     NOT_IMPL = 'Error: Method not implemented.'
-
-     def initialize(url, port, username, password, silo='')
-       @log = NexposeServiceNow::NxLogger.instance
-       @url = url
-       @port = port
-       @username = username
-       @password = password
-       @silo = silo
-
-       @timeout = 21600
-     end
-
-     def self.get_report_names(query_name, ids)
-       if QueriesBase.single_report?(query_name)
-         return [ id: -1, report_name: get_report_name(query_name) ]
-       end
-
-       ids.map do |id|
-         { id: id.first, report_name: get_report_name(query_name, id.first) }
-       end
-     end
-
-     def self.get_report_name(query_name, id=nil)
-       name = "Nexpose-ServiceNow-#{query_name}"
-       name += "-#{id}" unless QueriesBase.single_report?(query_name) || id.nil?
-       name
-     end
-
-     def create_query_options(query_options, nexpose_id=nil)
-       options = {}
-       options[:vuln_query_date] = query_options[:vuln_query_date]
-       options[:site_id] = nexpose_id
-       options[:id_type] = 'site'
-       options[:filters] = query_options[:filters] || {}
-       options[:cvss_v] = query_options[:cvss_v]
-
-       # Without a nexpose ID, we don't have a specific delta
-       return options if [nil, -1].include? nexpose_id
-
-       if query_options[:delta_values].empty?
-         error_msg = 'No delta values provided. Exiting...'
-         @log.log_error_message error_msg
-         raise error_msg
-       end
-
-       options[:delta] = "#{query_options[:delta_values][nexpose_id] || 0}"
-
-       @log.log_message("Query options: #{options}")
-
-       options
-     end
-
-     def self.get_filepath(report_name, output_dir)
-       path = File.join output_dir, "#{report_name}.csv"
-       File.expand_path path
-     end
-
-     def connect(username, password)
-       raise NOT_IMPL
-     end
-
-     def generate_report(query_name, ids, id_type, output_dir, query_options={})
-       raise NOT_IMPL
-     end
-
-     # Pulls the collection IDs from Nexpose (e.g. asset groups, sites)
-     def collection_ids(collection_type)
-       raise NOT_IMPL
-     end
-
-     def save_report(report_name, report_id, output_dir)
-       raise NOT_IMPL
-     end
-
-     def get_cvss_version_strings(use_v3)
-       raise NOT_IMPL
-     end
-   end
- end
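
ConnectionHelper is an abstract base: the connection-specific hooks all raise NOT_IMPL and are overridden by concrete helpers such as the data warehouse helper in the next file. A minimal sketch of the pattern, with hypothetical subclass and query names:

    require 'nexpose_servicenow'

    # Hypothetical concrete helper overriding one of the NOT_IMPL hooks.
    class ConsoleHelper < NexposeServiceNow::ConnectionHelper
      def connect(username, password)
        # open a session against the console here
      end
    end

    # The class-level helpers need no connection. 'site_assets' is an
    # illustrative query name; per-site queries get the id suffix.
    NexposeServiceNow::ConnectionHelper.get_report_name('site_assets', 3)
    # => "Nexpose-ServiceNow-site_assets-3"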
@@ -1,140 +0,0 @@
- require 'pg'
- require_relative './connection_helper'
- require_relative '../queries/warehouse_queries'
-
- module NexposeServiceNow
-   class DataWarehouseHelper < ConnectionHelper
-
-     SSL_MODE = 'prefer'
-     GRANULARITY = 500
-     CHUNK_LIMIT = 4_500_000
-
-     def initialize(url, port, username, password, silo='')
-       super(url, port, username, password, silo)
-     end
-
-     def connect
-       @log.log_message 'Creating DWH connection'
-       PG::Connection.open(:host => @url,
-                           :dbname => @silo,
-                           :port => @port,
-                           :user => @username,
-                           :password => @password,
-                           :sslmode => SSL_MODE)
-     end
-
-     def generate_report(query_name, ids, id_type, output_dir, query_options={})
-       output_dir = File.expand_path(output_dir.to_s)
-
-       #A single report doesn't use site filters
-       ids = [-1] if WarehouseQueries.single_report?(query_name)
-
-       query_options[:cvss_v] = get_cvss_version_strings(query_options[:cvss_v3])
-
-       page_size = query_options[:page_size]
-       row_limit = query_options[:row_limit]
-
-       chunks = []
-
-       base_name = "query_#{query_name}"
-
-       ids.each do |id|
-         delta_options = create_query_options(query_options, id)
-         query = WarehouseQueries.send(query_name, delta_options)
-
-         # Open the CSV file to write as pages are retrieved
-         report_name = self.class.get_report_name(query_name, id)
-         @log.log_message "Running query for #{report_name}"
-
-         local_file_name = self.class.get_filepath(report_name, output_dir)
-         csvFile = File.open(local_file_name, 'wb')
-
-         conn = connect
-         conn.transaction do
-           table_name = "query_#{query_name}"
-           table_name = "#{base_name}_#{id}" if id && id.to_i > 0
-
-           @log.log_message "Creating cursor: #{table_name}"
-           conn.exec("DECLARE #{table_name} CURSOR FOR #{query}")
-           res = conn.exec("FETCH #{page_size} FROM #{table_name}")
-
-           # Headers
-           headers = res.fields.join(',')
-           csvFile.puts(headers)
-
-           # Declare the initial chunk
-           chunk = { start: csvFile.pos, length: 0, row_count: 0 }
-           chunk[:site_id] = id unless id.nil? || id == -1
-
-           # Should we overwrite 'res' to release the memory?
-           all_lines = res.values.map { |r| r.join(',') }
-
-           # Table declared, so keep reading pages until it's consumed
-           data_left = true
-           while(data_left)
-             # Lift out a number of lines in a chunk
-             text = all_lines.slice!(0, GRANULARITY)
-
-             # Try to get the next page
-             if text.nil? || text.count == 0
-               res = conn.exec("FETCH #{page_size} FROM #{table_name}")
-               if res.values.count == 0
-                 chunks << chunk
-                 break
-               end
-               all_lines = res.values.map { |r| r.join(',') }
-               next
-             end
-
-             # Work out the details for this chunk
-             line_count = text.count
-             text = text.join("\n")
-             text << "\n"
-             byte_size = text.bytesize
-
-             # Test whether limits would be exceeded
-             below_row_limit = chunk[:row_count] + line_count <= row_limit
-             below_size_limit = chunk[:length] + byte_size < CHUNK_LIMIT
-
-             if below_size_limit && below_row_limit
-               chunk[:length] += byte_size
-               chunk[:row_count] += line_count
-             else
-               chunks << chunk.dup
-               # Store the current pos since the next chunk isn't written
-               chunk[:start] = csvFile.pos
-               chunk[:length] = byte_size
-               chunk[:row_count] = line_count
-             end
-
-             csvFile.write(text)
-           end
-         end
-
-         conn.finish
-
-         # Close the file for this specific report
-         csvFile.close
-         @log.log_message "Report generated: #{report_name}"
-       end
-
-       chunks.each { |c| c.delete :row_count }
-
-       @log.log_message "Finished running query: #{query_name}"
-       chunks.to_json
-     end
-
-     def collection_ids
-       @log.log_message 'Retrieving list of site IDs'
-       connection = connect
-       query = 'select site_id from dim_site'
-       result = connection.query(query)
-       result.map { |r| r['site_id'] }
-       connection.finish
-     end
-
-     def get_cvss_version_strings(use_v3)
-       use_v3 ? { choice: '_v3', fallback: '' } : { choice: '', fallback: '' }
-     end
-   end
- end
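
The removed DataWarehouseHelper streamed query results from a PostgreSQL-backed Data Warehouse through a server-side cursor, writing the CSV and computing chunk boundaries in a single pass. A hedged run sketch; the host, credentials, query name, and option values are all illustrative:

    require 'nexpose_servicenow'

    helper = NexposeServiceNow::DataWarehouseHelper.new(
      'dwh.example.com', 5432, 'reporting', 'secret', 'warehouse_db'
    )

    options = {
      page_size:       5_000,
      row_limit:       100_000,
      cvss_v3:         false,
      filters:         {},
      vuln_query_date: '2019-01-01 00:00:00',
      delta_values:    { 1 => '2019-01-01 00:00:00' } # required for real site ids
    }

    # Writes Nexpose-ServiceNow-site_assets-1.csv under /tmp/reports and
    # returns the chunk metadata as a JSON string.
    puts helper.generate_report('site_assets', [1], 'site', '/tmp/reports', options)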