dorothy2 1.2.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +8 -8
- data/CHANGELOG +39 -14
- data/README.md +80 -62
- data/UPDATE +6 -14
- data/bin/dorothy2 +472 -0
- data/dorothy2.gemspec +22 -16
- data/etc/ddl/dorothive.ddl +619 -373
- data/etc/sources.yml.example +27 -2
- data/lib/doroGUI.rb +232 -0
- data/lib/doroParser.rb +34 -78
- data/lib/dorothy2.rb +288 -248
- data/lib/dorothy2/BFM.rb +114 -61
- data/lib/dorothy2/DEM.rb +3 -1
- data/lib/dorothy2/NAM.rb +2 -2
- data/lib/dorothy2/Settings.rb +2 -1
- data/lib/dorothy2/VSM.rb +2 -1
- data/lib/dorothy2/deep_symbolize.rb +2 -7
- data/lib/dorothy2/do-init.rb +286 -19
- data/lib/dorothy2/do-logger.rb +1 -1
- data/lib/dorothy2/do-utils.rb +382 -33
- data/lib/dorothy2/version.rb +1 -1
- data/lib/dorothy2/vtotal.rb +30 -20
- data/lib/mu/xtractr.rb +11 -11
- data/lib/mu/xtractr/stream.rb +1 -1
- data/lib/www/public/reset.css +153 -0
- data/lib/www/public/style.css +65 -0
- data/lib/www/views/analyses.erb +28 -0
- data/lib/www/views/email.erb +63 -0
- data/lib/www/views/flows.erb +30 -0
- data/lib/www/views/layout.erb +27 -0
- data/lib/www/views/profile.erb +49 -0
- data/lib/www/views/queue.erb +28 -0
- data/lib/www/views/resume.erb +135 -0
- data/lib/www/views/resume.erb~ +88 -0
- data/lib/www/views/samples.erb +20 -0
- data/lib/www/views/upload.erb +154 -0
- data/share/img/The_big_picture.pdf +0 -0
- data/test/tc_dorothy_full.rb +3 -0
- metadata +169 -70
- data/TODO +0 -27
- data/bin/dorothy_start +0 -225
- data/bin/dorothy_stop +0 -28
- data/bin/dparser_start +0 -94
- data/bin/dparser_stop +0 -31
- data/etc/dorothy copy.yml.example +0 -39
- data/etc/extensions.yml +0 -41
- data/share/update-dorothive.sql +0 -19
data/lib/dorothy2.rb
CHANGED
@@ -7,7 +7,8 @@
 
 ##.for irb debug:
 ##from $home, irb and :
-
+#load 'dorothy2.rb'; include Dorothy; LOGGER = DoroLogger.new(STDOUT, "weekly"); DoroSettings.load!("#{File.expand_path("~")}/.dorothy.yml")
+##/
 
 require 'net/ssh'
 require 'net/scp'
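The comment added at the top of the file doubles as a quick-start for interactive debugging. A minimal sketch of that irb session, assuming the gem's lib/ directory is the working directory and that a ~/.dorothy.yml already exists (both assumptions, not shown in this diff):

```ruby
# Interactive debugging session, as hinted by the new header comment.
load 'dorothy2.rb'                                            # from the gem's lib/ directory
include Dorothy
LOGGER = DoroLogger.new(STDOUT, "weekly")                     # log to stdout, weekly rotation
DoroSettings.load!("#{File.expand_path("~")}/.dorothy.yml")   # load the per-user settings file
```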
@@ -21,8 +22,16 @@ require 'pg'
 require 'filemagic'
 require 'rbvmomi'
 require 'timeout'
-require '
+require 'uirusu'
 require 'digest'
+require 'mail'
+require 'io/console'
+require 'base64'
+require 'open-uri'
+require 'csv'
+require 'whois'
+
+
 
 require File.dirname(__FILE__) + '/dorothy2/do-init'
 require File.dirname(__FILE__) + '/dorothy2/Settings'
@@ -35,60 +44,77 @@ require File.dirname(__FILE__) + '/dorothy2/do-utils'
 require File.dirname(__FILE__) + '/dorothy2/do-logger'
 require File.dirname(__FILE__) + '/dorothy2/version'
 
-module Dorothy
 
-def get_time(local=Time.new)
-time = local
-time.utc.strftime("%Y-%m-%d %H:%M:%S")
-end
 
 
-
+
+module Dorothy
+
+def start_analysis(queue)
 #Create a mutex for monitoring the access to the methods
-@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+@queue_size = queue.size
+
+unless @queue_size == 0
+queue.each do |qentry|
+
+bin = Loadmalw.new(qentry["path"].strip, qentry["filename"])
+profile = Util.load_profile(qentry['profile'])
+
+next unless profile
+next unless check_support(bin, qentry["id"], profile)
+scan(bin) if profile[1]['vtotal_query'] #avoid to stress VT if we are just testing
+
+if MANUAL #no multithread
+execute_analysis(bin, qentry["id"], profile)
+else #Use multithreading
+@analysis_threads << Thread.new(bin.filename){
+sleep rand(@queue_size * 2) #OPTIMIZE #REVIEW
+execute_analysis(bin, qentry["id"],profile,rand(30))
+}
+end
 end
+else
+LOGGER.warn("Analyser", "The queue is currently empty!") if DEBUG
 end
 end
 
 
-
-
-
-
-
-
-
-
-
-
+
+def execute_analysis(bin, qentry, profile, timer=0)
+db = Insertdb.new
+
+prof_info = profile[1]
+
+#guestvm struct: array ["sandbox id", "sandbox name", "ipaddress", "user", "password"]
+sleep timer until (guestvm = db.find_vm(prof_info['OS']['type'], prof_info['OS']['version'], prof_info['OS']['lang']))
+
+db.analysis_queue_mark(qentry, "processing")
+
+begin
+if analyze(bin, guestvm, qentry, profile)
+db.analysis_queue_mark(qentry, "analysed")
+else
+db.analysis_queue_mark(qentry, "error")
+end
+rescue
+db.analysis_queue_mark(qentry, "cancelled")
 end
+
+db.free_vm(guestvm[0])
+db.close
+
 end
 
+
+
+
 ###ANALYZE THE SOURCE
-def analyze(bin, guestvm)
+def analyze(bin, guestvm, queueid, profile)
 
 #RESERVING AN ANALYSIS ID
 db = Insertdb.new
 anal_id = db.get_anal_id
+prof_info = profile[1]
 
 #set home vars
 sample_home = DoroSettings.env[:analysis_dir] + "/#{anal_id}"
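start_analysis now pulls its work from the database queue instead of a DorothyFetcher: each entry is read as a hash with at least "path", "filename", "profile" and "id" keys (field names taken from the hunk above). A hypothetical entry, with illustrative values only, assuming the Dorothy environment is loaded:

```ruby
# Hypothetical queue entry, shaped after the fields start_analysis reads above;
# every value below is made up for illustration.
qentry = {
  "id"       => 1,                                # queue row id, later passed to execute_analysis
  "path"     => "/opt/dorothy/bins/sample.exe ",  # trailing whitespace removed with .strip
  "filename" => "sample.exe",
  "profile"  => "default"                         # resolved through Util.load_profile
}

bin     = Loadmalw.new(qentry["path"].strip, qentry["filename"])
profile = Util.load_profile(qentry["profile"])    # entries with an unknown profile are skipped with `next`
```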
@@ -99,6 +125,7 @@ module Dorothy
 
 vm_log_header = "VM#{guestvm[0]} ".yellow + "[" + "#{anal_id}".red + "] "
 
+
 LOGGER.info "VSM", vm_log_header + "Analyzing binary #{bin.filename}"
 
 begin
@@ -111,12 +138,11 @@ module Dorothy
 Dir.mkdir bin.dir_screens
 Dir.mkdir bin.dir_downloads
 
-
-
-
-
-
-end
+LOGGER.debug "VSM", sample_home
+LOGGER.debug "VSM",bin.dir_bin
+LOGGER.debug "VSM",bin.dir_pcap
+LOGGER.debug "VSM",bin.dir_screens
+
 
 else
 LOGGER.warn "VSM",vm_log_header + "Malware #{bin.md5} sample_home already present, WTF!? Skipping.."
@@ -125,8 +151,7 @@ module Dorothy
 end
 
 
-
-FileUtils.cp(bin.binpath,bin.dir_bin) # mv?
+FileUtils.ln_s(bin.binpath,bin.dir_bin + bin.filename) # put a symbolic link from the analysis folder to the bins repo
 
 
 #Creating a new VSM object for managing the SandBox VM
@@ -164,29 +189,31 @@ module Dorothy
 dumpname = anal_id.to_s + "-" + bin.md5
 pid = @nam.start_sniffer(guestvm[2],DoroSettings.nam[:interface], dumpname, DoroSettings.nam[:pcaphome])
 LOGGER.info "NAM",vm_log_header + "Start sniffing module"
-LOGGER.debug "NAM",vm_log_header + "Tcpdump instance #{pid} started"
+LOGGER.debug "NAM",vm_log_header + "Tcpdump instance #{pid} started"
 
 #sleep 5
 
 @screenshots = Array.new
 
 #Execute File into VM
-LOGGER.info "VSM",vm_log_header + "Executing #{bin.full_filename} with #{
+LOGGER.info "VSM",vm_log_header + "Executing #{bin.full_filename} with #{prof_info['extensions'][bin.extension]['prog_name']}"
 
 if MANUAL
-LOGGER.debug "
+LOGGER.debug "MANUAL-MODE",vm_log_header + " MANUAL mode detected. You can now logon to rdp://#{guestvm[2]} "
 
 menu="
-
-
-1) Take Screenshot
-2) Take ProcessList
-3) Execute #{bin.full_filename}
-
+#{"Choose your next action:".yellow}
+------------------------
+#{"1".yellow}) Take Screenshot
+#{"2".yellow}) Take ProcessList
+#{"3".yellow}) Execute #{bin.full_filename}
+#{"0".yellow}) Continue and revert the machine.
+------------------------
 
 Select a nuber:"
 
-
+print menu
+$stdout.flush
 answer = gets.chop
 
 until answer == "0"
|
|
201
228
|
LOGGER.info "MANUAL-MODE", vm_log_header + "[" + "+".red + "]" + " PID: #{pid}, NAME: #{@current_procs[pid]["pname"]}, COMMAND: #{@current_procs[pid]["cmdLine"]}"
|
202
229
|
end
|
203
230
|
when "3"
|
204
|
-
guestpid = vsm.exec_file("C:\\#{bin.full_filename}",
|
231
|
+
guestpid = vsm.exec_file("C:\\#{bin.full_filename}",prof_info['extensions'][bin.extension])
|
205
232
|
LOGGER.debug "MANUAL-MODE",vm_log_header + "Program executed with PID #{guestpid}"
|
206
233
|
#when "x" then -- More interactive actions to add
|
207
234
|
else
|
208
|
-
|
235
|
+
print menu
|
236
|
+
$stdout.flush
|
209
237
|
end
|
210
238
|
answer = gets.chop
|
211
239
|
end
|
@@ -214,21 +242,21 @@ module Dorothy
 
 
 else
-guestpid = vsm.exec_file("C:\\#{bin.full_filename}",
-LOGGER.debug "VSM",vm_log_header + "Program executed with PID #{guestpid}"
+guestpid = vsm.exec_file("C:\\#{bin.full_filename}",prof_info['extensions'][bin.extension])
+LOGGER.debug "VSM",vm_log_header + "Program executed with PID #{guestpid}"
 sleep 1
 returncode = vsm.get_status(guestpid)
 raise "The program was not correctly executed into the Sandbox. Status code: #{returncode}" unless returncode == 0 || returncode.nil?
 
-LOGGER.info "VSM",vm_log_header + " Sleeping #{
-sleep
+LOGGER.info "VSM",vm_log_header + " Sleeping #{prof_info['sleeptime']} seconds".yellow
+sleep prof_info['screenshots']['delay_first'] % prof_info['sleeptime']
 
-
+prof_info['screenshots']['number'].times do
 @screenshots.push vsm.screenshot
-sleep
+sleep prof_info['screenshots']['delay_inbetween'] % prof_info['sleeptime'] if prof_info['screenshots']['delay_inbetween']
 end
 
-sleep
+sleep prof_info['sleeptime']
 
 #Get Procs
 @current_procs = vsm.get_running_procs
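The screenshot schedule is now driven entirely by the profile: the keys read above are sleeptime, screenshots.number, screenshots.delay_first and screenshots.delay_inbetween, and the modulo against sleeptime keeps each delay shorter than the overall run. A worked example with made-up numbers:

```ruby
# Hypothetical profile fragment; the keys mirror those read in the hunk above,
# the numbers are examples only.
prof_info = {
  'sleeptime'   => 60,
  'screenshots' => { 'number' => 3, 'delay_first' => 10, 'delay_inbetween' => 15 }
}

prof_info['screenshots']['delay_first']     % prof_info['sleeptime']   # => 10
prof_info['screenshots']['delay_inbetween'] % prof_info['sleeptime']   # => 15
# => screenshots at roughly t = 10 s, 25 s and 40 s, followed by the final 60-second sleep.
```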
@@ -246,7 +274,7 @@ module Dorothy
 LOGGER.info "VSM", vm_log_header + "Checking for spowned processes"
 
 unless @current_procs.nil?
-@procs = vsm.get_new_procs(@current_procs)
+@procs = vsm.get_new_procs(@current_procs, "#{DoroSettings.env[:home]}/etc/#{profile[0]}_baseline_procs.yml")
 if @procs.size > 0
 LOGGER.info "VSM", vm_log_header + "#{@procs.size} new process(es) found"
 @procs.each_key do |pid|
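New-process detection is now diffed against a per-profile baseline file rather than an implicit default; the same file is written by run_baseline further down in this diff. A sketch of how that path is composed, assuming a profile named "WinXPSP3" and a loaded DoroSettings (both assumptions):

```ruby
# profile[0] is the profile's name, profile[1] its settings hash (see the load_profile usage earlier).
profile  = ["WinXPSP3", {}]                     # hypothetical profile tuple
baseline = "#{DoroSettings.env[:home]}/etc/#{profile[0]}_baseline_procs.yml"
# e.g. "/opt/dorothy/etc/WinXPSP3_baseline_procs.yml" if env[:home] is "/opt/dorothy"
```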
@@ -284,7 +312,7 @@ module Dorothy
 pcaprid = Loadmalw.calc_pcaprid(dump.filename, dump.size).rstrip
 end
 
-LOGGER.debug "NAM", vm_log_header + "Pcaprid: " + pcaprid
+LOGGER.debug "NAM", vm_log_header + "Pcaprid: " + pcaprid
 
 empty_pcap = false
 
@@ -296,7 +324,7 @@ module Dorothy
 
 dumpvalues = [dump.sha, dump.size, pcaprid, dump.binpath, 'false']
 dump.sha = "EMPTYPCAP" if empty_pcap
-analysis_values = [anal_id, bin.sha, guestvm[0], dump.sha, get_time]
+analysis_values = [anal_id, bin.sha, guestvm[0], dump.sha, Util.get_time, queueid]
 
 if pcaprid.nil? || bin.dir_pcap.nil? || bin.sha.nil? || bin.md5.nil?
 LOGGER.error "VSM", "VM#{guestvm[0]} Can't retrieve the required information"
@@ -304,7 +332,7 @@ module Dorothy
 end
 
 
-LOGGER.debug "DB", "VM#{guestvm[0]} Database insert phase"
+LOGGER.debug "DB", "VM#{guestvm[0]} Database insert phase"
 
 db.begin_t #needed for rollbacks
 in_transaction = true
@@ -324,9 +352,9 @@ module Dorothy
 end
 
 @procs.each_key do |pid|
-@procs[pid]["endTime"] ? end_time = get_time(@procs[pid]["endTime"]) : end_time = "null"
+@procs[pid]["endTime"] ? end_time = Util.get_time(@procs[pid]["endTime"]) : end_time = "null"
 @procs[pid]["exitCode"] ? exit_code = @procs[pid]["exitCode"] : exit_code = "null"
-sys_procs_values = [anal_id, pid, @procs[pid]["pname"], @procs[pid]["owner"], @procs[pid]["cmdLine"], get_time(@procs[pid]["startTime"]), end_time, exit_code ]
+sys_procs_values = [anal_id, pid, @procs[pid]["pname"], @procs[pid]["owner"], @procs[pid]["cmdLine"], Util.get_time(@procs[pid]["startTime"]), end_time, exit_code ]
 unless db.insert("sys_procs", sys_procs_values)
 LOGGER.fatal "DB", vm_log_header + "Error while inserting data into table sys_procs. Skipping binary #{bin.md5}"
 raise "DB-ERROR"
@@ -336,33 +364,33 @@ module Dorothy
 
 #TODO ADD RT CODE
 
+
 db.commit
 in_transaction = false
 db.close
 
-LOGGER.info "VSM", vm_log_header + "Removing file from /bins directory"
-FileUtils.rm(bin.binpath)
 LOGGER.info "VSM", vm_log_header + "Process compleated successfully"
 
-rescue SignalException
+rescue SignalException #, RuntimeError
 LOGGER.warn "DOROTHY", "SIGINT".red + " Catched, exiting gracefully."
 stop_nam_revertvm(@nam, pid, vsm, reverted, vm_log_header)
 LOGGER.debug "VSM", vm_log_header + "Removing working dir"
 FileUtils.rm_r(sample_home)
+
 if in_transaction
 db.rollback #rollback in case there is a transaction on going
 db.close
 end
 
+raise
 rescue Exception => e
 LOGGER.error "VSM", vm_log_header + "An error occurred while analyzing #{bin.filename}, skipping\n"
-LOGGER.debug "
+LOGGER.debug "Analyser" , "#{$!}\n #{e.inspect} \n #{e.backtrace}"
 
-LOGGER.warn "
+LOGGER.warn "Analyser", vm_log_header + "Stopping NAM instances if presents, reverting the Sandbox, and removing working directory"
 
 stop_nam_revertvm(@nam, pid, vsm, reverted, vm_log_header)
 LOGGER.debug "VSM", vm_log_header + "Removing working dir"
-
 FileUtils.rm_r(sample_home)
 
 if in_transaction
@@ -371,228 +399,240 @@ module Dorothy
 end
 
 LOGGER.warn "VSM", vm_log_header + "Recover finished."
-
+false
 
 end
 
 end
 
-#Stop NAM instance and Revert VM
-def stop_nam_revertvm(nam, pid, vsm, reverted, vm_log_header)
 
-if pid
-LOGGER.info "VSM", vm_log_header + " Stopping sniffing module " + pid.to_s
-nam.stop_sniffer(pid)
-end
 
-
-
-
-sleep 3 #wait some seconds for letting the vm revert..
-end
-end
+#########################
+## MAIN #
+#########################
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-LOGGER.error "VSM", "VM#{guestvm[0]} ".yellow + "An error occurred while performing the BASELINE run, please retry"
-LOGGER.debug "Dorothy" , "VM#{guestvm[0]} ".yellow + "#{$!}\n #{e.inspect} \n #{e.backtrace}" if VERBOSE
-LOGGER.warn "VSM", "VM#{guestvm[0]} ".yellow + "[RECOVER] Reverting VM"
-vsm.revert_vm
-db.free_vm(guestvm[0])
-db.close
-end
-else
-LOGGER.fatal "VSM", "[CRITICAL]".red + " There are no free VM at the moment..how it is possible?"
-end
-end
+def self.start(daemon=false)
+@vtotal_threads = []
+@analysis_threads = []
+@bins = []
+@db = Insertdb.new
+
+
+LOGGER.info "Analyser", "Started".yellow
+
+
+#Creating a new NAM object for managing the sniffer
+@nam = Doro_NAM.new(DoroSettings.nam)
+#Be sure that there are no open tcpdump instances opened
+@nam.init_sniffer
+
+
+finish = false
+infinite = true
 
-
-
-########################
-private
-def scan(bin)
-#puts "TOTAL", "Forking for VTOTAL"
-@vtotal_threads << Thread.new(bin.sha) {
-LOGGER.info "VTOTAL", "Scanning file #{bin.md5}".yellow
+#be sure that all the vm are available by forcing their release
+@db.vm_init
 
-
-
+#Check if the are some analysis pending in the queue
+unless @db.analysis_queue_pull.empty? || daemon
+LOGGER.warn "WARNING", "There are some pending analyses in the queue, what do you want to do?"
+menu="
+--------------------------------------
+#{"1".yellow}) Mark as analysed and continue
+#{"2".yellow}) Append the new files and analyse also the pending ones
+#{"3".yellow}) List pending analyses
+--------------------------------------
+Select a nuber:"
+
+print menu
+$stdout.flush
+answer = gets.chop
+
+until finish
+case answer
+when "1" then
+@db.analysis_queue_mark_all
+LOGGER.info "Analyser", "Queue Cleared, proceding.."
+finish = true
 
-
+when "2"
+LOGGER.info "Analyser", "Proceding.."
+finish = true
 
-
+when "3"
+@db.analysis_queue_view
 
-
-
-
+else
+LOGGER.warn "Analyser", "There are some pending analyses in the queue, what do you want to do?"
+print menu
+$stdout.flush
 end
 
-
-
+answer = gets.chop unless finish
+end
+end
+
+
+begin
+while infinite #infinite loop
 
-LOGGER.info "VTOTAL", "Updating DB"
-vtvalues = [bin.sha, vt.family, vt.vendor, vt.version, vt.rate, vt.updated, vt.detected]
-db = Insertdb.new
-db.begin
 begin
-db.
-
-rescue
-
-
+start_analysis(@db.analysis_queue_pull)
+infinite = daemon #exit if wasn't set
+rescue SignalException #, RuntimeError
+LOGGER.warn "DOROTHY", "SIGINT".red + " Catched [2], exiting gracefully."
+stop_running_analyses
+Process.kill('HUP',Process.pid)
 end
 
-#
-
+# Sleeping a while if -d wasn't set, then quit.
+if daemon
+LOGGER.info "Analyser", "SLEEPING" if DEBUG
+sleep DoroSettings.env[:sleeptime].to_i
+end
 
-
+wait_end #TODO: is really required (here)?
 
+end
+rescue SignalException #, RuntimeError
+LOGGER.warn "DOROTHY", "SIGINT".red + " Catched [3], exiting gracefully."
+end
+@db.close
 
+end
 
-
-## MAIN #
-#########################
+def wait_end
 
-
+unless @vtotal_threads.empty?
+@vtotal_threads.each { |aThread| aThread.join}
+LOGGER.info "VTOTAL","Process compleated successfully" if DEBUG
+end
 
-
-
-@db = Insertdb.new
+@analysis_threads.each { |aThread| aThread.join }
+LOGGER.info "Analyser", "Process finished" if DEBUG
 
-
+end
 
-
+############# END OF MAIN
 
 
-LOGGER.info "Dorothy", "Started".yellow
 
-if daemon
-check_pid_file DoroSettings.env[:pidfile]
-puts "[" + "+".red + "] " + "[Dorothy]".yellow + " Going in backround with pid #{Process.pid}"
-puts "[" + "+".red + "] " + "[Dorothy]".yellow + " Logging on #{DoroSettings.env[:logfile]}"
-Process.daemon
-create_pid_file DoroSettings.env[:pidfile]
-puts "[" + "+".red + "] " + "[Dorothy]".yellow + " Going in backround with pid #{Process.pid}"
-end
 
-#Creating a new NAM object for managing the sniffer
-@nam = Doro_NAM.new(DoroSettings.nam)
-#Be sure that there are no open tcpdump instances opened
-@nam.init_sniffer
 
 
 
-infinite = true
 
-
-
-
-if source # a source has been specified
-while infinite #infinite loop
-dfm = DorothyFetcher.new(source)
-start_analysis(dfm.bins)
-infinite = daemon #exit if wasn't set
-wait_end
-LOGGER.info "Dorothy", "SLEEPING" if daemon
-sleep DoroSettings.env[:dtimeout] if daemon # Sleeping a while if -d wasn't set, then quit.
-end
-else # no sources specified, analyze all of them
-while infinite #infinite loop
-sources = YAML.load_file(DoroSettings.env[:home] + '/etc/sources.yml')
-sources.keys.each do |sname|
-dfm = DorothyFetcher.new(sources[sname])
-start_analysis(dfm.bins)
-end
-infinite = daemon #exit if wasn't set
-wait_end
-LOGGER.info "Dorothy", "SLEEPING" if daemon
-sleep DoroSettings.env[:dtimeout].to_i if daemon # Sleeping a while if -d wasn't set, then quit.
-end
-end
+#Stop NAM instance and Revert VM
+def stop_nam_revertvm(nam, pid, vsm, reverted, vm_log_header)
 
-
+if pid
+LOGGER.info "VSM", vm_log_header + " Stopping sniffing module " + pid.to_s
+nam.stop_sniffer(pid)
+end
 
+unless reverted || vsm.nil?
+LOGGER.info "VSM", vm_log_header + " Reverting VM"
+vsm.revert_vm
+sleep 3 #wait some seconds for letting the vm revert..
 end
+end
 
-def wait_end
 
-
-
-
-
+#Check the sample's md5 hash with VirusTotal
+def scan(bin)
+#puts "TOTAL", "Forking for VTOTAL"
+@vtotal_threads << Thread.new(bin.sha) {
+LOGGER.info "VTOTAL", "Scanning file #{bin.md5}".yellow
 
-
-LOGGER.info "Dorothy", "Process finished"
+vt_results = Vtotal.check_hash(bin.md5)
 
-
+if vt_results != false
+
+LOGGER.info "VTOTAL", vt_results[:rate]
+db = Insertdb.new
+db.begin_t
 
-def check_pid_file(file)
-if File.exist? file
-# If we get Errno::ESRCH then process does not exist and
-# we can safely cleanup the pid file.
-pid = File.read(file).to_i
 begin
-
-
-
-
+@id = db.get_curr_malwares_id
+vtvalues = [bin.sha, vt_results[:rate], vt_results[:positive], vt_results[:date], vt_results[:link], @id]
+db.insert("malwares", vtvalues)
+
+#Instert DB
+vt_results[:results].each do |av|
+vendor = av[0]
+if av[1]["detected"]
+family = av[1]["result"]
+updated = (av[1]["update"] != "-" ? av[1]["update"] : "null")
+version = (av[1]["version"] != "-" ? av[1]["version"] : "null")
+vtvalues = [@id, vendor, family, version, updated]
+db.insert("av_signs", vtvalues)
+end
+end
 
-
-
-
+rescue => e
+LOGGER.debug "VTOTAL" , "#{$!}\n #{e.inspect} \n #{e.backtrace}"
+db.rollback
 end
+db.commit
+db.close
 end
-
+}
+end
 
-def create_pid_file(file)
-File.open(file, "w") { |f| f.puts Process.pid }
 
-
-
-
-
-
-
+###Create Baseline
+def self.run_baseline(profile)
+db = Insertdb.new
+db.vm_init
+prof_info = profile[1]
+guestvm = db.find_vm(prof_info['OS']['type'], prof_info['OS']['version'], prof_info['OS']['lang'])
+if guestvm
+begin
+LOGGER.info "VSM","VM#{guestvm[0]}".red + " Executng the baseline run"
+vsm = Doro_VSM::ESX.new(DoroSettings.esx[:host],DoroSettings.esx[:user],DoroSettings.esx[:pass],guestvm[1], guestvm[3], guestvm[4])
+LOGGER.info "VSM","VM#{guestvm[0]}".red + " Sleeping #{prof_info['sleeptime']} seconds".yellow
+sleep prof_info['sleeptime']
+vsm.get_running_procs(nil, true, "#{DoroSettings.env[:home]}/etc/#{profile[0]}_baseline_procs.yml") #save on file
+LOGGER.info "VSM", "VM#{guestvm[0]} ".red + "Reverting VM".yellow
+vsm.revert_vm
+db.free_vm(guestvm[0])
+db.close
+rescue => e
+LOGGER.error "VSM", "VM#{guestvm[0]} ".yellow + "An error occurred while performing the BASELINE run, please retry"
+LOGGER.debug "Analyser" , "VM#{guestvm[0]} ".yellow + "#{$!}\n #{e.inspect} \n #{e.backtrace}"
+LOGGER.warn "VSM", "VM#{guestvm[0]} ".yellow + "[RECOVER] Reverting VM"
+vsm.revert_vm #TODO vsm var might be nil here
+db.free_vm(guestvm[0])
+db.close
 end
+else
+LOGGER.fatal "VSM", "[CRITICAL]".red + " There are no free VM at the moment..how it is possible?"
 end
+end
 
-
-
-
-
-
-
-
-
-
-
-
-
-pid_file = DoroSettings.env[:pidfile]
-if pid_file and File.exist? pid_file
-pid = Integer(File.read(pid_file))
-Process.kill(-2,-pid)
-LOGGER.info "Dorothy", "Process #{pid} terminated"
-puts "[" + "+".red + "]" + " Dorothy Process #{pid} terminated"
-else
-LOGGER.info "Dorothy", "Can't find PID file, is Dorothy really running?"
-end
+
+#Check if the sample extension is supported (= is configured into the extension.yml).
+def check_support(bin, qentry, profile)
+if profile[1]['extensions'].key?(bin.extension)
+true
+else
+db = Insertdb.new #TODO too many db sessions opened. review, and try to use less
+db.analysis_queue_mark(qentry, "error")
+db.close
+LOGGER.warn("VSM", "File extension #{bin.extension} currently not configured in the selected profile #{profile[0]}, skipping")
+LOGGER.debug("VSM", "Filtype: #{bin.type}")
+false
 end
+end
+
 
+
+def self.stop_running_analyses
+LOGGER.info "Analyser", "Killing curent live analysis threads.."
+@analysis_threads.each { |aThread|
+aThread.raise
+aThread.join
+}
 end
+
+end
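The rewritten scan method consumes a hash returned by Vtotal.check_hash (the VirusTotal lookup is now backed by the uirusu gem added to the requires). Judging from the keys read above, the result is roughly shaped like the sketch below; the concrete values are invented for illustration:

```ruby
# Hypothetical return value of Vtotal.check_hash(md5), shaped after the keys scan
# reads above (:rate, :positive, :date, :link, :results); values are examples only.
vt_results = {
  rate:     "5/47",
  positive: 5,
  date:     "2013-08-01 10:00:00",
  link:     "https://www.virustotal.com/...",   # permalink, elided here
  results:  [
    ["VendorA", { "detected" => true,  "result" => "Trojan.Generic",
                  "update" => "20130801", "version" => "1.0" }],
    ["VendorB", { "detected" => false, "result" => nil,
                  "update" => "-", "version" => "-" }]
  ]
}

# Only detections end up in the av_signs table, one row per AV vendor,
# keyed by the id of the row just inserted into malwares.
vt_results[:results].each do |vendor, sig|
  next unless sig["detected"]
  # [@id, vendor, sig["result"], version, updated]  ->  db.insert("av_signs", ...)
end
```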