arachni 0.3 → 0.4
Sign up to get free protection for your applications and access to all the features.
- data/ACKNOWLEDGMENTS.md +1 -1
- data/CHANGELOG.md +146 -0
- data/CONTRIBUTORS.md +1 -0
- data/HACKING.md +3 -3
- data/README.md +81 -49
- data/Rakefile +11 -14
- data/bin/arachni +4 -8
- data/bin/arachni_rpc +17 -0
- data/bin/arachni_rpcd +18 -0
- data/bin/arachni_rpcd_monitor +18 -0
- data/bin/arachni_web +25 -48
- data/bin/arachni_web_autostart +3 -3
- data/conf/README.webui.yaml.txt +7 -21
- data/external/metasploit/plugins/arachni.rb +0 -7
- data/extras/modules/recon/raft_dirs.rb +108 -0
- data/extras/modules/recon/raft_dirs/raft-large-directories.txt +62290 -0
- data/extras/modules/recon/raft_files.rb +110 -0
- data/extras/modules/recon/raft_files/raft-large-files.txt +37037 -0
- data/extras/modules/recon/svn_digger_dirs.rb +108 -0
- data/extras/modules/recon/svn_digger_dirs/Licence.txt +674 -0
- data/extras/modules/recon/svn_digger_dirs/ReadMe-Arachni.txt +4 -0
- data/extras/modules/recon/svn_digger_dirs/ReadMe.txt +6 -0
- data/extras/modules/recon/svn_digger_dirs/all-dirs.txt +5960 -0
- data/extras/modules/recon/svn_digger_files.rb +114 -0
- data/extras/modules/recon/svn_digger_files/Licence.txt +674 -0
- data/extras/modules/recon/svn_digger_files/ReadMe-Arachni.txt +4 -0
- data/extras/modules/recon/svn_digger_files/ReadMe.txt +6 -0
- data/extras/modules/recon/svn_digger_files/all-extensionless.txt +25419 -0
- data/extras/modules/recon/svn_digger_files/all.txt +43135 -0
- data/lib/arachni.rb +2 -7
- data/lib/{audit_store.rb → arachni/audit_store.rb} +68 -60
- data/lib/{component_manager.rb → arachni/component_manager.rb} +8 -8
- data/lib/{component_options.rb → arachni/component_options.rb} +34 -4
- data/lib/{crypto → arachni/crypto}/rsa_aes_cbc.rb +1 -2
- data/lib/arachni/database.rb +4 -0
- data/lib/arachni/database/base.rb +125 -0
- data/lib/arachni/database/hash.rb +384 -0
- data/lib/arachni/database/queue.rb +93 -0
- data/lib/{exceptions.rb → arachni/exceptions.rb} +1 -1
- data/lib/arachni/framework.rb +899 -0
- data/lib/{http.rb → arachni/http.rb} +63 -166
- data/lib/{issue.rb → arachni/issue.rb} +46 -17
- data/lib/{mixins → arachni/mixins}/observable.rb +1 -1
- data/lib/arachni/mixins/progress_bar.rb +81 -0
- data/lib/arachni/mixins/terminal.rb +106 -0
- data/lib/{module.rb → arachni/module.rb} +0 -0
- data/lib/{module → arachni/module}/auditor.rb +250 -86
- data/lib/{module → arachni/module}/base.rb +8 -18
- data/lib/{module → arachni/module}/element_db.rb +10 -2
- data/lib/{module → arachni/module}/key_filler.rb +1 -1
- data/lib/arachni/module/manager.rb +145 -0
- data/lib/{module → arachni/module}/output.rb +6 -1
- data/lib/{module → arachni/module}/trainer.rb +48 -52
- data/lib/{module → arachni/module}/utilities.rb +66 -15
- data/lib/{nokogiri → arachni/nokogiri}/xml/node.rb +0 -0
- data/lib/arachni/options.rb +986 -0
- data/lib/{parser.rb → arachni/parser.rb} +0 -0
- data/lib/{parser → arachni/parser}/auditable.rb +111 -32
- data/lib/{parser → arachni/parser}/elements.rb +28 -20
- data/lib/{parser → arachni/parser}/page.rb +20 -3
- data/lib/{parser → arachni/parser}/parser.rb +100 -63
- data/lib/{plugin.rb → arachni/plugin.rb} +0 -0
- data/lib/{plugin → arachni/plugin}/base.rb +43 -6
- data/lib/{plugin → arachni/plugin}/manager.rb +40 -13
- data/lib/{report.rb → arachni/report.rb} +0 -0
- data/lib/{report → arachni/report}/base.rb +43 -2
- data/lib/{report → arachni/report}/manager.rb +7 -18
- data/lib/arachni/rpc/client/base.rb +42 -0
- data/lib/{rpc/xml → arachni/rpc}/client/dispatcher.rb +12 -13
- data/lib/arachni/rpc/client/instance.rb +62 -0
- data/lib/arachni/rpc/server/base.rb +51 -0
- data/lib/arachni/rpc/server/dispatcher.rb +438 -0
- data/lib/arachni/rpc/server/framework.rb +1163 -0
- data/lib/arachni/rpc/server/instance.rb +184 -0
- data/lib/{rpc/xml → arachni/rpc}/server/module/manager.rb +8 -5
- data/lib/arachni/rpc/server/node.rb +267 -0
- data/lib/{rpc/xml → arachni/rpc}/server/options.rb +6 -35
- data/lib/{rpc/xml → arachni/rpc}/server/output.rb +29 -3
- data/lib/{rpc/xml → arachni/rpc}/server/plugin/manager.rb +5 -6
- data/lib/{ruby.rb → arachni/ruby.rb} +1 -2
- data/lib/arachni/ruby/array.rb +31 -0
- data/lib/{ruby → arachni/ruby}/object.rb +1 -1
- data/lib/{ruby → arachni/ruby}/string.rb +1 -1
- data/lib/{spider.rb → arachni/spider.rb} +83 -110
- data/lib/arachni/typhoeus/hydra.rb +7 -0
- data/lib/{typhoeus → arachni/typhoeus}/request.rb +11 -9
- data/lib/{typhoeus → arachni/typhoeus}/response.rb +4 -0
- data/lib/{ui → arachni/ui}/cli/cli.rb +154 -84
- data/lib/{ui → arachni/ui}/cli/output.rb +57 -19
- data/lib/{ui/xmlrpc → arachni/ui/rpc}/dispatcher_monitor.rb +11 -10
- data/lib/{ui/xmlrpc/xmlrpc.rb → arachni/ui/rpc/rpc.rb} +102 -158
- data/lib/{ui → arachni/ui}/web/addon_manager.rb +23 -3
- data/lib/arachni/ui/web/addons/autodeploy.rb +207 -0
- data/lib/{ui → arachni/ui}/web/addons/autodeploy/lib/manager.rb +142 -35
- data/lib/arachni/ui/web/addons/autodeploy/views/index.erb +291 -0
- data/lib/{ui → arachni/ui}/web/addons/sample.rb +1 -1
- data/lib/{ui → arachni/ui}/web/addons/sample/views/index.erb +0 -0
- data/lib/{ui → arachni/ui}/web/addons/scheduler.rb +30 -22
- data/lib/{ui → arachni/ui}/web/addons/scheduler/views/index.erb +56 -22
- data/lib/{ui → arachni/ui}/web/addons/scheduler/views/options.erb +0 -0
- data/lib/arachni/ui/web/dispatcher_manager.rb +274 -0
- data/lib/arachni/ui/web/instance_manager.rb +69 -0
- data/lib/{ui → arachni/ui}/web/log.rb +1 -1
- data/lib/arachni/ui/web/output_stream.rb +54 -0
- data/lib/{ui → arachni/ui}/web/report_manager.rb +48 -54
- data/lib/{ui → arachni/ui}/web/scheduler.rb +42 -47
- data/lib/arachni/ui/web/server.rb +1197 -0
- data/lib/{ui → arachni/ui}/web/server/db/placeholder +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/banner.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/bodybg-small.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/bodybg.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/pbar-ani.gif +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_flat_0_aaaaaa_40x100.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_flat_75_ffffff_40x100.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_glass_55_fbf9ee_1x400.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_glass_65_ffffff_1x400.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_glass_75_dadada_1x400.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_glass_75_e6e6e6_1x400.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_glass_95_fef1ec_1x400.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-bg_highlight-soft_75_cccccc_1x100.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-icons_222222_256x240.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-icons_2e83ff_256x240.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-icons_454545_256x240.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-icons_888888_256x240.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/images/ui-icons_cd0a0a_256x240.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/css/smoothness/jquery-ui-1.8.9.custom.css +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/favicon.ico +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/footer.jpg +0 -0
- data/lib/{ui/web/server/public/icons/error.png → arachni/ui/web/server/public/icons/bad.png} +0 -0
- data/lib/arachni/ui/web/server/public/icons/error.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/icons/info.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/icons/ok.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/icons/status.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/js/jquery-1.4.4.min.js +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/js/jquery-ui-1.8.9.custom.min.js +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/js/jquery-ui-timepicker.js +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/logo.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/nav-left.jpg +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/nav-right.jpg +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/nav-selected-left.jpg +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/nav-selected-right.jpg +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/plugins/sample/style.css +0 -0
- data/lib/{ui/web/server/tmp → arachni/ui/web/server/public/reports}/placeholder +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/sidebar-bottom.jpg +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/sidebar-h4.jpg +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/sidebar-top.jpg +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/spider.png +0 -0
- data/lib/{ui → arachni/ui}/web/server/public/style.css +3 -2
- data/lib/arachni/ui/web/server/tmp/placeholder +0 -0
- data/lib/{ui → arachni/ui}/web/server/views/addon.erb +0 -0
- data/lib/{ui → arachni/ui}/web/server/views/addons.erb +0 -0
- data/lib/{ui → arachni/ui}/web/server/views/dispatcher_error.erb +0 -0
- data/lib/arachni/ui/web/server/views/dispatchers.erb +175 -0
- data/lib/arachni/ui/web/server/views/dispatchers_edit.erb +71 -0
- data/lib/arachni/ui/web/server/views/error.erb +22 -0
- data/lib/{ui → arachni/ui}/web/server/views/flash.erb +2 -2
- data/lib/arachni/ui/web/server/views/home.erb +60 -0
- data/lib/{ui → arachni/ui}/web/server/views/instance.erb +55 -75
- data/lib/arachni/ui/web/server/views/js/home.erb +32 -0
- data/lib/{ui → arachni/ui}/web/server/views/layout.erb +2 -2
- data/lib/{ui → arachni/ui}/web/server/views/log.erb +0 -0
- data/lib/arachni/ui/web/server/views/module.erb +30 -0
- data/lib/{ui → arachni/ui}/web/server/views/modules.erb +2 -22
- data/lib/{ui → arachni/ui}/web/server/views/options.erb +0 -0
- data/lib/{ui → arachni/ui}/web/server/views/output_results.erb +4 -4
- data/lib/{ui → arachni/ui}/web/server/views/plugins.erb +23 -12
- data/lib/{ui → arachni/ui}/web/server/views/report_formats.erb +1 -1
- data/lib/{ui → arachni/ui}/web/server/views/reports.erb +1 -1
- data/lib/{ui → arachni/ui}/web/server/views/settings.erb +59 -16
- data/lib/{ui → arachni/ui}/web/server/views/welcome.erb +3 -1
- data/lib/{ui → arachni/ui}/web/utilities.rb +8 -3
- data/lib/arachni/version.rb +16 -0
- data/modules/audit/code_injection.rb +11 -20
- data/modules/audit/code_injection_timing.rb +2 -6
- data/modules/audit/csrf.rb +8 -16
- data/modules/audit/ldapi.rb +5 -11
- data/modules/audit/os_cmd_injection.rb +5 -9
- data/modules/audit/os_cmd_injection_timing.rb +4 -8
- data/modules/audit/path_traversal.rb +7 -13
- data/modules/audit/response_splitting.rb +8 -21
- data/modules/audit/rfi.rb +6 -46
- data/modules/audit/sqli.rb +5 -11
- data/modules/audit/sqli/regexp_ids.txt +0 -6
- data/modules/audit/sqli_blind_rdiff.rb +5 -10
- data/modules/audit/sqli_blind_timing.rb +4 -9
- data/modules/audit/trainer.rb +6 -12
- data/modules/audit/unvalidated_redirect.rb +6 -17
- data/modules/audit/xpath.rb +5 -12
- data/modules/audit/xss.rb +37 -23
- data/modules/audit/xss_event.rb +5 -10
- data/modules/audit/xss_path.rb +47 -41
- data/modules/audit/xss_script_tag.rb +5 -10
- data/modules/audit/xss_tag.rb +5 -10
- data/modules/audit/xss_uri.rb +17 -89
- data/modules/recon/allowed_methods.rb +6 -15
- data/modules/recon/backdoors.rb +12 -52
- data/modules/recon/backup_files.rb +25 -88
- data/modules/recon/common_directories.rb +8 -54
- data/modules/recon/common_files.rb +7 -58
- data/modules/recon/directory_listing.rb +6 -15
- data/modules/recon/grep/captcha.rb +1 -1
- data/modules/recon/grep/credit_card.rb +62 -27
- data/modules/recon/grep/cvs_svn_users.rb +1 -1
- data/modules/recon/grep/emails.rb +1 -1
- data/modules/recon/grep/html_objects.rb +1 -1
- data/modules/recon/grep/private_ip.rb +1 -1
- data/modules/recon/grep/ssn.rb +9 -9
- data/modules/recon/htaccess_limit.rb +6 -14
- data/modules/recon/http_put.rb +7 -15
- data/modules/recon/interesting_responses.rb +7 -13
- data/modules/recon/mixed_resource.rb +100 -0
- data/modules/recon/unencrypted_password_forms.rb +8 -20
- data/modules/recon/webdav.rb +6 -16
- data/modules/recon/xst.rb +7 -13
- data/path_extractors/anchors.rb +1 -1
- data/path_extractors/forms.rb +1 -1
- data/path_extractors/frames.rb +1 -1
- data/path_extractors/generic.rb +47 -3
- data/path_extractors/links.rb +1 -1
- data/path_extractors/meta_refresh.rb +1 -1
- data/path_extractors/scripts.rb +3 -4
- data/path_extractors/sitemap.rb +1 -1
- data/plugins/autologin.rb +9 -18
- data/plugins/beep_notify.rb +51 -0
- data/plugins/cookie_collector.rb +12 -12
- data/plugins/defaults/autothrottle.rb +86 -0
- data/plugins/{content_types.rb → defaults/content_types.rb} +25 -19
- data/plugins/{healthmap.rb → defaults/healthmap.rb} +30 -18
- data/plugins/defaults/metamodules/remedies/discovery.rb +164 -0
- data/plugins/defaults/metamodules/remedies/manual_verification.rb +65 -0
- data/{metamodules/timeout_notice.rb → plugins/defaults/metamodules/remedies/timing_attacks.rb} +26 -22
- data/{metamodules → plugins/defaults/metamodules}/uniformity.rb +15 -14
- data/plugins/{profiler.rb → defaults/profiler.rb} +19 -30
- data/plugins/defaults/resolver.rb +55 -0
- data/plugins/email_notify.rb +108 -0
- data/plugins/form_dicattack.rb +8 -16
- data/plugins/http_dicattack.rb +4 -12
- data/plugins/libnotify.rb +86 -0
- data/plugins/proxy.rb +8 -17
- data/plugins/proxy/server.rb +3 -3
- data/plugins/rescan.rb +60 -0
- data/plugins/waf_detector.rb +5 -16
- data/profiles/full.afp +3 -30
- data/reports/afr.rb +2 -5
- data/reports/ap.rb +3 -1
- data/reports/html.rb +210 -68
- data/reports/html/default.erb +72 -1014
- data/reports/html/default/configuration.erb +126 -0
- data/reports/html/default/css/jquery-ui.css +570 -0
- data/reports/html/default/css/jquery.jqplot.min.css +1 -0
- data/reports/html/default/css/main.css +391 -0
- data/reports/html/default/issue.erb +189 -0
- data/reports/html/default/issues.erb +65 -0
- data/reports/html/default/js/charts.js +146 -0
- data/reports/html/default/js/helpers.js +95 -0
- data/reports/html/default/js/init.js +73 -0
- data/reports/html/default/js/lib/jqplot.barRenderer.min.js +57 -0
- data/reports/html/default/js/lib/jqplot.categoryAxisRenderer.min.js +57 -0
- data/reports/html/default/js/lib/jqplot.cursor.min.js +57 -0
- data/reports/html/default/js/lib/jqplot.pieRenderer.min.js +57 -0
- data/reports/html/default/js/lib/jqplot.pointLabels.min.js +57 -0
- data/reports/html/default/js/lib/jquery-ui.min.js +404 -0
- data/reports/html/default/js/lib/jquery.jqplot.min.js +57 -0
- data/reports/html/default/js/lib/jquery.min.js +167 -0
- data/reports/html/default/plugins.erb +22 -0
- data/reports/html/default/search.erb +8 -0
- data/reports/html/default/sitemap.erb +15 -0
- data/reports/html/default/summary.erb +68 -0
- data/reports/html/default/summary_issue.erb +19 -0
- data/reports/json.rb +51 -0
- data/reports/marshal.rb +49 -0
- data/reports/metareport.rb +4 -6
- data/reports/metareport/arachni_metareport.rb +1 -1
- data/reports/plugin_formatters/html/autologin.rb +30 -41
- data/reports/plugin_formatters/html/content_types.rb +1 -10
- data/reports/plugin_formatters/html/cookie_collector.rb +36 -44
- data/reports/plugin_formatters/html/discovery.rb +50 -0
- data/reports/plugin_formatters/html/form_dicattack.rb +24 -32
- data/reports/plugin_formatters/html/healthmap.rb +45 -54
- data/reports/plugin_formatters/html/http_dicattack.rb +24 -32
- data/reports/plugin_formatters/html/profiler.rb +17 -48
- data/reports/plugin_formatters/html/profiler/template.erb +6 -99
- data/reports/plugin_formatters/html/resolver.rb +63 -0
- data/reports/plugin_formatters/html/{metaformatters/timeout_notice.rb → timing_attacks.rb} +7 -19
- data/reports/plugin_formatters/html/{metaformatters/uniformity.rb → uniformity.rb} +5 -17
- data/reports/plugin_formatters/html/waf_detector.rb +24 -32
- data/reports/plugin_formatters/stdout/autologin.rb +30 -35
- data/reports/plugin_formatters/stdout/content_types.rb +41 -46
- data/reports/plugin_formatters/stdout/cookie_collector.rb +33 -38
- data/reports/plugin_formatters/stdout/discovery.rb +47 -0
- data/reports/plugin_formatters/stdout/form_dicattack.rb +27 -32
- data/reports/plugin_formatters/stdout/healthmap.rb +47 -51
- data/reports/plugin_formatters/stdout/http_dicattack.rb +27 -32
- data/reports/plugin_formatters/stdout/metamodules.rb +48 -55
- data/reports/plugin_formatters/stdout/profiler.rb +60 -65
- data/reports/plugin_formatters/stdout/resolver.rb +45 -0
- data/reports/plugin_formatters/stdout/{metaformatters/timeout_notice.rb → timing_attacks.rb} +6 -14
- data/reports/plugin_formatters/stdout/{metaformatters/uniformity.rb → uniformity.rb} +6 -14
- data/reports/plugin_formatters/stdout/waf_detector.rb +23 -28
- data/reports/plugin_formatters/xml/autologin.rb +36 -41
- data/reports/plugin_formatters/xml/content_types.rb +47 -52
- data/reports/plugin_formatters/xml/cookie_collector.rb +39 -44
- data/reports/plugin_formatters/xml/discovery.rb +54 -0
- data/reports/plugin_formatters/xml/form_dicattack.rb +22 -27
- data/reports/plugin_formatters/xml/healthmap.rb +53 -58
- data/reports/plugin_formatters/xml/http_dicattack.rb +22 -27
- data/reports/plugin_formatters/xml/profiler.rb +61 -77
- data/reports/plugin_formatters/xml/resolver.rb +53 -0
- data/reports/plugin_formatters/xml/{metaformatters/timeout_notice.rb → timing_attacks.rb} +3 -15
- data/reports/plugin_formatters/xml/{metaformatters/uniformity.rb → uniformity.rb} +4 -14
- data/reports/plugin_formatters/xml/waf_detector.rb +23 -28
- data/reports/stdout.rb +1 -1
- data/reports/txt.rb +2 -5
- data/reports/xml.rb +2 -5
- data/reports/xml/buffer.rb +6 -2
- data/reports/yaml.rb +49 -0
- metadata +419 -278
- data/bin/arachni_xmlrpc +0 -21
- data/bin/arachni_xmlrpcd +0 -82
- data/bin/arachni_xmlrpcd_monitor +0 -74
- data/getoptslong.rb +0 -242
- data/lib/anemone.rb +0 -2
- data/lib/framework.rb +0 -673
- data/lib/module/manager.rb +0 -111
- data/lib/options.rb +0 -547
- data/lib/rpc/xml/client/base.rb +0 -76
- data/lib/rpc/xml/client/instance.rb +0 -88
- data/lib/rpc/xml/server/base.rb +0 -112
- data/lib/rpc/xml/server/dispatcher.rb +0 -386
- data/lib/rpc/xml/server/framework.rb +0 -206
- data/lib/rpc/xml/server/instance.rb +0 -191
- data/lib/ruby/xmlrpc/server.rb +0 -27
- data/lib/ui/web/addons/autodeploy.rb +0 -172
- data/lib/ui/web/addons/autodeploy/views/index.erb +0 -124
- data/lib/ui/web/dispatcher_manager.rb +0 -165
- data/lib/ui/web/instance_manager.rb +0 -87
- data/lib/ui/web/output_stream.rb +0 -94
- data/lib/ui/web/server.rb +0 -925
- data/lib/ui/web/server/public/reports/placeholder +0 -1
- data/lib/ui/web/server/views/dispatchers.erb +0 -100
- data/lib/ui/web/server/views/dispatchers_edit.erb +0 -42
- data/lib/ui/web/server/views/error.erb +0 -1
- data/lib/ui/web/server/views/home.erb +0 -25
- data/metamodules/autothrottle.rb +0 -74
- data/plugins/metamodules.rb +0 -118
- data/profiles/comprehensive.afp +0 -74
- data/reports/plugin_formatters/html/metamodules.rb +0 -93
- data/reports/plugin_formatters/xml/metamodules.rb +0 -91
@@ -0,0 +1,1163 @@
|
|
1
|
+
=begin
|
2
|
+
Arachni
|
3
|
+
Copyright (c) 2010-2012 Tasos "Zapotek" Laskos <tasos.laskos@gmail.com>
|
4
|
+
|
5
|
+
This is free software; you can copy and distribute and modify
|
6
|
+
this program under the term of the GPL v2.0 License
|
7
|
+
(See LICENSE file for details)
|
8
|
+
|
9
|
+
=end
|
10
|
+
|
11
|
+
require 'ap'
|
12
|
+
require 'pp'
|
13
|
+
require 'em-synchrony'
|
14
|
+
|
15
|
+
module Arachni
|
16
|
+
|
17
|
+
require Options.instance.dir['lib'] + 'framework'
|
18
|
+
require Options.instance.dir['lib'] + 'rpc/server/module/manager'
|
19
|
+
require Options.instance.dir['lib'] + 'rpc/server/plugin/manager'
|
20
|
+
|
21
|
+
module RPC
|
22
|
+
class Server
|
23
|
+
|
24
|
+
#
|
25
|
+
# Wraps the framework of the local instance and the frameworks of all
|
26
|
+
# remote slaves (when in High Performance Grid mode) into a neat, little,
|
27
|
+
# easy to handle package.
|
28
|
+
#
|
29
|
+
# @author: Tasos "Zapotek" Laskos
|
30
|
+
# <tasos.laskos@gmail.com>
|
31
|
+
# <zapotek@segfault.gr>
|
32
|
+
# @version: 0.1
|
33
|
+
#
|
34
|
+
class Framework < ::Arachni::Framework
|
35
|
+
|
36
|
+
include Arachni::Module::Utilities
|
37
|
+
|
38
|
+
# make this inherited methods visible again
|
39
|
+
private :audit_store, :stats, :paused?, :lsmod, :lsplug, :version, :revision
|
40
|
+
public :audit_store, :stats, :paused?, :lsmod, :lsplug, :version, :revision
|
41
|
+
|
42
|
+
alias :old_clean_up! :clean_up!
|
43
|
+
alias :auditstore :audit_store
|
44
|
+
|
45
|
+
private :old_clean_up!
|
46
|
+
|
47
|
+
attr_reader :instances, :opts, :modules, :plugins
|
48
|
+
|
49
|
+
MAX_CONCURRENCY = 20
|
50
|
+
MIN_PAGES_PER_INSTANCE = 30
|
51
|
+
|
52
|
+
#
# Prepares the wrapped framework along with the RPC-aware module and
# plug-in managers, and initializes the bookkeeping state used when
# running as part of a High Performance Grid.
#
# @param [Arachni::Options] opts
#
def initialize( opts )
    super( opts )

    # RPC-aware drop-in replacements for the vanilla managers
    @modules = Arachni::RPC::Server::Module::Manager.new( opts )
    @plugins = Arachni::RPC::Server::Plugin::Manager.new( self )

    # slave instances spawned by this (master) instance
    @instances = []

    # set once the initial crawl has completed (HPG mode)
    @crawling_done    = nil
    @override_sitemap = []

    # when running as a slave this holds the master's URL,
    # empty string otherwise
    @master_url = ''

    # token used to authorize privileged calls on the local instance
    @local_token = gen_token
end
|
69
|
+
|
70
|
+
#
|
71
|
+
# Returns the results of the plug-ins
|
72
|
+
#
|
73
|
+
# @return [Hash] plugin name => result
|
74
|
+
#
|
75
|
+
#
# Accessor for the results gathered by the plug-ins so far.
#
# @return [Hash]  plugin name => result
#
def get_plugin_store
    @plugin_store
end
|
78
|
+
|
79
|
+
#
|
80
|
+
# Returns true if the system is scanning, false if {#run} hasn't been called yet or
|
81
|
+
# if the scan has finished.
|
82
|
+
#
|
83
|
+
# @param [Bool] include_slaves take slave status into account too? <br/>
|
84
|
+
# If so, it will only return false if slaves
|
85
|
+
# are done too.
|
86
|
+
#
|
87
|
+
# @param [Proc] &block block to which to pass the result
|
88
|
+
#
|
89
|
+
#
# Reports whether a scan is in progress: false if {#run} hasn't been
# called yet or if the scan has finished.
#
# @param [Bool] include_slaves  when true, only report "not busy" once
#                               the slaves are done as well
#
# @param [Proc] &block  block to which to pass the result
#
def busy?( include_slaves = true, &block )
    local_status = extended_running?

    # with no slaves (or when told to ignore them) the local
    # status alone is the answer
    if !include_slaves || @instances.empty?
        block.call( local_status ) if block_given?
        return
    end

    # asks one slave for its own busy status
    poll_slave = proc do |instance, iter|
        connect_to_instance( instance ).framework.busy? do |res|
            iter.return( res )
        end
    end

    # aggregates local + slave statuses: busy while any instance still runs
    aggregate = proc do |slave_statuses|
        statuses = ( [local_status] + slave_statuses ).flatten
        block.call( statuses.any? { |status| status } )
    end

    ::EM::Iterator.new( @instances, @instances.size ).map( poll_slave, aggregate )
end
|
111
|
+
|
112
|
+
#
|
113
|
+
# Returns an array containing information about all available plug-ins.
|
114
|
+
#
|
115
|
+
# @return [Array<Hash>]
|
116
|
+
#
|
117
|
+
#
# Returns an array containing information about all available plug-ins,
# with each plug-in's component options serialized to hashes so they can
# travel over RPC.
#
# @return [Array<Hash>]
#
def lsplug
    super.map do |plugin|
        raw_opts = [plugin[:options]].flatten.compact
        plugin[:options] = raw_opts.map do |opt|
            # merge in the option's type since to_h doesn't include it
            opt.to_h.merge( 'type' => opt.type )
        end
        plugin
    end
end
|
133
|
+
|
134
|
+
#
|
135
|
+
# Returns true if running in HPG (High Performance Grid) mode and we're the master,
|
136
|
+
# false otherwise.
|
137
|
+
#
|
138
|
+
# @return [Bool]
|
139
|
+
#
|
140
|
+
#
# Reports whether we're the master of a High Performance Grid scan.
#
# @return [Bool]  true when running in HPG mode as the master,
#                 false otherwise
#
def high_performance?
    @opts.grid_mode == 'high_performance'
end
|
143
|
+
|
144
|
+
#
|
145
|
+
# Starts the audit.
|
146
|
+
#
|
147
|
+
# @return [Bool] false if already running, true otherwise
|
148
|
+
#
|
149
|
+
#
# Starts the audit.
#
# In HPG (High Performance Grid) mode the crawl is performed up-front,
# the discovered workload is split at the element level and distributed
# across freshly spawned slave instances before the local audit begins.
# In plain mode it simply kicks off the regular framework audit.
#
# @return [Bool]  false if already running, true otherwise
#
def run
    # refuse to start twice
    return false if extended_running?

    @extended_running = true
    ::EM.defer do
        if high_performance?
            #
            # HPG mode: analyze pages prior to assigning them to each
            # instance at the element level for granular control over the
            # assigned workload.
            #
            # Elements common to many pages (search forms, logout links
            # and the like) would otherwise be audited by several
            # instances at once, wasting bandwidth -- so each instance is
            # restricted to its own share of elements.
            #
            # NOTE(review): previously unseen elements appearing during
            # the audit are not subject to these restrictions.
            #
            crawled_pages = ::Arachni::Database::Hash.new

            # run plug-ins and start the timer for the local instance
            prepare

            # some plug-ins may need the system to wait for them before
            # moving on -- take our cue from the local framework
            sleep( 0.2 ) while paused?

            # crawl and collect every page, recording the sitemap as we go
            Arachni::Spider.new( @opts ).run do |page|
                @override_sitemap << page.url
                crawled_pages[page.url] = page
            end
            @crawling_done = true

            # plug-ins may have pushed extra pages onto the framework's
            # queue; drain it so those pages get distributed too
            queued_pages = []
            until @page_queue.empty?
                page = @page_queue.pop
                break unless page
                queued_pages << page
                crawled_pages[page.url] = page
            end

            # grab Dispatchers with unique Pipe IDs to take advantage of
            # line aggregation
            prefered_dispatchers do |dispatchers|

                # split the crawled URLs into equal chunks, one per instance
                url_chunks = split_urls( crawled_pages.keys, dispatchers )

                if !url_chunks.empty?
                    # split the queued pages across the instances too
                    queued_chunks = queued_pages.chunk( url_chunks.size )

                    # keep our own fair share of the plug-in discovered pages
                    update_page_queue!( queued_chunks.pop, @local_token )

                    # de-duplicate elements across the per-instance chunks
                    # while spreading them out evenly
                    elements = distribute_elements( url_chunks, crawled_pages )

                    # empty out the Hash and remove temporary files
                    crawled_pages.clear

                    # the local instance audits only its assigned elements...
                    restrict_to_elements!( elements.pop, @local_token )

                    # ...and only its assigned URLs
                    @opts.restrict_paths = url_chunks.pop

                    # hand each remaining chunk of URLs/pages/elements to a
                    # freshly spawned slave and set it running
                    url_chunks.each_with_index do |chunk, i|
                        spawn( chunk, queued_chunks[i], elements[i], dispatchers[i] ) do |inst|
                            @instances << inst
                        end
                    end
                end

                # run the local audit off the reactor thread
                Thread.new do
                    audit
                    old_clean_up!
                    @extended_running = false
                end
            end
        else
            # plain mode: just run the local instance, nothing special
            Thread.new do
                super
                @extended_running = false
            end
        end
    end

    true
end
|
293
|
+
|
294
|
+
#
|
295
|
+
# If the scan needs to be aborted abruptly this method takes care of
|
296
|
+
# any unfinished business (like running plug-ins).
|
297
|
+
#
|
298
|
+
# Should be called before grabbing the {#auditstore}, especially when
|
299
|
+
# running in HPG mode as it will take care of merging the plug-in results
|
300
|
+
# of all instances.
|
301
|
+
#
|
302
|
+
# @param [Proc] &block block to be called once the cleanup has finished
|
303
|
+
#
|
304
|
+
#
# Takes care of any unfinished business (like running plug-ins) when the
# scan needs to be aborted abruptly.
#
# Should be called before grabbing the {#auditstore}, especially in HPG
# mode, as it merges the plug-in results of all instances.
#
# @param [Proc] &block  block to be called once the cleanup has finished
#
def clean_up!( &block )
    old_clean_up!( true )

    # without slaves there's nothing to merge
    if @instances.empty?
        block.call if block_given?
        return
    end

    # cleans up one slave and collects its plug-in results
    # (nil on RPC failure)
    collect_slave_results = proc do |instance, iter|
        conn = connect_to_instance( instance )

        conn.framework.clean_up! do
            conn.framework.get_plugin_store do |res|
                iter.return( res.rpc_exception? ? nil : res )
            end
        end
    end

    # merges everything (including our own results) and signals completion
    merge_results = proc do |results|
        results.compact!
        results << get_plugin_store
        update_plugin_results!( results )
        block.call
    end

    ::EM::Iterator.new( @instances, @instances.size ).map( collect_slave_results,
                                                           merge_results )
end
|
331
|
+
|
332
|
+
#
|
333
|
+
# Pauses the running scan on a best effort basis.
|
334
|
+
#
|
335
|
+
#
# Pauses the running scan on a best effort basis -- locally first, then
# on every slave instance.
#
def pause!
    super

    ::EM::Iterator.new( @instances, @instances.size ).each do |instance, iter|
        connect_to_instance( instance ).framework.pause! { iter.next }
    end

    true
end
|
343
|
+
|
344
|
+
#
|
345
|
+
# Resumes a paused scan right away.
|
346
|
+
#
|
347
|
+
#
# Resumes a paused scan right away -- locally first, then on every
# slave instance.
#
def resume!
    super

    ::EM::Iterator.new( @instances, @instances.size ).each do |instance, iter|
        connect_to_instance( instance ).framework.resume! { iter.next }
    end

    true
end
|
355
|
+
|
356
|
+
#
|
357
|
+
# Returns the master's URL
|
358
|
+
#
|
359
|
+
# @return [String]
|
360
|
+
#
|
361
|
+
#
# Returns the master's URL (empty string when we have no master).
#
# @return [String]
#
def master
    @master_url
end
|
364
|
+
|
365
|
+
#
|
366
|
+
# Returns the merged output of all running instances.
|
367
|
+
#
|
368
|
+
# This is going probably to be wildly out of sync and lack A LOT of messages.
|
369
|
+
#
|
370
|
+
# It's here to give the notion of scan progress to the end-user rather than
|
371
|
+
# provide an accurate depiction of the actual progress.
|
372
|
+
#
|
373
|
+
# @param [Proc] &block block to which to pass the result
|
374
|
+
#
|
375
|
+
#
# Returns the merged output of all running instances.
#
# This is probably going to be wildly out of sync and lack A LOT of
# messages; it exists to give the end-user a notion of scan progress
# rather than an accurate depiction of it.
#
# @param [Proc] &block  block to which to pass the result
#
def output( &block )
    local_messages = flush_buffer

    # nothing to merge without slaves
    if @instances.empty?
        block.call( local_messages )
        return
    end

    fetch_slave_output = proc do |instance, iter|
        connect_to_instance( instance ).service.output do |out|
            iter.return( out )
        end
    end

    merge_output = proc do |slave_messages|
        block.call( ( local_messages | slave_messages ).flatten )
    end

    ::EM::Iterator.new( @instances, MAX_CONCURRENCY ).map( fetch_slave_output,
                                                           merge_output )
end
|
395
|
+
|
396
|
+
#
# Returns the status of the instance as a string.
#
# Possible values are:
#  * crawling
#  * paused
#  * done
#  * busy
#
# @return [String]
#
def status
    # in HPG mode the master tracks the crawl via @crawling_done; in
    # plain mode an empty current page means the crawler is still at work
    crawling = ( !@crawling_done && master.empty? && high_performance? ) ||
        ( master.empty? && !high_performance? && stats[:current_page].empty? )

    return 'crawling' if crawling
    return 'paused'   if paused?
    return 'done'     if !extended_running?
    'busy'
end
|
419
|
+
|
420
|
+
#
# Returns aggregated progress data and helps to limit the amount of calls
# required in order to get an accurate depiction of a scan's progress and includes:
#  * output messages
#  * discovered issues
#  * overall statistics
#  * overall scan status
#  * statistics of all instances individually
#
# @param [Hash] opts contains info about what data to return:
#  * :messages -- include output messages
#  * :slaves   -- include slave data
#  * :issues   -- include issue summaries
#  Uses an implicit include for the above (i.e. nil will be considered true).
#  * :as_hash  -- if set to true will convert issues to hashes before returning
#
# @param [Proc] &block block to which to pass the result
#
def progress_data( opts= {}, &block )

    # the three include flags default to true only when absent (nil);
    # an explicit false must be honored
    if opts[:messages].nil?
        include_messages = true
    else
        include_messages = opts[:messages]
    end

    if opts[:slaves].nil?
        include_slaves = true
    else
        include_slaves = opts[:slaves]
    end

    if opts[:issues].nil?
        include_issues = true
    else
        include_issues = opts[:issues]
    end

    # :as_hash defaults to false when absent
    if opts[:as_hash]
        as_hash = true
    else
        as_hash = opts[:as_hash]
    end

    data = {
        'stats'  => {},
        'status' => status,
        'busy'   => extended_running?
    }

    data['messages'] = flush_buffer if include_messages

    if include_issues
        data['issues'] = as_hash ? issues_as_hash : issues
    end

    data['instances'] = {} if include_slaves

    # local `stats` Array shadows the #stats method from here on; the
    # parenthesized call below still reaches the method
    stats = []
    stat_hash = {}
    stats( true, true ).each {
        |k, v|
        stat_hash[k.to_s] = v
    }

    # when running under a Dispatcher, list ourselves among the instances
    if @opts.datastore[:dispatcher_url] && include_slaves
        data['instances'][self_url] = stat_hash.dup
        data['instances'][self_url]['url'] = self_url
        data['instances'][self_url]['status'] = status
    end

    stats << stat_hash

    # no slaves (or slave data not wanted) -- return local data only
    if @instances.empty? || !include_slaves
        data['stats'] = merge_stats( stats )
        data['instances'] = data['instances'].values if include_slaves
        block.call( data )
        return
    end

    # gather progress data from every slave concurrently, then merge
    ::EM::Iterator.new( @instances, MAX_CONCURRENCY ).map( proc {
        |instance, iter|
        connect_to_instance( instance ).framework.progress_data( opts ) {
            |tmp|
            if !tmp.rpc_exception?
                tmp['url'] = instance['url']
                iter.return( tmp )
            else
                # unreachable slaves are dropped (compacted out below)
                iter.return( nil )
            end
        }
    }, proc {
        |slave_data|

        slave_data.compact!
        slave_data.each {
            |slave|
            data['messages'] |= slave['messages'] if include_messages
            data['issues'] |= slave['issues'] if include_issues

            if include_slaves
                url = slave['url']
                data['instances'][url] = slave['stats']
                data['instances'][url]['url'] = url
                data['instances'][url]['status'] = slave['status']
            end

            stats << slave['stats']
        }

        # present instances sorted by URL for stable output
        if include_slaves
            sorted_data_instances = {}
            data['instances'].keys.sort.each {
                |url|
                sorted_data_instances[url] = data['instances'][url]
            }
            data['instances'] = sorted_data_instances.values
        end

        data['stats'] = merge_stats( stats )
        block.call( data )
    })
end
|
544
|
+
|
545
|
+
#
# Returns the results of the audit as a hash.
#
# @return [Hash]
#
def report
    audit_store.to_h
end
# legacy names kept so existing RPC clients keep working
alias :audit_store_as_hash :report
alias :auditstore_as_hash :report
|
555
|
+
|
556
|
+
#
# @return [String] YAML representation of {#auditstore}
#
def serialized_auditstore
    audit_store.to_yaml
end
|
562
|
+
|
563
|
+
#
# @return [String] YAML representation of {#report} (the audit store
#   converted to a Hash first, unlike {#serialized_auditstore})
#
def serialized_report
    audit_store.to_h.to_yaml
end
|
569
|
+
|
570
|
+
#
# Returns an array containing summaries of all discovered issues
# (i.e. with their variations stripped).
#
# @return [Array<Arachni::Issue>]
#
def issues
    audit_store.issues.map do |issue|
        # deep-clone so that emptying the variations doesn't
        # mutate the issues held by the audit store
        summary = issue.deep_clone
        summary.variations = []
        summary
    end
end
|
583
|
+
|
584
|
+
#
# Returns the return value of {#issues} as an Array of hashes.
#
# @return [Array<Hash>]
#
# @see #issues
#
def issues_as_hash
    issues.map( &:to_h )
end
|
594
|
+
|
595
|
+
#
|
596
|
+
# The following methods need to be accessible over RPC but are *privileged*.
|
597
|
+
#
|
598
|
+
# They're used for intra-Grid communication between masters and their slaves
|
599
|
+
#
|
600
|
+
#
|
601
|
+
|
602
|
+
#
# Restricts the scope of the audit to individual elements.
#
# @param [Array] elements list of element IDs
# @param [String] token privileged token, prevents this method
#   from being called by 3rd parties
#
# @return [Bool] true on success, false on invalid token
#
def restrict_to_elements!( elements, token = nil )
    # only privileged callers may restrict an HPG scan
    return false if high_performance? && !valid_token?( token )

    ::Arachni::Element::Auditable.restrict_to_elements!( elements )
    true
end
|
617
|
+
|
618
|
+
#
# Sets the URL and authentication token required to connect to our master.
#
# @param [String] url master's URL in 'hostname:port' form
# @param [String] token master's authentication token
#
# @return [Bool] true on success, false if this is the master of the HPG
#   (in which case this is not applicable)
#
def set_master( url, token )
    # the HPG master can't itself have a master
    return false if high_performance?

    @master_url = url
    @master = connect_to_instance( 'url' => url, 'token' => token )

    # as a slave we don't keep issues locally; we forward every
    # batch of results to the master as soon as it's registered
    @modules.class.do_not_store!
    @modules.class.on_register_results do |results|
        report_issues_to_master( results )
    end

    true
end
|
641
|
+
|
642
|
+
#
# Updates the page queue with the provided pages.
#
# @param [Array] pages list of pages
# @param [String] token privileged token, prevents this method
#   from being called by 3rd parties
#
# @return [Bool] true on success, false on invalid token
#
def update_page_queue!( pages, token = nil )
    return false if high_performance? && !valid_token?( token )

    pages.each do |page|
        @page_queue << page
    end
    true
end
|
656
|
+
|
657
|
+
#
# Registers an array holding {Arachni::Issue} objects with the local instance.
#
# Primarily used by slaves to register issues they find on the spot.
#
# @param [Array<Arachni::Issue>] issues
# @param [String] token privileged token, prevents this method
#   from being called by 3rd parties
#
# @return [Bool] true on success, false on invalid token
#
def register_issues( issues, token )
    return false if high_performance? && !valid_token?( token )

    @modules.class.register_results( issues )
    true
end
|
674
|
+
|
675
|
+
private
|
676
|
+
|
677
|
+
# @return [Bool] value of @extended_running -- appears to track whether
#   the overall (multi-instance) scan is still in progress; set elsewhere
#   in this class (outside this chunk), TODO confirm
def extended_running?
    @extended_running
end
|
680
|
+
|
681
|
+
# Compares the given token against our local privileged token.
#
# @param [String] token
#
# @return [Bool]
def valid_token?( token )
    token == @local_token
end
|
684
|
+
|
685
|
+
# Replaces the plug-in results store -- used by {#update_plugin_results!}
# after merging the results of all instances.
def set_plugin_store( plugin_store )
    @plugin_store = plugin_store
end
|
688
|
+
|
689
|
+
#
# Connects to a remote Instance.
#
# @param [Hash] instance the hash must hold the 'url' and the 'token';
#   in subsequent calls the 'token' can be omitted (it's cached per URL)
#
def connect_to_instance( instance )
    @tokens ||= {}

    url = instance['url']
    # remember the token so later calls can connect with the URL alone
    @tokens[url] = instance['token'] if instance['token']

    Arachni::RPC::Client::Instance.new( @opts, url, @tokens[url] )
end
|
701
|
+
|
702
|
+
#
# Reports an array of issues back to the master instance.
#
# @param [Array<Arachni::Issue>] issues
#
def report_issues_to_master( issues )
    # fire-and-forget async RPC call (the empty block keeps it non-blocking)
    @master.framework.register_issues( issues, master_priv_token ){}
    true
end
|
711
|
+
|
712
|
+
#
# Takes the plug-in results of all the instances, merges them together
# and resets the plugin store via {#set_plugin_store}.
#
# @param [Array<Hash>] results one plug-in result hash per instance
#
def update_plugin_results!( results )
    info      = {}   # per-plugin metadata (everything except :results)
    formatted = {}   # per-plugin list of :results, one entry per instance

    results.each do |plugins|
        plugins.each do |name, res|
            next if !res

            ( formatted[name] ||= [] ) << res[:results]
            # keep the metadata of the first instance that reported
            info[name] ||= res.reject { |k, _| k == :results }
        end
    end

    merged = {}
    formatted.each do |plugin, plugin_results|
        # distributable plug-ins know how to merge their own results;
        # for the rest, the first instance's results win
        res = if @plugins[plugin].distributable?
            @plugins[plugin].merge( plugin_results )
        else
            plugin_results[0]
        end

        merged[plugin] = info[plugin].merge( :results => res )
    end

    set_plugin_store( merged )
end
|
748
|
+
|
749
|
+
#
# Returns an array containing unique and evenly distributed elements per
# chunk for each instance.
#
# @param [Array<Array>] chunks URLs to be assigned to each instance
# @param [Hash] pages hash with URLs for keys and Pages for values
#
def distribute_elements( chunks, pages )

    # group together all the elements of all chunks
    elements_per_chunk = []
    chunks.each_with_index {
        |chunk, i|

        elements_per_chunk[i] ||= []
        chunk.each {
            |url|
            elements_per_chunk[i] |= build_elem_list( pages[url] )
        }
    }

    # removes elements from each chunk that are also included in other
    # chunks too
    #
    # this will leave us with the same grouping as before but without
    # duplicate elements across the chunks, albeit with a non-optimal
    # distribution amongst instances
    #
    # note: only chunks at i and beyond are scanned for duplicates, so a
    # duplicated element is kept by the LAST chunk that contains it
    unique_chunks = elements_per_chunk.map.with_index {
        |chunk, i|
        chunk.reject {
            |item|
            elements_per_chunk[i..-1].flatten.count( item ) > 1
        }
    }

    # get them into proper order to be ready for propping up
    elements_per_chunk.reverse!
    unique_chunks.reverse!

    # evenly distribute elements across chunks using the previously
    # duplicate elements
    #
    # in order for elements to be moved between chunks they need to have
    # been available in the destination to begin with, since we can't
    # assign an element to an instance which won't have a page containing
    # that element
    unique_chunks.each_with_index {
        |chunk, i|

        chunk.each {
            |item|
            next_c = unique_chunks[i+1]
            if next_c && (chunk.size > next_c.size ) &&
                elements_per_chunk[i+1].include?( item )
                unique_chunks[i].delete( item )
                next_c << item
            end
        }
    }

    # set them in the same order as the original 'chunks' group
    return unique_chunks.reverse
end
|
815
|
+
|
816
|
+
# Builds the list of auditable element IDs of a page, honoring the
# audit_links/forms/cookies options.
#
# @param [Arachni::Parser::Page] page
#
# @return [Array<String>] unique element audit IDs
def build_elem_list( page )
    # normalize the IDs: no auditor name, no timeout info, no injection
    # string -- so that IDs are comparable across instances
    id_opts = {
        :no_auditor       => true,
        :no_timeout       => true,
        :no_injection_str => true
    }

    list = []
    list |= page.links.map   { |elem| elem.audit_id( nil, id_opts ) }.uniq if @opts.audit_links
    list |= page.forms.map   { |elem| elem.audit_id( nil, id_opts ) }.uniq if @opts.audit_forms
    list |= page.cookies.map { |elem| elem.audit_id( nil, id_opts ) }.uniq if @opts.audit_cookies
    list
end
|
839
|
+
|
840
|
+
#
# Returns the dispatchers that have different Pipe IDs i.e. can be setup
# in HPG mode; pretty simple at this point.
#
# @param [Proc] &block block to which to pass the preferred dispatcher URLs
#
def prefered_dispatchers( &block )

    # keep track of the Pipe IDs we've used
    @used_pipe_ids ||= []

    # get the info of the local dispatcher since this will be our
    # frame of reference
    dispatcher.node.info {
        |info|

        # add the Pipe ID of the local Dispatcher in order to avoid it later on
        @used_pipe_ids << info['pipe_id']

        # grab the rest of the Dispatchers of the Grid
        dispatcher.node.neighbours_with_info {
            |dispatchers|

            # make sure that each Dispatcher is alive before moving on
            ::EM::Iterator.new( dispatchers, MAX_CONCURRENCY ).map( proc {
                |dispatcher, iter|
                connect_to_dispatcher( dispatcher['url'] ).stats {
                    |res|
                    if !res.rpc_exception?
                        iter.return( res )
                    else
                        iter.return( nil )
                    end
                }
            }, proc {
                |dispatchers|

                # get the Dispatchers with unique Pipe IDs and send them
                # to the block
                #
                # NOTE(review): unreachable dispatchers come through as
                # nil here and are NOT compacted before being passed to
                # pick_dispatchers -- verify pick_dispatchers tolerates
                # nil entries

                pref_dispatcher_urls = []
                pick_dispatchers( dispatchers ).each {
                    |dispatcher|
                    if !@used_pipe_ids.include?( dispatcher['node']['pipe_id'] )
                        @used_pipe_ids << dispatcher['node']['pipe_id']
                        pref_dispatcher_urls << dispatcher['node']['url']
                    end
                }

                block.call( pref_dispatcher_urls )
            })
        }
    }
end
|
892
|
+
|
893
|
+
#
# Splits URLs into chunks -- one chunk per instance -- while taking into
# account a minimum amount of URLs per instance.
#
# @param [Array<String>] urls URLs to distribute
# @param [Array] dispatchers dispatchers that will each get a chunk
#   (only their count matters here)
#
# @return [Array<Array<String>>]
#
def split_urls( urls, dispatchers )
    # figure out the min amount of pages per chunk; the rescue guards
    # against an @opts object that doesn't respond to the option
    begin
        if @opts.min_pages_per_instance && @opts.min_pages_per_instance.to_i > 0
            min_pages_per_instance = @opts.min_pages_per_instance.to_i
        else
            min_pages_per_instance = MIN_PAGES_PER_INSTANCE
        end
    rescue
        min_pages_per_instance = MIN_PAGES_PER_INSTANCE
    end

    # first try a simplistic approach: just split the URLs into equally
    # sized chunks, one per instance
    orig_chunks = urls.chunk( dispatchers.size + 1 )

    # if the first chunk matches the minimum then they all do
    # (except the last possibly) so return these as is...
    return orig_chunks if orig_chunks[0].size >= min_pages_per_instance

    # ...otherwise re-arrange into fewer, larger chunks of at least
    # min_pages_per_instance URLs each
    chunks = []
    idx = 0
    orig_chunks.each do |chunk|
        chunk.each do |url|
            chunks[idx] ||= []
            # move on to the next chunk once the current one is full
            # (fixed: the original bumped idx but dropped the URL that
            # triggered the overflow, silently losing it)
            idx += 1 if chunks[idx].size >= min_pages_per_instance
            ( chunks[idx] ||= [] ) << url
        end
    end

    chunks
end
|
940
|
+
|
941
|
+
#
# Picks the dispatchers to use based on their load balancing metrics and
# the instructed maximum amount of slaves.
#
# @param [Array<Hash>] dispatchers dispatcher info hashes; each gets a
#   'score' key added as a side effect
#
# @return [Array<Hash>] dispatchers sorted by ascending score, capped at
#   @opts.max_slaves when that option is set
#
def pick_dispatchers( dispatchers )
    dispatchers.each {
        |dispatcher|
        dispatcher['score'] = get_dispatcher_score( dispatcher )
    }

    # lowest score (least loaded) first
    d = dispatchers.sort_by { |dispatcher| dispatcher['score'] }

    # the rescue guards against an @opts object that doesn't respond
    # to max_slaves
    begin
        if @opts.max_slaves && @opts.max_slaves.to_i > 0
            return d[0...@opts.max_slaves.to_i]
        end
    rescue
    end

    # fixed: the original fell through and returned nil when max_slaves
    # wasn't set, instead of the full sorted list
    d
end
|
962
|
+
|
963
|
+
#
# Returns the load balancing score of a dispatcher based on its resource
# consumption and (optional) weight.
#
# @param [Hash] dispatcher dispatcher info hash
#
# @return [Numeric]
#
def get_dispatcher_score( dispatcher )
    score  = get_resource_consumption( dispatcher['running_jobs'] )
    weight = dispatcher['weight']
    weight ? score * weight : score
end
|
972
|
+
|
973
|
+
#
# Returns a nominal resource consumption of a dispatcher.
#
# It's basically calculated as the sum of the CPU and memory usage
# percentages of its running instances.
#
# @param [Array<Hash>] jobs running job info hashes, each with a 'proc'
#   hash that may carry 'pctmem'/'pctcpu' strings
#
# @return [Numeric]
#
def get_resource_consumption( jobs )
    jobs.reduce( 0 ) do |total, job|
        proc_info = job['proc']
        total += Float( proc_info['pctmem'] ) if proc_info['pctmem']
        total += Float( proc_info['pctcpu'] ) if proc_info['pctcpu']
        total
    end
end
|
990
|
+
|
991
|
+
#
# Spawns and runs a new remote instance via the preferred dispatcher.
#
# Dispatches a slave, pushes it a restricted copy of our options, its
# share of the pages/elements, points it at us as master, loads the
# modules/plug-ins and starts the scan.
#
# @param [Array<String>] urls URLs the slave is restricted to
# @param [Array] pages pages for the slave's page queue
# @param [Array] elements element IDs the slave's audit is restricted to
# @param [String] prefered_dispatcher URL of the dispatcher to use
# @param [Proc] &block passed a { 'url' => ..., 'token' => ... } hash
#   for the new instance once it's up and running
#
def spawn( urls, pages, elements, prefered_dispatcher, &block )

    opts = @opts.to_h.deep_clone

    self_token = @opts.datastore[:token]

    pref_dispatcher = connect_to_dispatcher( prefered_dispatcher )

    pref_dispatcher.dispatch( self_url, {
        'rank'   => 'slave',
        'target' => @opts.url.to_s,
        'master' => self_url
    }) {
        |instance_hash|

        instance = connect_to_instance( instance_hash )

        # restrict the slave's crawl to its assigned URLs
        opts['url'] = opts['url'].to_s
        opts['restrict_paths'] = urls

        # slaves must not themselves run in grid mode
        opts['grid_mode'] = ''

        # strip options that are local to this machine/instance
        opts.delete( 'dir' )
        opts.delete( 'rpc_port' )
        opts.delete( 'rpc_address' )
        opts['datastore'].delete( :dispatcher_url )
        opts['datastore'].delete( :token )

        # token the slave will use for privileged calls back to us
        opts['datastore']['master_priv_token'] = @local_token

        # regexp options can't travel over RPC; send their sources
        opts['exclude'].each_with_index {
            |v, i|
            opts['exclude'][i] = v.source
        }

        opts['include'].each_with_index {
            |v, i|
            opts['include'][i] = v.source
        }

        # don't let the slaves run plug-ins that are not meant
        # to be distributed
        opts['plugins'].reject! {
            |k, v|
            !@plugins[k].distributable?
        }

        # configure and start the slave; each step is an async RPC call
        # that runs the next one from its completion callback
        instance.opts.set( opts ){
            instance.framework.update_page_queue!( pages ) {
                instance.framework.restrict_to_elements!( elements ){
                    instance.framework.set_master( self_url, self_token ){
                        instance.modules.load( opts['mods'] ) {
                            instance.plugins.load( opts['plugins'] ) {
                                instance.framework.run {
                                    block.call( {
                                        'url'   => instance_hash['url'],
                                        'token' => instance_hash['token'] }
                                    )
                                }
                            }
                        }
                    }
                }
            }
        }
    }
end
|
1061
|
+
|
1062
|
+
# Our own RPC URL, derived (and memoized) from the dispatcher's URL by
# swapping in our own RPC port.
#
# @return [String]
def self_url
    return @self_url if @self_url

    dispatcher_url  = @opts.datastore[:dispatcher_url]
    dispatcher_port = dispatcher_url.split( ':', 2 )[1]

    @self_url = dispatcher_url.gsub( dispatcher_port, @opts.rpc_port.to_s )
end
|
1070
|
+
|
1071
|
+
# Token required for privileged calls to our master (set by the master
# in #spawn via opts['datastore']['master_priv_token']).
#
# NOTE(review): read with a String key while other datastore reads in
# this class use Symbol keys -- it matches the String key written by
# #spawn, but confirm the datastore doesn't symbolize keys in transit.
def master_priv_token
    @opts.datastore['master_priv_token']
end
|
1074
|
+
|
1075
|
+
# Generates a random authentication token (64 hex chars).
#
# NOTE(review): Kernel#rand is not cryptographically secure; since this
# token guards privileged RPC calls, consider SecureRandom instead.
#
# @return [String]
def gen_token
    seed = Array.new( 10 ) { rand( 9999 ) }.join
    Digest::SHA2.hexdigest( seed )
end
|
1078
|
+
|
1079
|
+
# Client for the Dispatcher whose URL is in our datastore (i.e. the one
# that spawned this instance).
def dispatcher
    connect_to_dispatcher( @opts.datastore[:dispatcher_url] )
end
|
1082
|
+
|
1083
|
+
# Returns an RPC client for the Dispatcher at +url+.
#
# @param [String] url dispatcher URL in 'hostname:port' form
def connect_to_dispatcher( url )
    Arachni::RPC::Client::Dispatcher.new( @opts, url )
end
|
1086
|
+
|
1087
|
+
# Merges the stat hashes of all instances into one: counters are summed,
# averages are averaged, current pages are collected and the largest ETA
# wins.
#
# @param [Array<Hash>] stats string-keyed stat hashes, one per instance
#   (the last entry is used as the base and is popped off)
#
# @return [Hash] merged stats; {} when +stats+ is empty
def merge_stats( stats )
    final_stats = stats.pop.dup
    return {} if !final_stats || final_stats.empty?

    # single instance -- nothing to merge
    return final_stats if stats.empty?

    final_stats['current_pages'] = []
    final_stats['current_pages'] << final_stats['current_page'] if final_stats['current_page']

    # stats summed across instances
    total = [
        :requests,
        :responses,
        :time_out_count,
        :avg,
        :sitemap_size,
        :auditmap_size,
        :max_concurrency
    ]

    # stats averaged across instances
    avg = [
        :progress,
        :curr_res_time,
        :curr_res_cnt,
        :curr_avg,
        :average_res_time
    ]

    begin
        stats.each {
            |instats|

            ( avg | total ).each {
                |k|
                final_stats[k.to_s] += Float( instats[k.to_s] )
            }

            final_stats['current_pages'] << instats['current_page'] if instats['current_page']

            final_stats['eta'] ||= instats['eta']
            final_stats['eta'] = max_eta( final_stats['eta'], instats['eta'] )
        }

        avg.each {
            |k|
            final_stats[k.to_s] /= Float( stats.size + 1 )
            final_stats[k.to_s] = Float( sprintf( "%.2f", final_stats[k.to_s] ) )
        }
    rescue
        # merging is best-effort; malformed slave stats are ignored
        # (fixed: was `rescue Exception`, which would also have swallowed
        # SignalException, SystemExit, NoMemoryError, etc.)
    end

    # the master keeps the authoritative sitemap
    final_stats['sitemap_size'] = @override_sitemap.size

    final_stats
end
|
1143
|
+
|
1144
|
+
# Compares two 'hh:mm:ss' ETA strings and returns the largest one.
#
# @param [String] eta1
# @param [String] eta2
#
# @return [String]
def max_eta( eta1, eta2 )
    return eta1 if eta1 == eta2

    # splits them into hours, mins and secs
    eta1_splits = eta1.split( ':' )
    eta2_splits = eta2.split( ':' )

    # go through and compare the hours, mins, secs
    eta1_splits.size.times {
        |i|
        return eta1 if eta1_splits[i].to_i > eta2_splits[i].to_i
        return eta2 if eta1_splits[i].to_i < eta2_splits[i].to_i
    }

    # the ETAs are numerically equal even though the strings differ
    # (fixed: previously fell through and returned nil, which would
    # poison merge_stats' 'eta' field)
    eta1
end
|
1158
|
+
|
1159
|
+
end
|
1160
|
+
|
1161
|
+
end
|
1162
|
+
end
|
1163
|
+
end
|