oneforall-kjl 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- OneForAll/__init__.py +15 -0
- OneForAll/brute.py +503 -0
- OneForAll/common/check.py +41 -0
- OneForAll/common/crawl.py +10 -0
- OneForAll/common/database.py +277 -0
- OneForAll/common/domain.py +63 -0
- OneForAll/common/ipasn.py +42 -0
- OneForAll/common/ipreg.py +139 -0
- OneForAll/common/lookup.py +28 -0
- OneForAll/common/module.py +369 -0
- OneForAll/common/query.py +9 -0
- OneForAll/common/records.py +363 -0
- OneForAll/common/request.py +264 -0
- OneForAll/common/resolve.py +173 -0
- OneForAll/common/search.py +78 -0
- OneForAll/common/similarity.py +138 -0
- OneForAll/common/tablib/__init__.py +0 -0
- OneForAll/common/tablib/format.py +89 -0
- OneForAll/common/tablib/tablib.py +360 -0
- OneForAll/common/tldextract.py +240 -0
- OneForAll/common/utils.py +789 -0
- OneForAll/config/__init__.py +17 -0
- OneForAll/config/api.py +94 -0
- OneForAll/config/default.py +255 -0
- OneForAll/config/log.py +38 -0
- OneForAll/config/setting.py +108 -0
- OneForAll/export.py +72 -0
- OneForAll/modules/altdns.py +216 -0
- OneForAll/modules/autotake/github.py +105 -0
- OneForAll/modules/certificates/censys_api.py +73 -0
- OneForAll/modules/certificates/certspotter.py +48 -0
- OneForAll/modules/certificates/crtsh.py +84 -0
- OneForAll/modules/certificates/google.py +48 -0
- OneForAll/modules/certificates/myssl.py +46 -0
- OneForAll/modules/certificates/racent.py +49 -0
- OneForAll/modules/check/axfr.py +97 -0
- OneForAll/modules/check/cdx.py +44 -0
- OneForAll/modules/check/cert.py +58 -0
- OneForAll/modules/check/csp.py +94 -0
- OneForAll/modules/check/nsec.py +58 -0
- OneForAll/modules/check/robots.py +44 -0
- OneForAll/modules/check/sitemap.py +44 -0
- OneForAll/modules/collect.py +70 -0
- OneForAll/modules/crawl/archivecrawl.py +59 -0
- OneForAll/modules/crawl/commoncrawl.py +59 -0
- OneForAll/modules/datasets/anubis.py +45 -0
- OneForAll/modules/datasets/bevigil.py +50 -0
- OneForAll/modules/datasets/binaryedge_api.py +50 -0
- OneForAll/modules/datasets/cebaidu.py +45 -0
- OneForAll/modules/datasets/chinaz.py +45 -0
- OneForAll/modules/datasets/chinaz_api.py +49 -0
- OneForAll/modules/datasets/circl_api.py +49 -0
- OneForAll/modules/datasets/cloudflare_api.py +130 -0
- OneForAll/modules/datasets/dnsdb_api.py +51 -0
- OneForAll/modules/datasets/dnsdumpster.py +52 -0
- OneForAll/modules/datasets/dnsgrep.py +44 -0
- OneForAll/modules/datasets/fullhunt.py +48 -0
- OneForAll/modules/datasets/hackertarget.py +45 -0
- OneForAll/modules/datasets/ip138.py +45 -0
- OneForAll/modules/datasets/ipv4info_api.py +73 -0
- OneForAll/modules/datasets/netcraft.py +66 -0
- OneForAll/modules/datasets/passivedns_api.py +51 -0
- OneForAll/modules/datasets/qianxun.py +61 -0
- OneForAll/modules/datasets/rapiddns.py +45 -0
- OneForAll/modules/datasets/riddler.py +45 -0
- OneForAll/modules/datasets/robtex.py +58 -0
- OneForAll/modules/datasets/securitytrails_api.py +56 -0
- OneForAll/modules/datasets/sitedossier.py +57 -0
- OneForAll/modules/datasets/spyse_api.py +62 -0
- OneForAll/modules/datasets/sublist3r.py +45 -0
- OneForAll/modules/datasets/urlscan.py +45 -0
- OneForAll/modules/datasets/windvane.py +92 -0
- OneForAll/modules/dnsquery/mx.py +35 -0
- OneForAll/modules/dnsquery/ns.py +35 -0
- OneForAll/modules/dnsquery/soa.py +35 -0
- OneForAll/modules/dnsquery/spf.py +35 -0
- OneForAll/modules/dnsquery/txt.py +35 -0
- OneForAll/modules/enrich.py +72 -0
- OneForAll/modules/finder.py +206 -0
- OneForAll/modules/intelligence/alienvault.py +50 -0
- OneForAll/modules/intelligence/riskiq_api.py +58 -0
- OneForAll/modules/intelligence/threatbook_api.py +50 -0
- OneForAll/modules/intelligence/threatminer.py +45 -0
- OneForAll/modules/intelligence/virustotal.py +60 -0
- OneForAll/modules/intelligence/virustotal_api.py +59 -0
- OneForAll/modules/iscdn.py +86 -0
- OneForAll/modules/search/ask.py +69 -0
- OneForAll/modules/search/baidu.py +96 -0
- OneForAll/modules/search/bing.py +79 -0
- OneForAll/modules/search/bing_api.py +78 -0
- OneForAll/modules/search/fofa_api.py +74 -0
- OneForAll/modules/search/gitee.py +71 -0
- OneForAll/modules/search/github_api.py +86 -0
- OneForAll/modules/search/google.py +83 -0
- OneForAll/modules/search/google_api.py +77 -0
- OneForAll/modules/search/hunter_api.py +72 -0
- OneForAll/modules/search/quake_api.py +72 -0
- OneForAll/modules/search/shodan_api.py +53 -0
- OneForAll/modules/search/so.py +75 -0
- OneForAll/modules/search/sogou.py +72 -0
- OneForAll/modules/search/wzsearch.py +68 -0
- OneForAll/modules/search/yahoo.py +81 -0
- OneForAll/modules/search/yandex.py +80 -0
- OneForAll/modules/search/zoomeye_api.py +73 -0
- OneForAll/modules/srv.py +75 -0
- OneForAll/modules/wildcard.py +319 -0
- OneForAll/oneforall.py +275 -0
- OneForAll/takeover.py +168 -0
- OneForAll/test.py +23 -0
- oneforall_kjl-0.1.1.dist-info/METADATA +18 -0
- oneforall_kjl-0.1.1.dist-info/RECORD +114 -0
- oneforall_kjl-0.1.1.dist-info/WHEEL +5 -0
- oneforall_kjl-0.1.1.dist-info/entry_points.txt +2 -0
- oneforall_kjl-0.1.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,68 @@
|
|
1
|
+
import time
|
2
|
+
from common.search import Search
|
3
|
+
|
4
|
+
|
5
|
+
class WzSearch(Search):
    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'WzSearch'
        self.addr = 'https://www.wuzhuiso.com/s'

    def search(self, domain, filtered_subdomain=''):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search for
        :param str filtered_subdomain: subdomain filter statement appended to the query
        """
        self.page_num = 1  # reset so repeated searches start from the first page
        while True:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.proxy = self.get_proxy(self.source)
            query = 'site:.' + domain + filtered_subdomain
            params = {'q': query, 'pn': self.page_num, 'src': 'page_www', 'fr': 'none'}
            resp = self.get(self.addr, params)
            if not resp:
                # guard: a failed request would otherwise crash on resp.text below
                return
            subdomains = self.match_subdomains(resp, fuzzy=False)
            if not self.check_subdomains(subdomains):
                break
            self.subdomains.update(subdomains)
            self.page_num += 1
            # stop when the page has no "next" link (removed leftover debug print)
            if 'next" href' not in resp.text:
                break

    def run(self):
        """
        Class execution entry point.
        """
        self.begin()
        self.search(self.domain)
        # exclude subdomains with too many results to discover new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)
        # recursively search the next level of subdomains
        if self.recursive_search:
            for subdomain in self.recursive_subdomain():
                self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
55
|
+
|
56
|
+
|
57
|
+
def run(domain):
    """
    Unified module entry point.

    :param str domain: target domain
    """
    WzSearch(domain).run()


if __name__ == '__main__':
    run('qq.com')
|
@@ -0,0 +1,81 @@
|
|
1
|
+
import time
|
2
|
+
from common.search import Search
|
3
|
+
|
4
|
+
|
5
|
+
class Yahoo(Search):
    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'YahooSearch'
        self.init = 'https://search.yahoo.com/'
        self.addr = 'https://search.yahoo.com/search'
        self.limit_num = 1000  # Yahoo caps the number of retrievable results
        self.delay = 2
        self.per_page_num = 30  # maximum results Yahoo returns per page

    def search(self, domain, filtered_subdomain=''):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search for
        :param str filtered_subdomain: subdomain filter statement appended to the query
        """
        # fix: reset the page offset, otherwise the filtered/recursive searches in
        # run() resume from the previous search's offset (WzSearch/Yandex both reset)
        self.page_num = 1
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        resp = self.get(self.init)
        if not resp:
            return
        self.cookie = resp.cookies  # Yahoo requires the session cookie when searching
        while True:
            time.sleep(self.delay)
            self.proxy = self.get_proxy(self.source)
            query = 'site:.' + domain + filtered_subdomain
            params = {'p': query, 'b': self.page_num, 'pz': self.per_page_num}
            resp = self.get(self.addr, params)
            if not resp:
                return
            # strip the highlight tags so domain names are contiguous for matching
            text = resp.text.replace('<b>', '').replace('</b>', '')
            subdomains = self.match_subdomains(text, fuzzy=False)
            if not self.check_subdomains(subdomains):
                break
            self.subdomains.update(subdomains)
            if '>Next</a>' not in resp.text:  # stop when there is no next page
                break
            self.page_num += self.per_page_num
            if self.page_num >= self.limit_num:  # result count limit reached
                break

    def run(self):
        """
        Class execution entry point.
        """
        self.begin()
        self.search(self.domain)

        # exclude subdomains with too many results to discover new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next level of subdomains
        if self.recursive_search:
            for subdomain in self.recursive_subdomain():
                self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
68
|
+
|
69
|
+
|
70
|
+
def run(domain):
    """
    Unified module entry point.

    :param str domain: target domain
    """
    Yahoo(domain).run()


if __name__ == '__main__':
    run('example.com')
|
@@ -0,0 +1,80 @@
|
|
1
|
+
import time
|
2
|
+
from common.search import Search
|
3
|
+
|
4
|
+
|
5
|
+
class Yandex(Search):
    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'YandexSearch'
        self.init = 'https://yandex.com/'
        self.addr = 'https://yandex.com/search'
        self.limit_num = 1000  # cap on the number of pages fetched
        self.delay = 5

    def search(self, domain, filtered_subdomain=''):
        """
        Send search requests and match subdomains in the responses.

        :param str domain: domain to search for
        :param str filtered_subdomain: subdomain filter statement appended to the query
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        self.page_num = 0  # reset to 0 so repeated searches start over
        resp = self.get(self.init)
        if not resp:
            return
        self.cookie = resp.cookies  # Yandex requires the session cookie
        while True:
            time.sleep(self.delay)
            self.proxy = self.get_proxy(self.source)
            query = 'site:.' + domain + filtered_subdomain
            params = {'text': query, 'p': self.page_num,
                      'numdoc': self.per_page_num}
            resp = self.get(self.addr, params)
            if not resp:
                # fix: the original used resp.text below without this guard,
                # crashing when the request failed (the init request IS guarded)
                return
            subdomains = self.match_subdomains(resp, fuzzy=False)
            if not self.check_subdomains(subdomains):
                break
            self.subdomains.update(subdomains)
            if '>next</a>' not in resp.text:  # stop when there is no next page
                break
            self.page_num += 1
            if self.page_num >= self.limit_num:  # page limit reached
                break

    def run(self):
        """
        Class execution entry point.
        """
        self.begin()

        self.search(self.domain)

        # exclude subdomains with too many results to discover new ones
        for statement in self.filter(self.domain, self.subdomains):
            self.search(self.domain, filtered_subdomain=statement)

        # recursively search the next level of subdomains
        if self.recursive_search:
            for subdomain in self.recursive_subdomain():
                self.search(subdomain)
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
67
|
+
|
68
|
+
|
69
|
+
def run(domain):
    """
    Unified module entry point.

    :param str domain: target domain
    """
    Yandex(domain).run()


if __name__ == '__main__':
    run('example.com')
|
@@ -0,0 +1,73 @@
|
|
1
|
+
import time
|
2
|
+
from config import settings
|
3
|
+
from common.search import Search
|
4
|
+
|
5
|
+
|
6
|
+
class ZoomEyeAPI(Search):
    def __init__(self, domain):
        Search.__init__(self)
        self.domain = domain
        self.module = 'Search'
        self.source = 'ZoomEyeAPISearch'
        self.addr = 'https://api.zoomeye.org/domain/search'
        self.delay = 2
        self.key = settings.zoomeye_api_key

    def search(self):
        """
        Send search requests and match subdomains in the responses.
        """
        self.per_page_num = 30
        self.page_num = 1
        while self.per_page_num * self.page_num < settings.cam_records_maximum_per_domain:
            time.sleep(self.delay)
            self.header = self.get_header()
            self.header.update({'API-KEY': self.key})
            self.proxy = self.get_proxy(self.source)

            params = {'q': self.domain,
                      'page': self.page_num,
                      'type': 1}
            resp = self.get(self.addr, params)
            if not resp:
                return
            if resp.status_code == 403:  # forbidden: bad key or quota exhausted
                break
            resp_json = resp.json()
            subdomains = self.match_subdomains(resp)
            if not subdomains:  # stop when the search finds no subdomains
                break
            self.subdomains.update(subdomains)
            total = resp_json.get('total')
            self.page_num += 1
            if not total:
                # fix: the original called int(total) unconditionally and raised
                # TypeError when the API response carried no 'total' field
                break
            if self.page_num * self.per_page_num >= int(total):
                break
            if self.page_num > 400:  # hard safety cap on paging
                break

    def run(self):
        """
        Class execution entry point.
        """
        if not self.have_api(self.key):
            return
        self.begin()
        self.search()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
60
|
+
|
61
|
+
|
62
|
+
def run(domain):
    """
    Unified module entry point.

    :param str domain: target domain
    """
    ZoomEyeAPI(domain).run()


if __name__ == '__main__':
    run('zhipin.com')
|
OneForAll/modules/srv.py
ADDED
@@ -0,0 +1,75 @@
|
|
1
|
+
"""
|
2
|
+
通过枚举域名常见的SRV记录并做查询来发现子域
|
3
|
+
"""
|
4
|
+
|
5
|
+
import queue
|
6
|
+
import threading
|
7
|
+
|
8
|
+
from common import utils
|
9
|
+
from common.module import Module
|
10
|
+
from config.setting import data_storage_dir
|
11
|
+
|
12
|
+
|
13
|
+
class BruteSRV(Module):
    """Discover subdomains by querying common SRV record names."""

    def __init__(self, domain):
        Module.__init__(self)
        self.domain = domain
        self.module = 'BruteSRV'
        self.source = "BruteSRV"
        self.qtype = 'SRV'
        self.thread_num = 20
        self.names_queue = queue.Queue()
        self.answers_queue = queue.Queue()

    def fill_queue(self):
        # Load the common SRV prefixes and enqueue "<prefix><domain>" names.
        prefix_path = data_storage_dir.joinpath('srv_prefixes.json')
        for prefix in utils.load_json(prefix_path):
            self.names_queue.put(prefix + self.domain)

    def do_brute(self):
        # Spawn daemon workers, then block until every queued name is handled.
        for index in range(self.thread_num):
            worker = BruteThread(self.names_queue, self.answers_queue)
            worker.name = f'BruteThread-{index}'
            worker.daemon = True
            worker.start()
        self.names_queue.join()

    def deal_answers(self):
        # Drain the answer queue and extract subdomains from each SRV record.
        while not self.answers_queue.empty():
            answer = self.answers_queue.get()
            if answer is None:  # the query for that name produced no result
                continue
            for entry in answer:
                found = self.match_subdomains(str(entry))
                self.subdomains.update(found)

    def run(self):
        self.begin()
        self.fill_queue()
        self.do_brute()
        self.deal_answers()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
|
57
|
+
|
58
|
+
|
59
|
+
class BruteThread(threading.Thread):
    """Worker thread that resolves SRV queries taken from a shared queue."""

    def __init__(self, names_queue, answers_queue):
        threading.Thread.__init__(self)
        self.names_queue = names_queue
        self.answers_queue = answers_queue

    def run(self):
        # Consume names forever; the daemon flag lets the process exit anyway.
        while True:
            target = self.names_queue.get()
            self.answers_queue.put(utils.dns_query(target, 'SRV'))
            self.names_queue.task_done()
|
71
|
+
|
72
|
+
|
73
|
+
if __name__ == '__main__':
    BruteSRV('zonetransfer.me').run()
|
@@ -0,0 +1,319 @@
|
|
1
|
+
import secrets
|
2
|
+
|
3
|
+
import tenacity
|
4
|
+
from dns.exception import Timeout
|
5
|
+
from dns.resolver import NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers
|
6
|
+
|
7
|
+
from common import utils
|
8
|
+
from config import settings
|
9
|
+
from common import similarity
|
10
|
+
from config.log import logger
|
11
|
+
|
12
|
+
|
13
|
+
def gen_random_subdomains(domain, count):
    """
    Generate a set of random subdomain names under the given domain.

    :param domain: parent domain
    :param count: number of names to generate
    :return set: random "<hex-token>.<domain>" names (empty when count < 1)
    """
    if count < 1:
        return set()
    # secrets.token_hex(4) yields 8 hex chars; collisions are negligible
    return {f'{secrets.token_hex(4)}.{domain}' for _ in range(count)}
|
27
|
+
|
28
|
+
|
29
|
+
def query_a_record(subdomain, resolver):
    """
    Query the A record of a subdomain and log the outcome.

    :param subdomain: subdomain to resolve
    :param resolver: DNS resolver instance
    :return bool: True when the name resolved to at least one address
    """
    try:
        answer = resolver.query(subdomain, 'A')
    except Exception as e:
        logger.log('DEBUG', f'Query {subdomain} wildcard dns record error')
        logger.log('DEBUG', e.args)
        return False
    if answer.rrset is None:  # empty answer counts as a failure
        return False
    ttl = answer.ttl
    name = answer.name
    addresses = {record.address for record in answer}
    logger.log('ALERT', f'{subdomain} resolve to: {name} '
                        f'IP: {addresses} TTL: {ttl}')
    return True
|
50
|
+
|
51
|
+
|
52
|
+
def all_resolve_success(subdomains):
    """
    Check whether every subdomain in the collection resolves successfully.

    :param subdomains: iterable of subdomains
    :return bool: True when all resolutions succeeded
    """
    resolver = utils.dns_resolver()
    resolver.cache = None  # bypass the DNS cache
    return all([query_a_record(name, resolver) for name in subdomains])
|
64
|
+
|
65
|
+
|
66
|
+
def all_request_success(subdomains):
    """
    Request every subdomain over HTTP and collect the response bodies.

    :param subdomains: iterable of subdomains
    :return tuple: (all requests succeeded, list of response texts / falsy values)
    """
    texts = list()
    for name in subdomains:
        url = f'http://{name}'
        resp = utils.get_url_resp(url)
        if not resp:
            texts.append(resp)  # keep the falsy value so all() reports failure
            continue
        logger.log('ALERT', f'Request: {url} Status: {resp.status_code} '
                            f'Size: {len(resp.content)}')
        texts.append(resp.text)
    return all(texts), texts
|
83
|
+
|
84
|
+
|
85
|
+
def any_similar_html(resp_list):
    """
    Check whether any two HTML documents in the list are structurally similar.

    Generalized from the original fixed three-way unpacking (which raised
    ValueError for any other list length): now compares every unordered pair,
    so it works for any number of documents. Behavior for exactly three
    documents is unchanged.

    :param resp_list: list of HTML documents
    :return bool: True when at least one pair is similar
    """
    from itertools import combinations  # stdlib; local import keeps file imports untouched
    return any(similarity.is_similar(doc_a, doc_b)
               for doc_a, doc_b in combinations(resp_list, 2))
|
99
|
+
|
100
|
+
|
101
|
+
def to_detect_wildcard(domain):
    """
    Detect use wildcard dns record or not

    :param str domain: domain
    :return bool: use wildcard dns record or not
    """
    logger.log('INFOR', f'Detecting {domain} use wildcard dns record or not')
    random_subdomains = gen_random_subdomains(domain, 3)
    if not all_resolve_success(random_subdomains):
        # random names do not resolve -> no wildcard record
        return False
    is_all_success, responses = all_request_success(random_subdomains)
    if is_all_success:
        # every request succeeded: wildcard only if the pages look alike
        return any_similar_html(responses)
    # resolution works but requests fail -> treat as wildcard
    return True
|
116
|
+
|
117
|
+
|
118
|
+
def detect_wildcard(domain):
    """
    Detect and log whether the domain uses a wildcard DNS record.

    :param str domain: domain
    :return bool: wildcard enabled or not
    """
    enabled = to_detect_wildcard(domain)
    state = 'enables' if enabled else 'disables'
    logger.log('ALERT', f'The domain {domain} {state} wildcard')
    return enabled
|
125
|
+
|
126
|
+
|
127
|
+
def get_wildcard_record(domain, resolver):
    """
    Query the A record of *domain* directly against an authoritative name server.

    :param domain: (sub)domain to query
    :param resolver: DNS resolver already pointed at the authoritative NS
    :return tuple: ({ip, ...}, ttl) on success, (None, None) when the query
                   times out or the name has no A record
    """
    logger.log('INFOR', f"Query {domain} 's wildcard dns record "
                        f"in authoritative name server")
    try:
        answer = resolver.query(domain, 'A')

    except Timeout as e:
        # transient failure: report and let the caller retry with a new name
        logger.log('ALERT', f'Query timeout, retrying')
        logger.log('DEBUG', e.args)
        return None, None
    except (NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers) as e:
        # definitive negative answers: the name simply has no A record here
        logger.log('DEBUG', e.args)
        logger.log('DEBUG', f'{domain} dont have A record on authoritative name server')
        return None, None
    except Exception as e:
        # NOTE(review): unexpected errors abort the whole process via exit(1);
        # callers cannot recover from this path
        logger.log('ERROR', e.args)
        logger.log('ERROR', f'Query {domain} wildcard dns record in '
                            f'authoritative name server error')
        exit(1)
    else:
        if answer.rrset is None:
            logger.log('DEBUG', f'No record of query result')
            return None, None
        name = answer.name
        ip = {item.address for item in answer}
        ttl = answer.ttl
        logger.log('INFOR', f'{domain} results on authoritative name server: {name} '
                            f'IP: {ip} TTL: {ttl}')
        return ip, ttl
|
156
|
+
|
157
|
+
|
158
|
+
def collect_wildcard_record(domain, authoritative_ns):
    """
    Sample random subdomains against the authoritative name servers to collect
    the domain's wildcard resolution IPs and their TTL.

    :param domain: domain to probe
    :param authoritative_ns: list of authoritative name server addresses
    :return tuple: (set of wildcard IPs, ttl); ([], 0) when no NS was given
    """
    logger.log('INFOR', f'Collecting wildcard dns record for {domain}')
    if not authoritative_ns:
        return list(), int()
    resolver = utils.dns_resolver()
    resolver.nameservers = authoritative_ns  # query the authoritative NS directly
    resolver.rotate = True  # rotate through the name servers randomly
    resolver.cache = None  # do not use the DNS cache
    ips = set()
    ttl = int()
    ips_stat = dict()  # wildcard IP -> number of times it was observed
    ips_check = list()  # window of the last (up to 5) query results
    while True:
        token = secrets.token_hex(4)
        random_subdomain = f'{token}.{domain}'
        try:
            ip, ttl = get_wildcard_record(random_subdomain, resolver)
        except Exception as e:
            logger.log('DEBUG', e.args)
            logger.log('ALERT', f'Multiple query errors,'
                                f'try to query a new random subdomain')
            # exit the loop on query error
            break
        # record this result in the 5-query check window
        ips_check.append(ip)
        # stop once more than 50 wildcard IPs have been collected
        if len(ips) >= 50:
            break
        # stop when 5 consecutive queries produced no result
        if len(ips_check) == 5:
            if not any(ips_check):
                logger.log('ALERT', 'The query ends because there are '
                                    'no results for 5 consecutive queries.')
                break
            ips_check = list()
        if ip is None:
            continue
        ips.update(ip)
        # count how many times each wildcard IP has appeared
        for addr in ip:
            count = ips_stat.setdefault(addr, 0)
            ips_stat[addr] = count + 1
        # select the IP addresses seen at least twice
        addrs = list()
        for addr, times in ips_stat.items():
            if times >= 2:
                addrs.append(addr)
        # stop collecting once most (>= 70%) of the IPs repeat
        if len(addrs) / len(ips) >= 0.7:
            break
    logger.log('DEBUG', f'Collected the wildcard dns record of {domain}\n{ips}\n{ttl}')
    return ips, ttl
|
210
|
+
|
211
|
+
|
212
|
+
def check_by_compare(ip, ttl, wc_ips, wc_ttl):
    """
    Use TTL comparison to detect wildcard dns record

    :param set ip: A record IP address set
    :param int ttl: A record TTL value
    :param set wc_ips: wildcard dns record IP address set
    :param int wc_ttl: wildcard dns record TTL value
    :return bool: result
    """
    # Reference:http://sh3ll.me/archives/201704041222.txt
    if ip not in wc_ips:
        # an IP outside the wildcard pool cannot be wildcard resolution
        return False
    if ttl == wc_ttl:
        return True
    # differing TTLs that are both whole minutes indicate a distinct record
    return not (ttl % 60 == 0 and wc_ttl % 60 == 0)
|
228
|
+
|
229
|
+
|
230
|
+
def check_ip_times(times):
    """
    Use IP address times to determine wildcard or not

    :param times: IP address times
    :return bool: result
    """
    return times > settings.ip_appear_maximum
|
240
|
+
|
241
|
+
|
242
|
+
def check_cname_times(times):
    """
    Use cname times to determine wildcard or not

    :param times: cname times
    :return bool: result
    """
    return times > settings.cname_appear_maximum
|
252
|
+
|
253
|
+
|
254
|
+
def is_valid_subdomain(ip=None, ip_num=None, cname=None, cname_num=None,
                       ttl=None, wc_ttl=None, wc_ips=None):
    """
    Decide whether a resolved subdomain looks valid rather than wildcard noise.

    :param ip: one resolved IP address
    :param ip_num: how many times that IP appeared overall
    :param cname: one resolved cname
    :param cname_num: how many times that cname appeared overall
    :param ttl: A record TTL
    :param wc_ttl: wildcard record TTL
    :param wc_ips: wildcard record IP set
    :return tuple: (1, 'OK') when valid, otherwise (0, reason)
    """
    if cname and cname in settings.brute_cname_blacklist:
        # some wildcard setups resolve everything to one cname
        return 0, 'cname blacklist'
    if ip and ip in settings.brute_ip_blacklist:
        # a blacklisted IP marks the subdomain invalid
        return 0, 'IP blacklist'
    if wc_ips and wc_ttl:
        # compare only when wildcard records were actually collected
        if check_by_compare(ip, ttl, wc_ips, wc_ttl):
            return 0, 'IP wildcard'
    if ip_num and check_ip_times(ip_num):
        return 0, 'IP exceeded'
    if cname_num and check_cname_times(cname_num):
        return 0, 'cname exceeded'
    return 1, 'OK'
|
270
|
+
|
271
|
+
|
272
|
+
def stat_times(data):
    """
    Count how often each IP and cname value appears across the records.

    :param data: list of records with optional comma-separated 'ip'/'cname' strings
    :return dict: value -> occurrence count
    """
    counts = dict()
    for record in data:
        # 'ip' and 'cname' fields share the same CSV-string counting logic
        for field in ('ip', 'cname'):
            value = record.get(field)
            if isinstance(value, str):
                for item in value.split(','):
                    counts[item] = counts.get(item, 0) + 1
    return counts
|
288
|
+
|
289
|
+
|
290
|
+
def check_valid_subdomain(appear_times, info):
    """
    Validate one record's IPs and cnames against the appearance statistics.

    :param dict appear_times: value -> occurrence count (from stat_times)
    :param dict info: record with optional comma-separated 'ip'/'cname' strings
    :return tuple: (True, 'OK') or (False, reason)
    """
    ip_str = info.get('ip')
    if ip_str:
        for addr in ip_str.split(','):
            valid, reason = is_valid_subdomain(ip=addr,
                                               ip_num=appear_times.get(addr))
            if not valid:
                return False, reason
    cname_str = info.get('cname')
    if cname_str:
        for name in cname_str.split(','):
            valid, reason = is_valid_subdomain(cname=name,
                                               cname_num=appear_times.get(name))
            if not valid:
                return False, reason
    return True, 'OK'
|
308
|
+
|
309
|
+
|
310
|
+
def deal_wildcard(data):
    """
    Filter out records judged to be wildcard-resolution noise.

    :param data: list of subdomain records
    :return list: records that passed validation
    """
    appear_times = stat_times(data)
    kept = list()
    for info in data:
        subdomain = info.get('subdomain')
        isvalid, reason = check_valid_subdomain(appear_times, info)
        logger.log('DEBUG', f'{subdomain} is {isvalid} subdomain reason because {reason}')
        if isvalid:
            kept.append(info)
    return kept
|