oneforall-kjl 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. OneForAll/__init__.py +15 -0
  2. OneForAll/brute.py +503 -0
  3. OneForAll/common/check.py +41 -0
  4. OneForAll/common/crawl.py +10 -0
  5. OneForAll/common/database.py +277 -0
  6. OneForAll/common/domain.py +63 -0
  7. OneForAll/common/ipasn.py +42 -0
  8. OneForAll/common/ipreg.py +139 -0
  9. OneForAll/common/lookup.py +28 -0
  10. OneForAll/common/module.py +369 -0
  11. OneForAll/common/query.py +9 -0
  12. OneForAll/common/records.py +363 -0
  13. OneForAll/common/request.py +264 -0
  14. OneForAll/common/resolve.py +173 -0
  15. OneForAll/common/search.py +78 -0
  16. OneForAll/common/similarity.py +138 -0
  17. OneForAll/common/tablib/__init__.py +0 -0
  18. OneForAll/common/tablib/format.py +89 -0
  19. OneForAll/common/tablib/tablib.py +360 -0
  20. OneForAll/common/tldextract.py +240 -0
  21. OneForAll/common/utils.py +789 -0
  22. OneForAll/config/__init__.py +17 -0
  23. OneForAll/config/api.py +94 -0
  24. OneForAll/config/default.py +255 -0
  25. OneForAll/config/log.py +38 -0
  26. OneForAll/config/setting.py +108 -0
  27. OneForAll/export.py +72 -0
  28. OneForAll/modules/altdns.py +216 -0
  29. OneForAll/modules/autotake/github.py +105 -0
  30. OneForAll/modules/certificates/censys_api.py +73 -0
  31. OneForAll/modules/certificates/certspotter.py +48 -0
  32. OneForAll/modules/certificates/crtsh.py +84 -0
  33. OneForAll/modules/certificates/google.py +48 -0
  34. OneForAll/modules/certificates/myssl.py +46 -0
  35. OneForAll/modules/certificates/racent.py +49 -0
  36. OneForAll/modules/check/axfr.py +97 -0
  37. OneForAll/modules/check/cdx.py +44 -0
  38. OneForAll/modules/check/cert.py +58 -0
  39. OneForAll/modules/check/csp.py +94 -0
  40. OneForAll/modules/check/nsec.py +58 -0
  41. OneForAll/modules/check/robots.py +44 -0
  42. OneForAll/modules/check/sitemap.py +44 -0
  43. OneForAll/modules/collect.py +70 -0
  44. OneForAll/modules/crawl/archivecrawl.py +59 -0
  45. OneForAll/modules/crawl/commoncrawl.py +59 -0
  46. OneForAll/modules/datasets/anubis.py +45 -0
  47. OneForAll/modules/datasets/bevigil.py +50 -0
  48. OneForAll/modules/datasets/binaryedge_api.py +50 -0
  49. OneForAll/modules/datasets/cebaidu.py +45 -0
  50. OneForAll/modules/datasets/chinaz.py +45 -0
  51. OneForAll/modules/datasets/chinaz_api.py +49 -0
  52. OneForAll/modules/datasets/circl_api.py +49 -0
  53. OneForAll/modules/datasets/cloudflare_api.py +130 -0
  54. OneForAll/modules/datasets/dnsdb_api.py +51 -0
  55. OneForAll/modules/datasets/dnsdumpster.py +52 -0
  56. OneForAll/modules/datasets/dnsgrep.py +44 -0
  57. OneForAll/modules/datasets/fullhunt.py +48 -0
  58. OneForAll/modules/datasets/hackertarget.py +45 -0
  59. OneForAll/modules/datasets/ip138.py +45 -0
  60. OneForAll/modules/datasets/ipv4info_api.py +73 -0
  61. OneForAll/modules/datasets/netcraft.py +66 -0
  62. OneForAll/modules/datasets/passivedns_api.py +51 -0
  63. OneForAll/modules/datasets/qianxun.py +61 -0
  64. OneForAll/modules/datasets/rapiddns.py +45 -0
  65. OneForAll/modules/datasets/riddler.py +45 -0
  66. OneForAll/modules/datasets/robtex.py +58 -0
  67. OneForAll/modules/datasets/securitytrails_api.py +56 -0
  68. OneForAll/modules/datasets/sitedossier.py +57 -0
  69. OneForAll/modules/datasets/spyse_api.py +62 -0
  70. OneForAll/modules/datasets/sublist3r.py +45 -0
  71. OneForAll/modules/datasets/urlscan.py +45 -0
  72. OneForAll/modules/datasets/windvane.py +92 -0
  73. OneForAll/modules/dnsquery/mx.py +35 -0
  74. OneForAll/modules/dnsquery/ns.py +35 -0
  75. OneForAll/modules/dnsquery/soa.py +35 -0
  76. OneForAll/modules/dnsquery/spf.py +35 -0
  77. OneForAll/modules/dnsquery/txt.py +35 -0
  78. OneForAll/modules/enrich.py +72 -0
  79. OneForAll/modules/finder.py +206 -0
  80. OneForAll/modules/intelligence/alienvault.py +50 -0
  81. OneForAll/modules/intelligence/riskiq_api.py +58 -0
  82. OneForAll/modules/intelligence/threatbook_api.py +50 -0
  83. OneForAll/modules/intelligence/threatminer.py +45 -0
  84. OneForAll/modules/intelligence/virustotal.py +60 -0
  85. OneForAll/modules/intelligence/virustotal_api.py +59 -0
  86. OneForAll/modules/iscdn.py +86 -0
  87. OneForAll/modules/search/ask.py +69 -0
  88. OneForAll/modules/search/baidu.py +96 -0
  89. OneForAll/modules/search/bing.py +79 -0
  90. OneForAll/modules/search/bing_api.py +78 -0
  91. OneForAll/modules/search/fofa_api.py +74 -0
  92. OneForAll/modules/search/gitee.py +71 -0
  93. OneForAll/modules/search/github_api.py +86 -0
  94. OneForAll/modules/search/google.py +83 -0
  95. OneForAll/modules/search/google_api.py +77 -0
  96. OneForAll/modules/search/hunter_api.py +72 -0
  97. OneForAll/modules/search/quake_api.py +72 -0
  98. OneForAll/modules/search/shodan_api.py +53 -0
  99. OneForAll/modules/search/so.py +75 -0
  100. OneForAll/modules/search/sogou.py +72 -0
  101. OneForAll/modules/search/wzsearch.py +68 -0
  102. OneForAll/modules/search/yahoo.py +81 -0
  103. OneForAll/modules/search/yandex.py +80 -0
  104. OneForAll/modules/search/zoomeye_api.py +73 -0
  105. OneForAll/modules/srv.py +75 -0
  106. OneForAll/modules/wildcard.py +319 -0
  107. OneForAll/oneforall.py +275 -0
  108. OneForAll/takeover.py +168 -0
  109. OneForAll/test.py +23 -0
  110. oneforall_kjl-0.1.1.dist-info/METADATA +18 -0
  111. oneforall_kjl-0.1.1.dist-info/RECORD +114 -0
  112. oneforall_kjl-0.1.1.dist-info/WHEEL +5 -0
  113. oneforall_kjl-0.1.1.dist-info/entry_points.txt +2 -0
  114. oneforall_kjl-0.1.1.dist-info/top_level.txt +1 -0
OneForAll/modules/intelligence/threatbook_api.py
@@ -0,0 +1,50 @@
+ from config import settings
+ from common.query import Query
+
+
+ class ThreatBookAPI(Query):
+     def __init__(self, domain):
+         Query.__init__(self)
+         self.domain = domain
+         self.module = 'Intelligence'
+         self.source = 'ThreatBookAPIQuery'
+         self.addr = 'https://api.threatbook.cn/v3/domain/sub_domains'
+         self.key = settings.threatbook_api_key
+
+     def query(self):
+         """
+         Query the API for subdomains and match them
+         """
+         self.header = self.get_header()
+         self.proxy = self.get_proxy(self.source)
+         params = {'apikey': self.key,
+                   'resource': self.domain}
+         resp = self.post(self.addr, params)
+         self.subdomains = self.collect_subdomains(resp)
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         if not self.have_api(self.key):
+             return
+         self.begin()
+         self.query()
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     query = ThreatBookAPI(domain)
+     query.run()
+
+
+ if __name__ == '__main__':
+     run('example.com')
OneForAll/modules/intelligence/threatminer.py
@@ -0,0 +1,45 @@
+ from common.query import Query
+
+
+ class ThreatMiner(Query):
+     def __init__(self, domain):
+         Query.__init__(self)
+         self.domain = domain
+         self.module = 'Intelligence'
+         self.source = 'ThreatMinerQuery'
+         self.addr = 'https://api.threatminer.org/v2/domain.php'
+
+     def query(self):
+         """
+         Query the API for subdomains and match them
+         """
+         self.header = self.get_header()
+         self.proxy = self.get_proxy(self.source)
+         params = {'q': self.domain, 'rt': 5}
+         resp = self.get(self.addr, params)
+         self.subdomains = self.collect_subdomains(resp)
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         self.begin()
+         self.query()
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     query = ThreatMiner(domain)
+     query.run()
+
+
+ if __name__ == '__main__':
+     run('example.com')
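
For context, the ThreatMiner endpoint used above is a plain public API. A minimal standalone sketch of the same request (not part of the package; it assumes the documented `rt=5` subdomain report type and the `results` field in the JSON reply):

import requests

def threatminer_subdomains(domain):
    # Same endpoint and parameters as the ThreatMiner module above
    resp = requests.get('https://api.threatminer.org/v2/domain.php',
                        params={'q': domain, 'rt': 5}, timeout=10)
    data = resp.json()
    # rt=5 selects the subdomain report; 'results' should be a list of names
    return data.get('results', [])

print(threatminer_subdomains('example.com'))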
OneForAll/modules/intelligence/virustotal.py
@@ -0,0 +1,60 @@
+ from common.query import Query
+
+ '''
+ Queries at most 100 records
+ '''
+
+
+ class VirusTotal(Query):
+     def __init__(self, domain):
+         Query.__init__(self)
+         self.source = 'VirusTotalQuery'
+         self.module = 'Intelligence'
+         self.domain = domain
+
+     def query(self):
+         """
+         Query the API for subdomains and match them
+         """
+         next_cursor = ''
+         while True:
+             self.header = self.get_header()
+             self.header.update({'Referer': 'https://www.virustotal.com/',
+                                 'TE': 'Trailers'})
+             self.proxy = self.get_proxy(self.source)
+             params = {'limit': '40', 'cursor': next_cursor}
+             addr = f'https://www.virustotal.com/ui/domains/{self.domain}/subdomains'
+             resp = self.get(url=addr, params=params)
+             if not resp:
+                 break
+             subdomains = self.match_subdomains(resp)
+             if not subdomains:
+                 break
+             self.subdomains.update(subdomains)
+             data = resp.json()
+             next_cursor = data.get('meta').get('cursor')
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         self.begin()
+         self.query()
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     query = VirusTotal(domain)
+     query.run()
+
+
+ if __name__ == '__main__':
+     run('mi.com')
OneForAll/modules/intelligence/virustotal_api.py
@@ -0,0 +1,59 @@
+ from config import settings
+ from common.query import Query
+
+
+ class VirusTotalAPI(Query):
+     def __init__(self, domain):
+         Query.__init__(self)
+         self.domain = domain
+         self.module = 'Intelligence'
+         self.source = 'VirusTotalAPIQuery'
+         self.key = settings.virustotal_api_key
+
+     def query(self):
+         """
+         Query the API for subdomains and match them
+         """
+         next_cursor = ''
+         while True:
+             self.header = self.get_header()
+             self.header.update({'x-apikey': self.key})
+             self.proxy = self.get_proxy(self.source)
+             params = {'limit': '40', 'cursor': next_cursor}
+             addr = f'https://www.virustotal.com/api/v3/domains/{self.domain}/subdomains'
+             resp = self.get(url=addr, params=params)
+             subdomains = self.match_subdomains(resp)
+             if not subdomains:
+                 break
+             self.subdomains.update(subdomains)
+             data = resp.json()
+             next_cursor = data.get('meta').get('cursor')
+             if not next_cursor:
+                 break
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         if not self.have_api(self.key):
+             return
+         self.begin()
+         self.query()
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     query = VirusTotalAPI(domain)
+     query.run()
+
+
+ if __name__ == '__main__':
+     run('mi.com')
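
The two VirusTotal modules above share the same cursor-based pagination pattern. A self-contained sketch of that pattern against the public v3 subdomains endpoint (response fields `data` and `meta.cursor` as returned by that API; a valid API key is assumed):

import requests

def iter_vt_subdomains(domain, api_key):
    # Follow the v3 cursor until the API stops returning one
    url = f'https://www.virustotal.com/api/v3/domains/{domain}/subdomains'
    cursor = ''
    while True:
        resp = requests.get(url, headers={'x-apikey': api_key},
                            params={'limit': 40, 'cursor': cursor}, timeout=10)
        data = resp.json()
        for item in data.get('data', []):
            yield item.get('id')  # for domain objects the id is the subdomain name
        cursor = data.get('meta', {}).get('cursor')
        if not cursor:
            break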
OneForAll/modules/iscdn.py
@@ -0,0 +1,86 @@
+ import json
+ import ipaddress
+
+ from config import settings
+ from common import utils
+ from common.database import Database
+ from config.log import logger
+
+ data_dir = settings.data_storage_dir
+
+ # from https://github.com/al0ne/Vxscan/blob/master/lib/iscdn.py
+ cdn_ip_cidr = utils.load_json(data_dir.joinpath('cdn_ip_cidr.json'))
+ cdn_asn_list = utils.load_json(data_dir.joinpath('cdn_asn_list.json'))
+
+ # from https://github.com/Qclover/CDNCheck/blob/master/checkCDN/cdn3_check.py
+ cdn_cname_keyword = utils.load_json(data_dir.joinpath('cdn_cname_keywords.json'))
+
+ cdn_header_key = utils.load_json(data_dir.joinpath('cdn_header_keys.json'))
+
+
+ def check_cname_keyword(cname):
+     if not cname:
+         return False
+     names = cname.lower().split(',')
+     for name in names:
+         for keyword in cdn_cname_keyword.keys():
+             if keyword in name:
+                 return True
+
+
+ def check_header_key(header):
+     if isinstance(header, str):
+         header = json.loads(header)
+     if isinstance(header, dict):
+         header = set(map(lambda x: x.lower(), header.keys()))
+         for key in cdn_header_key:
+             if key in header:
+                 return True
+     else:
+         return False
+
+
+ def check_cdn_cidr(ips):
+     if isinstance(ips, str):
+         ips = set(ips.split(','))
+     else:
+         return False
+     for ip in ips:
+         try:
+             ip = ipaddress.ip_address(ip)
+         except Exception as e:
+             logger.log('DEBUG', e.args)
+             return False
+         for cidr in cdn_ip_cidr:
+             if ip in ipaddress.ip_network(cidr):
+                 return True
+
+
+ def check_cdn_asn(asn):
+     if isinstance(asn, str):
+         if asn in cdn_asn_list:
+             return True
+     return False
+
+
+ def do_check(data):
+     logger.log('DEBUG', f'Checking cdn')
+     for index, item in enumerate(data):
+         cname = item.get('cname')
+         if check_cname_keyword(cname):
+             data[index]['cdn'] = 1
+             continue
+         header = item.get('header')
+         if check_header_key(header):
+             data[index]['cdn'] = 1
+             continue
+         ip = item.get('ip')
+         if check_cdn_cidr(ip):
+             data[index]['cdn'] = 1
+             continue
+         asn = item.get('asn')
+         if check_cdn_asn(asn):
+             data[index]['cdn'] = 1
+             continue
+         data[index]['cdn'] = 0
+     return data
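
A minimal sketch of driving `do_check` on its own, assuming the record shape (`cname`, `header`, `ip`, `asn` keys) that the loop above reads. The sample values are made up, the verdicts depend on the bundled cdn_*.json data files, and the import assumes you run from the OneForAll package root so that `modules` and `config` are importable:

from modules import iscdn

records = [
    {'cname': 'static.example.com.w.kunlunca.com', 'header': '{"X-Cache": "HIT"}',
     'ip': '203.0.113.7', 'asn': 'AS13335'},
    {'cname': '', 'header': '{}', 'ip': '198.51.100.20', 'asn': 'AS64500'},
]
for item in iscdn.do_check(records):
    # cdn is set to 1 if any of the keyword/header/CIDR/ASN checks matched
    print(item['ip'], 'CDN' if item['cdn'] else 'not CDN')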
OneForAll/modules/search/ask.py
@@ -0,0 +1,69 @@
+ import time
+ from common.search import Search
+
+
+ class Ask(Search):
+     def __init__(self, domain):
+         Search.__init__(self)
+         self.domain = domain
+         self.module = 'Search'
+         self.source = 'AskSearch'
+         self.addr = 'https://www.search.ask.com/web'
+         self.limit_num = 200  # limit on the number of search results
+         self.per_page_num = 10  # 10 results per page by default
+
+     def search(self, domain, filtered_subdomain=''):
+         """
+         Send a search request and match subdomains
+
+         :param str domain: domain name
+         :param str filtered_subdomain: subdomains to filter out
+         """
+         self.page_num = 1
+         while True:
+             time.sleep(self.delay)
+             self.header = self.get_header()
+             self.proxy = self.get_proxy(self.source)
+             query = 'site:.' + domain + filtered_subdomain
+             params = {'q': query, 'page': self.page_num}
+             resp = self.get(self.addr, params)
+             subdomains = self.match_subdomains(resp, fuzzy=False)
+             if not self.check_subdomains(subdomains):
+                 break
+             self.subdomains.update(subdomains)
+             self.page_num += 1
+             if '>Next<' not in resp.text:
+                 break
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         self.begin()
+         self.search(self.domain)
+         # Exclude subdomains that dominate the results to discover new subdomains
+         for statement in self.filter(self.domain, self.subdomains):
+             self.search(self.domain, filtered_subdomain=statement)
+
+         # Recursively search subdomains of the next level
+         if self.recursive_search:
+             for subdomain in self.recursive_subdomain():
+                 self.search(subdomain)
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     search = Ask(domain)
+     search.run()
+
+
+ if __name__ == '__main__':
+     run('example.com')
OneForAll/modules/search/baidu.py
@@ -0,0 +1,96 @@
+ import time
+ from bs4 import BeautifulSoup
+ from common.search import Search
+
+
+ class Baidu(Search):
+     def __init__(self, domain):
+         Search.__init__(self)
+         self.module = 'Search'
+         self.source = 'BaiduSearch'
+         self.addr = 'https://www.baidu.com/s'
+         self.domain = domain
+         self.limit_num = 750  # limit on the number of search results
+
+     def redirect_match(self, html):
+         """
+         Extract redirect URLs and follow them with HEAD requests
+
+         :param html: response body
+         :return: subdomains
+         """
+         bs = BeautifulSoup(html, 'html.parser')
+         subdomains_all = set()
+         # Collect all redirect URLs from the search results
+         for find_res in bs.find_all('a', {'class': 'c-showurl'}):
+             url = find_res.get('href')
+             subdomains = self.match_location(url)
+             subdomains_all.update(subdomains)
+         return subdomains_all
+
+     def search(self, domain, filtered_subdomain=''):
+         """
+         Send a search request and match subdomains
+
+         :param str domain: domain name
+         :param str filtered_subdomain: subdomains to filter out
+         """
+         self.page_num = 0  # reset to 0 for a fresh search
+         while True:
+             time.sleep(self.delay)
+             self.header = self.get_header()
+             self.proxy = self.get_proxy(self.source)
+             query = 'site:.' + domain + filtered_subdomain
+             params = {'wd': query,
+                       'pn': self.page_num,
+                       'rn': self.per_page_num}
+             resp = self.get(self.addr, params)
+             if not resp:
+                 return
+             if len(domain) > 12:  # work around Baidu truncating long domains in results
+                 # Get the direct link from the Location header of Baidu's redirect URL
+                 subdomains = self.redirect_match(resp.text)
+             else:
+                 subdomains = self.match_subdomains(resp, fuzzy=False)
+             if not self.check_subdomains(subdomains):
+                 break
+             self.subdomains.update(subdomains)
+             self.page_num += self.per_page_num
+             # Stop when the results page has no next page
+             if f'&pn={self.page_num}&' not in resp.text:
+                 break
+             if self.page_num >= self.limit_num:  # search result count limit
+                 break
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         self.begin()
+         self.search(self.domain)
+         # Exclude subdomains that dominate the results to discover new subdomains
+         for statement in self.filter(self.domain, self.subdomains):
+             self.search(self.domain, filtered_subdomain=statement)
+
+         # Recursively search subdomains of the next level
+         if self.recursive_search:
+             for subdomain in self.recursive_subdomain():
+                 self.search(subdomain)
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     search = Baidu(domain)
+     search.run()
+
+
+ if __name__ == '__main__':
+     run('mi.com')
OneForAll/modules/search/bing.py
@@ -0,0 +1,79 @@
+ import time
+ from common.search import Search
+
+
+ class Bing(Search):
+     def __init__(self, domain):
+         Search.__init__(self)
+         self.domain = domain
+         self.module = 'Search'
+         self.source = 'BingSearch'
+         self.init = 'https://www.bing.com/'
+         self.addr = 'https://www.bing.com/search'
+         self.limit_num = 1000  # limit on the number of search results
+
+     def search(self, domain, filtered_subdomain=''):
+         """
+         Send a search request and match subdomains
+
+         :param str domain: domain name
+         :param str filtered_subdomain: subdomains to filter out
+         """
+         self.header = self.get_header()
+         self.proxy = self.get_proxy(self.source)
+         self.page_num = 0  # reset to 0 for a fresh search
+         resp = self.get(self.init)
+         if not resp:
+             return
+         self.cookie = resp.cookies  # get cookies; Bing requires them when searching
+         while True:
+             time.sleep(self.delay)
+             self.proxy = self.get_proxy(self.source)
+             query = 'site:.' + domain + filtered_subdomain
+             params = {'q': query, 'first': self.page_num,
+                       'count': self.per_page_num}
+             resp = self.get(self.addr, params)
+             subdomains = self.match_subdomains(resp, fuzzy=False)
+             if not self.check_subdomains(subdomains):
+                 break
+             self.subdomains.update(subdomains)
+             # Stop when the results page has no next page
+             if '<div class="sw_next">' not in resp.text:
+                 break
+             self.page_num += self.per_page_num
+             if self.page_num >= self.limit_num:  # search result count limit
+                 break
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         self.begin()
+         self.search(self.domain)
+
+         # Exclude subdomains that dominate the results to discover new subdomains
+         for statement in self.filter(self.domain, self.subdomains):
+             self.search(self.domain, filtered_subdomain=statement)
+
+         # Recursively search subdomains of the next level
+         if self.recursive_search:
+             for subdomain in self.recursive_subdomain():
+                 self.search(subdomain)
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     search = Bing(domain)
+     search.run()
+
+
+ if __name__ == '__main__':
+     run('example.com')
OneForAll/modules/search/bing_api.py
@@ -0,0 +1,78 @@
+ import time
+ from config import settings
+ from common.search import Search
+
+
+ class BingAPI(Search):
+     def __init__(self, domain):
+         Search.__init__(self)
+         self.domain = domain
+         self.module = 'Search'
+         self.source = 'BingAPISearch'
+         self.addr = 'https://api.bing.microsoft.com/v7.0/search'
+         self.id = settings.bing_api_id
+         self.key = settings.bing_api_key
+         self.limit_num = 1000  # Bing limits the number of results per search keyword
+         self.delay = 1  # Bing Custom Search requires a 1 second delay
+
+     def search(self, domain, filtered_subdomain=''):
+         """
+         Send a search request and match subdomains
+
+         :param str domain: domain name
+         :param str filtered_subdomain: subdomains to filter out
+         """
+         self.page_num = 0  # reset to 0 for a fresh search
+         while True:
+             time.sleep(self.delay)
+             self.header = self.get_header()
+             self.header = {'Ocp-Apim-Subscription-Key': self.key}
+             self.proxy = self.get_proxy(self.source)
+             query = 'site:.' + domain + filtered_subdomain
+             params = {'q': query, 'safesearch': 'Off',
+                       'count': self.per_page_num,
+                       'offset': self.page_num}
+             resp = self.get(self.addr, params)
+             subdomains = self.match_subdomains(resp)
+             if not self.check_subdomains(subdomains):
+                 break
+             self.subdomains.update(subdomains)
+             self.page_num += self.per_page_num
+             if self.page_num >= self.limit_num:  # search result count limit
+                 break
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         if not self.have_api(self.id, self.key):
+             return
+         self.begin()
+         self.search(self.domain)
+
+         # Exclude subdomains that dominate the results to discover new subdomains
+         for statement in self.filter(self.domain, self.subdomains):
+             self.search(self.domain, filtered_subdomain=statement)
+
+         # Recursively search subdomains of the next level
+         if self.recursive_search:
+             for subdomain in self.recursive_subdomain():
+                 self.search(subdomain)
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     search = BingAPI(domain)
+     search.run()
+
+
+ if __name__ == '__main__':
+     run('example.com')
OneForAll/modules/search/fofa_api.py
@@ -0,0 +1,74 @@
+ import base64
+ import time
+
+ from config import settings
+ from common.search import Search
+
+
+ class FoFa(Search):
+     def __init__(self, domain):
+         Search.__init__(self)
+         self.domain = domain
+         self.module = 'Search'
+         self.source = 'FoFaAPISearch'
+         self.addr = 'https://fofa.info/api/v1/search/all'
+         self.delay = 1
+         self.email = settings.fofa_api_email
+         self.key = settings.fofa_api_key
+
+     def search(self):
+         """
+         Send a search request and match subdomains
+         """
+         self.page_num = 1
+         subdomain_encode = f'domain="{self.domain}"'.encode('utf-8')
+         query_data = base64.b64encode(subdomain_encode)
+         while 100 * self.page_num < settings.cam_records_maximum_per_domain:
+             time.sleep(self.delay)
+             self.header = self.get_header()
+             self.proxy = self.get_proxy(self.source)
+             query = {'email': self.email,
+                      'key': self.key,
+                      'qbase64': query_data,
+                      'page': self.page_num,
+                      'full': 'true',
+                      'size': min(1000, settings.cam_records_maximum_per_domain)}
+             resp = self.get(self.addr, query)
+             if not resp:
+                 return
+             resp_json = resp.json()
+             subdomains = self.match_subdomains(resp)
+             if not subdomains:  # stop when the search finds no more subdomains
+                 break
+             self.subdomains.update(subdomains)
+             size = resp_json.get('size')
+             if size < min(1000, settings.cam_records_maximum_per_domain):
+                 break
+             self.page_num += 1
+
+     def run(self):
+         """
+         Class execution entry point
+         """
+         if not self.have_api(self.email, self.key):
+             return
+         self.begin()
+         self.search()
+         self.finish()
+         self.save_json()
+         self.gen_result()
+         self.save_db()
+
+
+ def run(domain):
+     """
+     Unified invocation entry point
+
+     :param str domain: domain name
+     """
+     search = FoFa(domain)
+     search.run()
+
+
+ if __name__ == '__main__':
+     run('example.com')
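
For reference, the `qbase64` value built in `search()` above is just the FOFA query string base64-encoded before being sent as a GET parameter. A standalone illustration of that encoding step:

import base64

domain = 'example.com'
fofa_query = f'domain="{domain}"'.encode('utf-8')  # same query string as the module builds
qbase64 = base64.b64encode(fofa_query).decode()
print(qbase64)  # ZG9tYWluPSJleGFtcGxlLmNvbSI=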