pgsqlpot 2.0.0 (pgsqlpot-2.0.0-py2.py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- core/__init__.py +0 -0
- core/config.py +50 -0
- core/logfile.py +74 -0
- core/output.py +39 -0
- core/paths.py +53 -0
- core/protocol.py +161 -0
- core/tools.py +170 -0
- output_plugins/__init__.py +0 -0
- output_plugins/couch.py +68 -0
- output_plugins/datadog.py +74 -0
- output_plugins/discord.py +133 -0
- output_plugins/elastic.py +137 -0
- output_plugins/hpfeed.py +43 -0
- output_plugins/influx2.py +66 -0
- output_plugins/jsonlog.py +36 -0
- output_plugins/kafka.py +57 -0
- output_plugins/localsyslog.py +66 -0
- output_plugins/mongodb.py +83 -0
- output_plugins/mysql.py +210 -0
- output_plugins/nlcvapi.py +119 -0
- output_plugins/postgres.py +154 -0
- output_plugins/redisdb.py +47 -0
- output_plugins/rethinkdblog.py +46 -0
- output_plugins/slack.py +94 -0
- output_plugins/socketlog.py +40 -0
- output_plugins/sqlite.py +141 -0
- output_plugins/telegram.py +141 -0
- output_plugins/textlog.py +46 -0
- output_plugins/xmpp.py +193 -0
- pgsqlpot/__init__.py +25 -0
- pgsqlpot/cli.py +512 -0
- pgsqlpot/data/Dockerfile +56 -0
- pgsqlpot/data/docs/INSTALL.md +400 -0
- pgsqlpot/data/docs/INSTALLWIN.md +411 -0
- pgsqlpot/data/docs/PLUGINS.md +21 -0
- pgsqlpot/data/docs/TODO.md +8 -0
- pgsqlpot/data/docs/datadog/README.md +32 -0
- pgsqlpot/data/docs/discord/README.md +58 -0
- pgsqlpot/data/docs/geoipupdtask.ps1 +270 -0
- pgsqlpot/data/docs/mysql/README.md +176 -0
- pgsqlpot/data/docs/mysql/READMEWIN.md +157 -0
- pgsqlpot/data/docs/mysql/mysql.sql +85 -0
- pgsqlpot/data/docs/postgres/README.md +184 -0
- pgsqlpot/data/docs/postgres/READMEWIN.md +196 -0
- pgsqlpot/data/docs/postgres/postgres.sql +73 -0
- pgsqlpot/data/docs/slack/README.md +68 -0
- pgsqlpot/data/docs/sqlite3/README.md +131 -0
- pgsqlpot/data/docs/sqlite3/READMEWIN.md +123 -0
- pgsqlpot/data/docs/sqlite3/sqlite3.sql +69 -0
- pgsqlpot/data/docs/telegram/README.md +103 -0
- pgsqlpot/data/etc/honeypot.cfg +415 -0
- pgsqlpot/data/etc/honeypot.cfg.base +418 -0
- pgsqlpot/data/test/.gitignore +3 -0
- pgsqlpot/data/test/test.py +51 -0
- pgsqlpot/honeypot.py +117 -0
- pgsqlpot-2.0.0.dist-info/METADATA +152 -0
- pgsqlpot-2.0.0.dist-info/RECORD +61 -0
- pgsqlpot-2.0.0.dist-info/WHEEL +6 -0
- pgsqlpot-2.0.0.dist-info/entry_points.txt +2 -0
- pgsqlpot-2.0.0.dist-info/licenses/LICENSE +674 -0
- pgsqlpot-2.0.0.dist-info/top_level.txt +3 -0
output_plugins/discord.py
ADDED
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Simple Discord webhook logger
+"""
+
+from __future__ import absolute_import
+
+from io import BytesIO
+from json import dumps, loads
+from time import gmtime, strftime, time
+
+from core import output
+from core.config import CONFIG
+from core.tools import decode, to_bytes
+
+from twisted.internet import reactor
+from twisted.internet.ssl import ClientContextFactory
+from twisted.internet.task import deferLater
+from twisted.python.log import msg
+from twisted.web.client import (
+    Agent,
+    FileBodyProducer,
+    HTTPConnectionPool,
+    _HTTP11ClientFactory,
+    readBody
+)
+from twisted.web.http_headers import Headers
+
+
+class WebClientContextFactory(ClientContextFactory):
+    def getContext(self, hostname, port):
+        return ClientContextFactory.getContext(self)
+
+
+class QuietHTTP11ClientFactory(_HTTP11ClientFactory):
+    noisy = False
+
+
+class Output(output.Output):
+
+    def start(self):
+        self.url = to_bytes(CONFIG.get('output_discord', 'url'))
+        self.delay = CONFIG.getfloat('output_discord', 'delay', fallback=2.0)
+        contextFactory = WebClientContextFactory()
+        pool = HTTPConnectionPool(reactor)
+        pool._factory = QuietHTTP11ClientFactory
+        self.agent = Agent(reactor, contextFactory=contextFactory, pool=pool)
+        self.last_sent = 0
+        self.requests_list = []
+
+    def stop(self):
+        pass
+
+    def write(self, event):
+        operation = event['operation'].lower()
+
+        message = '__New event__\n'
+        message += '[{} UTC] [PGSQLPot on {} ({})]: {}'.format(
+            strftime('%Y-%m-%d %H:%M:%S', gmtime(event['unixtime'])), event['sensor'],
+            event['session'], operation.capitalize()
+        )
+
+        if operation == 'unknown':
+            message += ' operation: "{}"'.format(event['username'])
+
+        message += ' from {}:{}'.format(event['src_ip'], event['dst_port'])
+
+        if operation == 'login':
+            message += ', username: "{}", password: "{}"'.format(event['username'], event['password'])
+        message += '.\n'
+
+        self.requests_list.append({'content': message})
+        self._drain()
+
+    def _drain(self):
+        """
+        Send the next queued entry if the rate-limit window has elapsed.
+        If entries remain, schedule another drain after self.delay seconds.
+        """
+        if not self.requests_list:
+            return
+        now = time()
+        elapsed = now - self.last_sent
+        if elapsed < self.delay:
+            deferLater(reactor, self.delay - elapsed, self._drain)
+            return
+        self.last_sent = now
+        entry = self.requests_list.pop(0)
+        d = self._post(entry)
+        if self.requests_list:
+            deferLater(reactor, self.delay, self._drain)
+        return d
+
+    def _post(self, entry):
+
+        def cbBody(body):
+            return processResult(body)
+
+        def cbPartial(failure):
+            """
+            Google HTTP Server does not set Content-Length. Twisted marks it as partial
+            """
+            failure.printTraceback()
+            return processResult(failure.value)
+
+        def cbResponse(response):
+            if response is None or response.code in [200, 201, 204]:
+                return
+            msg('Discord error: {} {}'.format(response.code, decode(response.phrase)))
+            d = readBody(response)
+            d.addCallback(cbBody)
+            d.addErrback(cbPartial)
+            return d
+
+        def cbError(failure):
+            failure.printTraceback()
+
+        def processResult(result):
+            if result:
+                try:
+                    j = loads(result)
+                    msg('Discord response: {}'.format(j.get('message', '')))
+                except Exception:
+                    pass
+
+        headers = Headers({b'Content-Type': [b'application/json']})
+        body = FileBodyProducer(BytesIO(to_bytes(dumps(entry, sort_keys=True))))
+        d = self.agent.request(b'POST', self.url, headers, body)
+        d.addCallback(cbResponse)
+        d.addErrback(cbError)
+        return d
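Note: write() above builds a plain-text summary before queueing it for the webhook. A minimal, stdlib-only sketch of that formatting, using a hypothetical login event whose keys match the ones the plugin reads:

    from time import gmtime, strftime

    # Hypothetical event; key names mirror those used by write() above.
    event = {
        'unixtime': 1700000000, 'sensor': 'sensor01', 'session': 'a1b2c3d4',
        'operation': 'LOGIN', 'src_ip': '198.51.100.7', 'dst_port': 5432,
        'username': 'postgres', 'password': 'hunter2',
    }

    operation = event['operation'].lower()
    message = '__New event__\n'
    message += '[{} UTC] [PGSQLPot on {} ({})]: {}'.format(
        strftime('%Y-%m-%d %H:%M:%S', gmtime(event['unixtime'])),
        event['sensor'], event['session'], operation.capitalize())
    message += ' from {}:{}'.format(event['src_ip'], event['dst_port'])
    message += ', username: "{}", password: "{}"'.format(
        event['username'], event['password'])
    message += '.\n'
    print(message)
    # __New event__
    # [2023-11-14 22:13:20 UTC] [PGSQLPot on sensor01 (a1b2c3d4)]: Login
    # from 198.51.100.7:5432, username: "postgres", password: "hunter2".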
output_plugins/elastic.py
ADDED
@@ -0,0 +1,137 @@
+
+from __future__ import absolute_import
+
+from sys import version_info
+
+from core import output
+from core.config import CONFIG
+
+from twisted.python.log import msg
+
+PY2 = version_info[0] < 3
+
+if PY2:
+    import elasticsearch  # type: ignore
+    from elasticsearch import Elasticsearch, NotFoundError  # type: ignore
+    version_module = elasticsearch
+    version_attr = '__version__'
+else:
+    import elasticsearch8
+    from elasticsearch8 import Elasticsearch, NotFoundError
+    version_module = elasticsearch8
+    version_attr = '__versionstr__'
+
+
+class Output(output.Output):
+
+    def start(self):
+        host = CONFIG.get('output_elastic', 'host', fallback='localhost')
+        port = CONFIG.getint('output_elastic', 'port', fallback=9200)
+        username = CONFIG.get('output_elastic', 'username', fallback=None)
+        password = CONFIG.get('output_elastic', 'password', fallback=None)
+        use_ssl = CONFIG.getboolean('output_elastic', 'ssl', fallback=False)
+        verify_certs = CONFIG.getboolean('output_elastic', 'verify_certs', fallback=False)
+        ca_certs = CONFIG.get('output_elastic', 'ca_certs', fallback=None)
+        self.index = CONFIG.get('output_elastic', 'index', fallback='pgsqlpot')
+        self.pipeline = CONFIG.get('output_elastic', 'pipeline', fallback='geoip')
+
+        scheme = 'https' if use_ssl else 'http'
+        hosts = [{'host': host, 'port': port, 'scheme': scheme}]
+        kwargs = {}
+        if username and password:
+            kwargs['http_auth' if PY2 else 'basic_auth'] = (username, password)
+        if use_ssl:
+            kwargs['verify_certs'] = verify_certs
+            if verify_certs and ca_certs:
+                kwargs['ca_certs'] = ca_certs
+
+        # Create client
+        self.es = Elasticsearch(hosts=hosts, **kwargs)
+
+        # Detect major version from module attribute
+        ver_attr_value = getattr(version_module, version_attr)
+        if isinstance(ver_attr_value, tuple):
+            # Python 2.7 elasticsearch 7.x
+            self.es_ver = ver_attr_value[0]
+        else:
+            # Python 3.6+ elasticsearch8
+            self.es_ver = int(str(ver_attr_value).split('.')[0])
+
+        # Test connection
+        try:
+            info = self.es.info()
+            msg('output_elastic: connected to ES version {}'.format(info['version']['number']))
+        except Exception as e:
+            msg('output_elastic: connection failed: {}'.format(repr(e)))
+            raise
+
+        # Ensure index exists
+        if not self.es.indices.exists(index=self.index):
+            self.es.indices.create(index=self.index)
+
+        # Setup geoip
+        if self.pipeline == 'geoip':
+            self._setup_geoip_mapping()
+            self._setup_geoip_pipeline()
+
+    def _setup_geoip_mapping(self):
+        try:
+            body = {
+                'properties': {
+                    'geo': {
+                        'properties': {
+                            'location': {
+                                'type': 'geo_point'
+                            }
+                        }
+                    }
+                }
+            }
+            if self.es_ver >= 8:
+                self.es.indices.put_mapping(index=self.index, properties=body['properties'])
+            else:
+                self.es.indices.put_mapping(index=self.index, body=body)
+        except Exception as e:
+            msg('output_elastic: mapping setup failed: {}'.format(repr(e)))
+
+    def _setup_geoip_pipeline(self):
+        try:
+            self.es.ingest.get_pipeline(id=self.pipeline)
+        except NotFoundError:
+            body = {
+                'description': 'Add geoip info',
+                'processors': [
+                    {
+                        'geoip': {
+                            'field': 'src_ip',
+                            'target_field': 'geo',
+                            'database_file': 'GeoLite2-City.mmdb'
+                        }
+                    }
+                ]
+            }
+            try:
+                if self.es_ver >= 8:
+                    self.es.ingest.put_pipeline(
+                        id=self.pipeline,
+                        processors=body['processors'],
+                        description=body['description']
+                    )
+                else:
+                    self.es.ingest.put_pipeline(id=self.pipeline, body=body)
+            except Exception as e:
+                msg('output_elastic: pipeline creation failed: {}'.format(repr(e)))
+
+    def stop(self):
+        pass
+
+    def write(self, entry):
+        try:
+            if self.es_ver >= 8:
+                self.es.index(index=self.index, document=entry, pipeline=self.pipeline)
+            else:
+                self.es.index(index=self.index, body=entry, pipeline=self.pipeline)
+        except Exception as e:
+            msg('output_elastic: write failed: {}'.format(repr(e)))
+            if hasattr(e, 'info'):
+                msg('output_elastic info: {}'.format(e.info))
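Note: the geo_point mapping above exists so that documents enriched by the 'geoip' ingest pipeline can be plotted in Kibana. Roughly what an enriched document looks like (field layout follows the standard Elasticsearch geoip processor; the eventid and all values are illustrative):

    # Illustrative document shape after the pipeline runs: 'geo' is the
    # target_field configured above, and 'geo.location' matches the
    # geo_point mapping.
    doc = {
        'eventid': 'pgsqlpot.login',
        'src_ip': '198.51.100.7',
        'geo': {
            'country_iso_code': 'NL',
            'city_name': 'Amsterdam',
            'location': {'lat': 52.37, 'lon': 4.89},
        },
    }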
output_plugins/hpfeed.py
ADDED
@@ -0,0 +1,43 @@
+
+from __future__ import absolute_import
+
+from json import dumps
+
+from core import output
+from core.config import CONFIG
+from core.tools import to_bytes
+
+from hpfeeds.twisted import ClientSessionService
+
+from twisted.internet import endpoints, reactor, ssl
+
+class Output(output.Output):
+
+    def start(self):
+        self.channel = CONFIG.get('output_hpfeed', 'channel', fallback='pgsqlpot')
+
+        if CONFIG.has_option('output_hpfeed', 'endpoint'):
+            endpoint = CONFIG.get('output_hpfeed', 'endpoint')
+        else:
+            server = CONFIG.get('output_hpfeed', 'server')
+            port = CONFIG.getint('output_hpfeed', 'port')
+
+            if CONFIG.has_option('output_hpfeed', 'tlscert'):
+                with open(CONFIG.get('output_hpfeed', 'tlscert')) as fp:
+                    authority = ssl.Certificate.loadPEM(fp.read())
+                options = ssl.optionsForClientTLS(server, authority)
+                endpoint = endpoints.SSL4ClientEndpoint(reactor, server, port, options)
+            else:
+                endpoint = endpoints.HostnameEndpoint(reactor, server, port)
+
+        ident = CONFIG.get('output_hpfeed', 'identifier', raw=True)
+        secret = CONFIG.get('output_hpfeed', 'secret', raw=True)
+
+        self.client = ClientSessionService(endpoint, ident, secret)
+        self.client.startService()
+
+    def stop(self):
+        self.client.stopService()
+
+    def write(self, event):
+        self.client.publish(self.channel, to_bytes(dumps(event, sort_keys=True)))
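Note: publish() above sends the event as a sorted-keys JSON byte string. A quick stdlib-only sketch of the payload for a hypothetical event (.encode() stands in for core.tools.to_bytes):

    from json import dumps

    event = {'operation': 'LOGIN', 'src_ip': '198.51.100.7', 'unixtime': 1700000000}
    payload = dumps(event, sort_keys=True).encode('utf-8')
    # b'{"operation": "LOGIN", "src_ip": "198.51.100.7", "unixtime": 1700000000}'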
output_plugins/influx2.py
ADDED
@@ -0,0 +1,66 @@
+
+from __future__ import absolute_import
+
+from datetime import datetime
+
+from core import output
+from core.config import CONFIG
+
+from influxdb_client import InfluxDBClient, Point, WritePrecision
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+from pytz import timezone
+
+from twisted.python.log import msg
+
+
+class Output(output.Output):
+
+    def start(self):
+        host = CONFIG.get('output_influx2', 'host')
+        token = CONFIG.get('output_influx2', 'token', raw=True)
+
+        self.client = None
+        try:
+            self.client = InfluxDBClient(url=host, token=token)
+        except Exception as e:
+            msg("output_influx2: I/O error: '{}'".format(e))
+            return
+
+        if self.client is None:
+            msg('output_influx2: cannot instantiate client!')
+            return
+
+        self.org = CONFIG.get('output_influx2', 'org')
+        self.bucket = CONFIG.get('output_influx2', 'bucket', fallback='pgsqlpot')
+        self.write_api = self.client.write_api(write_options=SYNCHRONOUS)
+
+    def stop(self):
+        pass
+
+    def write(self, event):
+        m = Point(event['eventid'].replace('.', '_').replace('-', '_'))\
+            .tag('src_ip', event['src_ip'])\
+            .time(datetime.fromtimestamp(event['unixtime'], tz=timezone('UTC')), WritePrecision.NS)
+
+        fields = [
+            'src_port',
+            'dst_ip',
+            'dst_port',
+            'eventid',
+            'operation',
+            'sensor'
+        ]
+        operation = event['operation'].lower()
+        if operation == 'login':
+            fields.append('username')
+            fields.append('password')
+        elif operation == 'command':
+            fields.append('command')
+            fields.append('args')
+
+        for key in fields:
+            if key in event:
+                m.field(key, event[key])
+
+        self.write_api.write(bucket=self.bucket, org=self.org, record=m)
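Note: a sketch of the measurement write() above would build for a hypothetical event with eventid 'pgsqlpot.login' (the measurement name is the eventid with dots and dashes replaced by underscores); to_line_protocol() is used here only to inspect the record locally:

    from datetime import datetime
    from influxdb_client import Point, WritePrecision
    from pytz import timezone

    m = Point('pgsqlpot_login')\
        .tag('src_ip', '198.51.100.7')\
        .time(datetime.fromtimestamp(1700000000, tz=timezone('UTC')), WritePrecision.NS)
    m.field('username', 'postgres')
    m.field('password', 'hunter2')
    # Prints something like:
    # pgsqlpot_login,src_ip=198.51.100.7 password="hunter2",username="postgres" 1700000000000000000
    print(m.to_line_protocol())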
output_plugins/jsonlog.py
ADDED
@@ -0,0 +1,36 @@
+
+from __future__ import absolute_import
+
+from copy import deepcopy
+from json import dump
+from os import linesep
+from os.path import basename, dirname
+
+from core import output
+from core.tools import mkdir
+from core.config import CONFIG
+from core.logfile import HoneypotDailyLogFile
+
+class Output(output.Output):
+
+    def start(self):
+        self.epoch_timestamp = CONFIG.getboolean('output_jsonlog', 'epoch_timestamp', fallback=False)
+        fn = CONFIG.get('output_jsonlog', 'logfile')
+        dirs = dirname(fn)
+        base = basename(fn)
+        mkdir(dirs)
+        self.outfile = HoneypotDailyLogFile(base, dirs, defaultMode=0o664)
+
+    def stop(self):
+        self.outfile.flush()
+
+    def write(self, event):
+        if not self.epoch_timestamp:
+            # We need 'unixtime' value in some other plugins
+            event_dump = deepcopy(event)
+            event_dump.pop('unixtime', None)
+        else:
+            event_dump = event
+        dump(event_dump, self.outfile, separators=(',', ':'))
+        self.outfile.write(linesep)
+        self.outfile.flush()
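Note: with the default epoch_timestamp = false, write() above drops 'unixtime' and emits one compact JSON object per line. A stdlib-only sketch with a hypothetical event:

    from copy import deepcopy
    from json import dumps

    event = {'operation': 'LOGIN', 'src_ip': '198.51.100.7', 'unixtime': 1700000000}
    event_dump = deepcopy(event)
    event_dump.pop('unixtime', None)
    print(dumps(event_dump, separators=(',', ':')))
    # {"operation":"LOGIN","src_ip":"198.51.100.7"}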
output_plugins/kafka.py
ADDED
@@ -0,0 +1,57 @@
+
+from __future__ import absolute_import
+
+from json import dumps
+
+from core import output
+from core.config import CONFIG
+from core.tools import to_bytes
+
+from confluent_kafka import Producer
+
+from twisted.python.log import msg
+
+
+class Output(output.Output):
+
+    def start(self):
+        self.topic = CONFIG.get('output_kafka', 'topic', fallback='pgsqlpot')
+        self.debug = CONFIG.getboolean('output_kafka', 'debug', fallback=False)
+
+        self.producer = Producer({
+            'bootstrap.servers': '{}:{}'.format(
+                CONFIG.get('output_kafka', 'host', fallback='localhost'),
+                CONFIG.get('output_kafka', 'port', fallback=9092)
+            ),
+            #'sasl.mechanism': 'SCRAM-SHA-256',
+            #'security.protocol': 'SASL_SSL',
+            #'sasl.username': CONFIG.get('output_kafka', 'username'),
+            #'sasl.password': CONFIG.get('output_kafka', 'password')
+        })
+
+    def stop(self):
+        self.producer.flush()
+
+    def write(self, event):
+        self.postentry(event)
+
+    def delivery_callback(self, err, message):
+        if err:
+            msg('output_kafka: Message failed delivery: {}.'.format(err))
+        else:
+            if self.debug:
+                msg('output_kafka: Message delivered to {} [{}] @ {}'.format(
+                    message.topic(), message.partition(), message.offset()
+                ))
+
+    def postentry(self, event):
+        try:
+            self.producer.produce(
+                self.topic,
+                to_bytes(dumps(event, sort_keys=True)),
+                callback=self.delivery_callback
+            )
+            self.producer.poll(0)
+            self.producer.flush()
+        except Exception as e:
+            msg('Kafka error: {}'.format(e))
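Note: a minimal confluent_kafka consumer sketch for checking the plugin's output; the broker address and topic are assumptions matching the defaults above:

    from confluent_kafka import Consumer

    c = Consumer({
        'bootstrap.servers': 'localhost:9092',
        'group.id': 'pgsqlpot-check',
        'auto.offset.reset': 'earliest',
    })
    c.subscribe(['pgsqlpot'])
    try:
        while True:
            m = c.poll(1.0)
            if m is None:
                continue
            if m.error():
                print('consumer error: {}'.format(m.error()))
                continue
            print(m.value().decode('utf-8'))  # the sorted-keys JSON event
    finally:
        c.close()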
output_plugins/localsyslog.py
ADDED
@@ -0,0 +1,66 @@
+
+from __future__ import absolute_import
+
+import syslog
+
+from core import output
+from core.config import CONFIG
+
+
+def formatCef(logentry):
+    """
+    Take logentry and turn into CEF string
+    """
+    # Jan 18 11:07:53 host CEF:Version|Device Vendor|Device Product|
+    # Device Version|Signature ID|Name|Severity|[Extension]
+    cefVendor = 'Bontchev'
+    cefProduct = 'PGSQLPot'
+    cefVersion = '1.0'
+    cefSignature = logentry['eventid']
+    cefName = logentry['eventid']
+    cefSeverity = '5'
+
+    cefExtensions = {
+        'deviceExternalId': logentry['sensor'],
+        'op': logentry['operation'],
+        'src': logentry['src_ip'],
+        'spt': logentry['src_port'],
+        'dpt': logentry['dst_port'],
+        'dst': logentry['dst_ip'],
+        'proto': 'tcp'
+    }
+    if logentry['operation'].lower() == 'login':
+        cefExtensions['usr'] = logentry['username']
+        cefExtensions['pwd'] = logentry['password']
+
+    cefList = []
+    for key in list(cefExtensions.keys()):
+        value = str(cefExtensions[key])
+        cefList.append('{}={}'.format(key, value))
+
+    cefExtension = ' '.join(cefList)
+
+    cefString = 'CEF:0|' + \
+        cefVendor + '|' + \
+        cefProduct + '|' + \
+        cefVersion + '|' + \
+        cefSignature + '|' + \
+        cefName + '|' + \
+        cefSeverity + '|' + \
+        cefExtension
+
+    return cefString
+
+
+class Output(output.Output):
+
+    def start(self):
+        facilityString = CONFIG.get('output_localsyslog', 'facility', fallback='USER')
+        facility = vars(syslog)['LOG_' + facilityString]
+        syslog.openlog(logoption=syslog.LOG_PID, facility=facility)
+
+    def stop(self):
+        pass
+
+    def write(self, event):
+        syslog.syslog(syslog.LOG_INFO, formatCef(event))
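Note: for a hypothetical login event (eventid 'pgsqlpot.login', sensor 'sensor01', 198.51.100.7:51234 -> 203.0.113.2:5432, credentials postgres/hunter2), formatCef() above yields a record like the following on Python 3.7+, where dict insertion order is preserved:

    CEF:0|Bontchev|PGSQLPot|1.0|pgsqlpot.login|pgsqlpot.login|5|deviceExternalId=sensor01 op=LOGIN src=198.51.100.7 spt=51234 dpt=5432 dst=203.0.113.2 proto=tcp usr=postgres pwd=hunter2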
output_plugins/mongodb.py
ADDED
@@ -0,0 +1,83 @@
+
+from __future__ import absolute_import
+
+from core import output
+from core.config import CONFIG
+from core.tools import geolocate
+
+from warnings import filterwarnings
+from cryptography.utils import CryptographyDeprecationWarning
+filterwarnings('ignore', category=CryptographyDeprecationWarning)
+
+from pymongo import MongoClient
+from pymongo.server_api import ServerApi
+from geoip2.database import Reader
+
+from twisted.python.log import msg
+
+
+class Output(output.Output):
+
+    def insert_one(self, collection, event):
+        try:
+            return collection.insert_one(event).inserted_id
+        except Exception as e:
+            msg('output_mongodb: Error: {}'.format(e))
+            return None
+
+    def start(self):
+        host = CONFIG.get('output_mongodb', 'host', fallback='localhost')
+        username = CONFIG.get('output_mongodb', 'username', fallback='', raw=True)
+        password = CONFIG.get('output_mongodb', 'password', fallback='', raw=True)
+        db_name = CONFIG.get('output_mongodb', 'database', fallback='pgsqlpot')
+        db_addr = CONFIG.get('output_mongodb', 'connection_string')
+        db_addr = db_addr.format(username, password, host)
+
+        self.geoip = CONFIG.getboolean('output_mongodb', 'geoip', fallback=True)
+
+        try:
+            self.mongo_client = MongoClient(db_addr, server_api=ServerApi('1'))
+            self.mongo_db = self.mongo_client[db_name]
+            self.col_events = self.mongo_db['connections']
+            if self.geoip:
+                self.col_geolocation = self.mongo_db['geolocation']
+        except Exception as e:
+            msg('output_mongodb: Error: {}'.format(e))
+
+        if self.geoip:
+            geoipdb_city_path = CONFIG.get('output_mongodb', 'geoip_citydb', fallback='data/GeoLite2-City.mmdb')
+            geoipdb_asn_path = CONFIG.get('output_mongodb', 'geoip_asndb', fallback='data/GeoLite2-ASN.mmdb')
+            try:
+                self.reader_city = Reader(geoipdb_city_path)
+            except Exception:
+                self.reader_city = None
+                msg('Failed to open City GeoIP database {}'.format(geoipdb_city_path))
+            try:
+                self.reader_asn = Reader(geoipdb_asn_path)
+            except Exception:
+                self.reader_asn = None
+                msg('Failed to open ASN GeoIP database {}'.format(geoipdb_asn_path))
+
+    def stop(self):
+        self.mongo_client.close()
+        if self.geoip:
+            if self.reader_city is not None:
+                self.reader_city.close()
+            if self.reader_asn is not None:
+                self.reader_asn.close()
+
+    def write(self, event):
+        self.insert_one(self.col_events, event)
+        if self.geoip:
+            remote_ip = event['src_ip']
+            if not self.col_geolocation.find_one({'ip': remote_ip}):
+                country, country_code, city, org, asn_num = geolocate(remote_ip, self.reader_city, self.reader_asn)
+                geo_entry = {
+                    'ip': remote_ip,
+                    'country': country,
+                    'country_code': country_code,
+                    'city': city,
+                    'org': org,
+                    'asn': asn_num
+                }
+                self.insert_one(self.col_geolocation, geo_entry)
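Note: a minimal read-side pymongo sketch for inspecting what the plugin stores; the connection string and database name are assumptions matching the fallbacks above:

    from pymongo import MongoClient

    client = MongoClient('mongodb://localhost:27017/')
    db = client['pgsqlpot']

    # Events for one attacker IP, paired with its cached geolocation entry.
    ip = '198.51.100.7'
    geo = db['geolocation'].find_one({'ip': ip})
    for event in db['connections'].find({'src_ip': ip}):
        print(event.get('operation'), geo.get('country') if geo else None)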